From b814f9fef3efa1bdcb7e03a9161e08721b7bc8c4 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 15 Jul 2011 17:56:27 -0700 Subject: VSA: first cut. merged with 1279 --- nova/CA/newcerts/.placeholder | 0 nova/CA/private/.placeholder | 0 nova/CA/projects/.gitignore | 1 - nova/CA/projects/.placeholder | 0 nova/CA/reqs/.gitignore | 1 - nova/CA/reqs/.placeholder | 0 nova/api/ec2/__init__.py | 4 + nova/api/ec2/cloud.py | 164 ++++++- nova/api/openstack/contrib/drive_types.py | 147 ++++++ .../openstack/contrib/virtual_storage_arrays.py | 454 +++++++++++++++++++ nova/api/openstack/contrib/volumes.py | 14 +- nova/compute/api.py | 10 +- nova/db/api.py | 88 +++- nova/db/sqlalchemy/api.py | 291 ++++++++++++ .../migrate_repo/versions/032_add_vsa_data.py | 152 +++++++ nova/db/sqlalchemy/migration.py | 3 +- nova/db/sqlalchemy/models.py | 95 ++++ nova/exception.py | 20 + nova/flags.py | 27 ++ nova/quota.py | 4 +- nova/scheduler/vsa.py | 495 +++++++++++++++++++++ nova/tests/test_libvirt.py | 2 +- nova/volume/api.py | 46 +- nova/volume/driver.py | 20 +- nova/volume/manager.py | 121 ++++- nova/volume/san.py | 323 +++++++++++++- nova/vsa/__init__.py | 18 + nova/vsa/api.py | 407 +++++++++++++++++ nova/vsa/connection.py | 25 ++ nova/vsa/fake.py | 22 + nova/vsa/manager.py | 172 +++++++ 31 files changed, 3078 insertions(+), 48 deletions(-) delete mode 100644 nova/CA/newcerts/.placeholder delete mode 100644 nova/CA/private/.placeholder delete mode 100644 nova/CA/projects/.gitignore delete mode 100644 nova/CA/projects/.placeholder delete mode 100644 nova/CA/reqs/.gitignore delete mode 100644 nova/CA/reqs/.placeholder create mode 100644 nova/api/openstack/contrib/drive_types.py create mode 100644 nova/api/openstack/contrib/virtual_storage_arrays.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py create mode 100644 nova/scheduler/vsa.py create mode 100644 nova/vsa/__init__.py create mode 100644 nova/vsa/api.py create mode 100644 nova/vsa/connection.py create mode 100644 nova/vsa/fake.py create mode 100644 nova/vsa/manager.py (limited to 'nova') diff --git a/nova/CA/newcerts/.placeholder b/nova/CA/newcerts/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/CA/private/.placeholder b/nova/CA/private/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/CA/projects/.gitignore b/nova/CA/projects/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/nova/CA/projects/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/nova/CA/projects/.placeholder b/nova/CA/projects/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/CA/reqs/.gitignore b/nova/CA/reqs/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/nova/CA/reqs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/nova/CA/reqs/.placeholder b/nova/CA/reqs/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 890d57fe7..ec44c02ef 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -269,6 +269,10 @@ class Authorizer(wsgi.Middleware): 'DescribeImageAttribute': ['all'], 'ModifyImageAttribute': ['projectmanager', 'sysadmin'], 'UpdateImage': ['projectmanager', 'sysadmin'], + 'CreateVsa': ['projectmanager', 'sysadmin'], + 'DeleteVsa': ['projectmanager', 'sysadmin'], + 'DescribeVsas': ['projectmanager', 'sysadmin'], + 'DescribeDriveTypes': ['projectmanager', 'sysadmin'], }, 'AdminController': { # All actions have the 
same permission: ['none'] (the default) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index acfd1361c..786ceaccc 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -42,6 +42,8 @@ from nova import network from nova import rpc from nova import utils from nova import volume +from nova import vsa +from nova.vsa import drive_types from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.image import s3 @@ -87,6 +89,7 @@ class CloudController(object): self.compute_api = compute.API( network_api=self.network_api, volume_api=self.volume_api) + self.vsa_api = vsa.API(compute_api=self.compute_api) self.setup() def __str__(self): @@ -727,12 +730,26 @@ class CloudController(object): snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) + to_vsa_id = kwargs.get('to_vsa_id', None) + if to_vsa_id: + to_vsa_id = ec2utils.ec2_id_to_id(to_vsa_id) + + from_vsa_id = kwargs.get('from_vsa_id', None) + if from_vsa_id: + from_vsa_id = ec2utils.ec2_id_to_id(from_vsa_id) + + if to_vsa_id or from_vsa_id: + LOG.audit(_("Create volume of %s GB associated with VSA "\ + "(to: %d, from: %d)"), + size, to_vsa_id, from_vsa_id, context=context) + volume = self.volume_api.create( context, size=size, snapshot_id=snapshot_id, name=kwargs.get('display_name'), - description=kwargs.get('display_description')) + description=kwargs.get('display_description'), + to_vsa_id=to_vsa_id, from_vsa_id=from_vsa_id) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. @@ -786,6 +803,151 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} + def _format_vsa(self, context, p_vsa): + vsa = {} + vsa['vsaId'] = p_vsa['id'] + vsa['status'] = p_vsa['status'] + vsa['availabilityZone'] = p_vsa['availability_zone'] + vsa['createTime'] = p_vsa['created_at'] + vsa['name'] = p_vsa['name'] + vsa['displayName'] = p_vsa['display_name'] + vsa['displayDescription'] = p_vsa['display_description'] + vsa['vcCount'] = p_vsa['vc_count'] + if p_vsa['vsa_instance_type']: + vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None) + else: + vsa['vcType'] = None + return vsa + + def create_vsa(self, context, **kwargs): + display_name = kwargs.get('display_name') + display_description = kwargs.get('display_description') + vc_count = int(kwargs.get('vc_count', 1)) + instance_type = instance_types.get_instance_type_by_name( + kwargs.get('vc_type', FLAGS.default_vsa_instance_type)) + image_name = kwargs.get('image_name') + availability_zone = kwargs.get('placement', {}).get( + 'AvailabilityZone') + #storage = ast.literal_eval(kwargs.get('storage', '[]')) + storage = kwargs.get('storage', []) + shared = kwargs.get('shared', False) + + vc_type = instance_type['name'] + _storage = str(storage) + LOG.audit(_("Create VSA %(display_name)s vc_count:%(vc_count)d "\ + "vc_type:%(vc_type)s storage:%(_storage)s"), locals()) + + vsa = self.vsa_api.create(context, display_name, display_description, + vc_count, instance_type, image_name, + availability_zone, storage, shared) + return {'vsaSet': [self._format_vsa(context, vsa)]} + + def update_vsa(self, context, vsa_id, **kwargs): + LOG.audit(_("Update VSA %s"), vsa_id) + updatable_fields = ['display_name', 'display_description', 'vc_count'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + if changes: + vsa_id = 
ec2utils.ec2_id_to_id(vsa_id) + self.vsa_api.update(context, vsa_id=vsa_id, **changes) + return True + + def delete_vsa(self, context, vsa_id, **kwargs): + LOG.audit(_("Delete VSA %s"), vsa_id) + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + + self.vsa_api.delete(context, vsa_id) + + return True + + def describe_vsas(self, context, vsa_id=None, status=None, + availability_zone=None, **kwargs): +# LOG.debug(_("vsa_id=%s, status=%s, az=%s"), +# (vsa_id, status, availability_zone)) + result = [] + vsas = [] + if vsa_id is not None: + for ec2_id in vsa_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + vsa = self.vsa_api.get(context, internal_id) + vsas.append(vsa) + else: + vsas = self.vsa_api.get_all(context) + + if status: + result = [] + for vsa in vsas: + if vsa['status'] in status: + result.append(vsa) + vsas = result + + if availability_zone: + result = [] + for vsa in vsas: + if vsa['availability_zone'] in availability_zone: + result.append(vsa) + vsas = result + + return {'vsaSet': [self._format_vsa(context, vsa) for vsa in vsas]} + + def create_drive_type(self, context, **kwargs): + name = kwargs.get('name') + type = kwargs.get('type') + size_gb = int(kwargs.get('size_gb')) + rpm = kwargs.get('rpm') + capabilities = kwargs.get('capabilities') + visible = kwargs.get('visible', True) + + LOG.audit(_("Create Drive Type %(name)s: %(type)s %(size_gb)d "\ + "%(rpm)s %(capabilities)s %(visible)s"), + locals()) + + rv = drive_types.drive_type_create(context, type, size_gb, rpm, + capabilities, visible, name) + return {'driveTypeSet': [dict(rv)]} + + def update_drive_type(self, context, name, **kwargs): + LOG.audit(_("Update Drive Type %s"), name) + updatable_fields = ['type', + 'size_gb', + 'rpm', + 'capabilities', + 'visible'] + changes = {} + for field in updatable_fields: + if field in kwargs and \ + kwargs[field] is not None and \ + kwargs[field] != '': + changes[field] = kwargs[field] + if changes: + drive_types.drive_type_update(context, name, **changes) + return True + + def rename_drive_type(self, context, name, new_name): + drive_types.drive_type_rename(context, name, new_name) + return True + + def delete_drive_type(self, context, name): + drive_types.drive_type_delete(context, name) + return True + + def describe_drive_types(self, context, names=None, visible=True): + + drives = [] + if names is not None: + for name in names: + drive = drive_types.drive_type_get_by_name(context, name) + if drive['visible'] == visible: + drives.append(drive) + else: + drives = drive_types.drive_type_get_all(context, visible) + + # VP-TODO: Change it later to EC2 compatible func (output) + + return {'driveTypeSet': [dict(drive) for drive in drives]} + def _convert_to_set(self, lst, label): if lst is None or lst == []: return None diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py new file mode 100644 index 000000000..85b3170cb --- /dev/null +++ b/nova/api/openstack/contrib/drive_types.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" The Drive Types extension for Virtual Storage Arrays""" + + +from webob import exc + +from nova.vsa import drive_types +from nova import db +from nova import quota +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi + +LOG = logging.getLogger("nova.api.drive_types") + + +class DriveTypeController(object): + """The Drive Type API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "drive_type": [ + "id", + "displayName", + "type", + "size", + "rpm", + "capabilities", + ]}}} + + def _drive_type_view(self, context, drive): + """Maps keys for drive types view.""" + d = {} + + d['id'] = drive['id'] + d['displayName'] = drive['name'] + d['type'] = drive['type'] + d['size'] = drive['size_gb'] + d['rpm'] = drive['rpm'] + d['capabilities'] = drive['capabilities'] + return d + + def index(self, req): + """Returns a list of drive types.""" + + context = req.environ['nova.context'] + drive_types = drive_types.drive_type_get_all(context) + limited_list = common.limited(drive_types, req) + res = [self._drive_type_view(context, drive) for drive in limited_list] + return {'drive_types': res} + + def show(self, req, id): + """Return data about the given drive type.""" + context = req.environ['nova.context'] + + try: + drive = drive_types.drive_type_get(context, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + return {'drive_type': self._drive_type_view(context, drive)} + + def create(self, req, body): + """Creates a new drive type.""" + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + drive = body['drive_type'] + + name = drive.get('displayName') + type = drive.get('type') + size = drive.get('size') + rpm = drive.get('rpm') + capabilities = drive.get('capabilities') + + LOG.audit(_("Create drive type %(name)s for "\ + "%(type)s:%(size)s:%(rpm)s"), locals(), context=context) + + new_drive = drive_types.drive_type_create(context, + type=type, + size_gb=size, + rpm=rpm, + capabilities=capabilities, + name=name) + + return {'drive_type': self._drive_type_view(context, new_drive)} + + def delete(self, req, id): + """Deletes a drive type.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete drive type with id: %s"), id, context=context) + + try: + drive = drive_types.drive_type_get(context, id) + drive_types.drive_type_delete(context, drive['name']) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + +class Drive_types(extensions.ExtensionDescriptor): + + def get_name(self): + return "DriveTypes" + + def get_alias(self): + return "zadr-drive_types" + + def get_description(self): + return "Drive Types support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/drive_types/api/v1.1" + + def get_updated(self): + return "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'zadr-drive_types', + DriveTypeController()) + + resources.append(res) + return resources diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py new file mode 100644 index 000000000..eca2d68dd --- /dev/null +++ 
b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -0,0 +1,454 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" The virtul storage array extension""" + + +from webob import exc + +from nova import vsa +from nova import volume +from nova import db +from nova import quota +from nova import exception +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi +from nova.api.openstack.contrib import volumes +from nova.compute import instance_types + +from nova import flags +FLAGS = flags.FLAGS + +LOG = logging.getLogger("nova.api.vsa") + + +class VsaController(object): + """The Virtual Storage Array API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "vsa": [ + "id", + "name", + "displayName", + "displayDescription", + "createTime", + "status", + "vcType", + "vcCount", + "driveCount", + ]}}} + + def __init__(self): + self.vsa_api = vsa.API() + super(VsaController, self).__init__() + + def _vsa_view(self, context, vsa, details=False): + """Map keys for vsa summary/detailed view.""" + d = {} + + d['id'] = vsa['id'] + d['name'] = vsa['name'] + d['displayName'] = vsa['display_name'] + d['displayDescription'] = vsa['display_description'] + + d['createTime'] = vsa['created_at'] + d['status'] = vsa['status'] + + if vsa['vsa_instance_type']: + d['vcType'] = vsa['vsa_instance_type'].get('name', None) + else: + d['vcType'] = None + + d['vcCount'] = vsa['vc_count'] + d['driveCount'] = vsa['vol_count'] + + return d + + def _items(self, req, details): + """Return summary or detailed list of VSAs.""" + context = req.environ['nova.context'] + vsas = self.vsa_api.get_all(context) + limited_list = common.limited(vsas, req) + res = [self._vsa_view(context, vsa, details) for vsa in limited_list] + return {'vsaSet': res} + + def index(self, req): + """Return a short list of VSAs.""" + return self._items(req, details=False) + + def detail(self, req): + """Return a detailed list of VSAs.""" + return self._items(req, details=True) + + def show(self, req, id): + """Return data about the given VSA.""" + context = req.environ['nova.context'] + + try: + vsa = self.vsa_api.get(context, vsa_id=id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + return {'vsa': self._vsa_view(context, vsa, details=True)} + + def create(self, req, body): + """Create a new VSA.""" + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vsa = body['vsa'] + + display_name = vsa.get('displayName') + display_description = vsa.get('displayDescription') + storage = vsa.get('storage') + shared = vsa.get('shared') + vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type) + availability_zone = vsa.get('placement', {}).get('AvailabilityZone') + + 
try: + instance_type = instance_types.get_instance_type_by_name(vc_type) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"), + locals(), context=context) + + result = self.vsa_api.create(context, + display_name=display_name, + display_description=display_description, + storage=storage, + shared=shared, + instance_type=instance_type, + availability_zone=availability_zone) + + return {'vsa': self._vsa_view(context, result, details=True)} + + def delete(self, req, id): + """Delete a VSA.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete VSA with id: %s"), id, context=context) + + try: + self.vsa_api.delete(context, vsa_id=id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + +class VsaVolumeDriveController(volumes.VolumeController): + """The base class for VSA volumes & drives. + + A child resource of the VSA object. Allows operations with + volumes and drives created to/from particular VSA + + """ + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "volume": [ + "id", + "name", + "status", + "size", + "availabilityZone", + "createdAt", + "displayName", + "displayDescription", + "vsaId", + ]}}} + + def __init__(self): + # self.compute_api = compute.API() + # self.vsa_api = vsa.API() + self.volume_api = volume.API() + super(VsaVolumeDriveController, self).__init__() + + def _translation(self, context, vol, vsa_id, details): + if details: + translation = volumes.translate_volume_detail_view + else: + translation = volumes.translate_volume_summary_view + + d = translation(context, vol) + d['vsaId'] = vol[self.direction] + return d + + def _check_volume_ownership(self, context, vsa_id, id): + obj = self.object + try: + volume_ref = self.volume_api.get(context, volume_id=id) + except exception.NotFound: + LOG.error(_("%(obj)s with ID %(id)s not found"), locals()) + raise + + own_vsa_id = volume_ref[self.direction] + if own_vsa_id != int(vsa_id): + LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\ + " and not to VSA %(vsa_id)s."), locals()) + raise exception.Invalid() + + def _items(self, req, vsa_id, details): + """Return summary or detailed list of volumes for particular VSA.""" + context = req.environ['nova.context'] + + vols = self.volume_api.get_all_by_vsa(context, vsa_id, + self.direction.split('_')[0]) + limited_list = common.limited(vols, req) + + res = [self._translation(context, vol, vsa_id, details) \ + for vol in limited_list] + + return {self.objects: res} + + def index(self, req, vsa_id): + """Return a short list of volumes created from particular VSA.""" + LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals()) + return self._items(req, vsa_id, details=False) + + def detail(self, req, vsa_id): + """Return a detailed list of volumes created from particular VSA.""" + LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals()) + return self._items(req, vsa_id, details=True) + + def create(self, req, vsa_id, body): + """Create a new volume from VSA.""" + LOG.audit(_("Create. 
vsa_id=%(vsa_id)s, body=%(body)s"), locals()) + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vol = body[self.object] + size = vol['size'] + LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"), + locals(), context=context) + + new_volume = self.volume_api.create(context, size, None, + vol.get('displayName'), + vol.get('displayDescription'), + from_vsa_id=vsa_id) + + return {self.object: self._translation(context, new_volume, + vsa_id, True)} + + def update(self, req, vsa_id, id, body): + """Update a volume.""" + context = req.environ['nova.context'] + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + vol = body[self.object] + updatable_fields = ['display_name', + 'display_description', + 'status', + 'provider_location', + 'provider_auth'] + changes = {} + for field in updatable_fields: + if field in vol: + changes[field] = vol[field] + + obj = self.object + LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"), + locals(), context=context) + + try: + self.volume_api.update(context, volume_id=id, fields=changes) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + def delete(self, req, vsa_id, id): + """Delete a volume.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals()) + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + return super(VsaVolumeDriveController, self).delete(req, id) + + def show(self, req, vsa_id, id): + """Return data about the given volume.""" + context = req.environ['nova.context'] + + LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals()) + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + return super(VsaVolumeDriveController, self).show(req, id) + + +class VsaVolumeController(VsaVolumeDriveController): + """The VSA volume API controller for the Openstack API. + + A child resource of the VSA object. Allows operations with volumes created + by particular VSA + + """ + + def __init__(self): + self.direction = 'from_vsa_id' + self.objects = 'volumes' + self.object = 'volume' + super(VsaVolumeController, self).__init__() + + +class VsaDriveController(VsaVolumeDriveController): + """The VSA Drive API controller for the Openstack API. + + A child resource of the VSA object. Allows operations with drives created + for particular VSA + + """ + + def __init__(self): + self.direction = 'to_vsa_id' + self.objects = 'drives' + self.object = 'drive' + super(VsaDriveController, self).__init__() + + def create(self, req, vsa_id, body): + """Create a new drive for VSA. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update a drive. 
Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + +class VsaVPoolController(object): + """The vPool VSA API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "vpool": [ + "id", + "vsaId", + "name", + "displayName", + "displayDescription", + "driveCount", + "driveIds", + "protection", + "stripeSize", + "stripeWidth", + "createTime", + "status", + ]}}} + + def __init__(self): + self.vsa_api = vsa.API() + super(VsaVPoolController, self).__init__() + + def index(self, req, vsa_id): + """Return a short list of vpools created from particular VSA.""" + return {'vpools': []} + + def create(self, req, vsa_id, body): + """Create a new vPool for VSA.""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update vPool parameters.""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete a vPool.""" + return faults.Fault(exc.HTTPBadRequest()) + + def show(self, req, vsa_id, id): + """Return data about the given vPool.""" + return faults.Fault(exc.HTTPBadRequest()) + + +class Virtual_storage_arrays(extensions.ExtensionDescriptor): + + def get_name(self): + return "VSAs" + + def get_alias(self): + return "zadr-vsa" + + def get_description(self): + return "Virtual Storage Arrays support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/vsa/api/v1.1" + + def get_updated(self): + return "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'zadr-vsa', + VsaController(), + collection_actions={'detail': 'GET'}, + member_actions={'add_capacity': 'POST', + 'remove_capacity': 'POST'}) + resources.append(res) + + res = extensions.ResourceExtension('volumes', + VsaVolumeController(), + collection_actions={'detail': 'GET'}, + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('drives', + VsaDriveController(), + collection_actions={'detail': 'GET'}, + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('vpools', + VsaVPoolController(), + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + return resources diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index e5e2c5b50..3c3d40c0f 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -33,17 +33,17 @@ LOG = logging.getLogger("nova.api.volumes") FLAGS = flags.FLAGS -def _translate_volume_detail_view(context, vol): +def translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" - d = _translate_volume_summary_view(context, vol) + d = translate_volume_summary_view(context, vol) # No additional data / lookups at the moment return d -def _translate_volume_summary_view(context, vol): +def translate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} @@ -92,7 +92,7 @@ class VolumeController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'volume': _translate_volume_detail_view(context, vol)} + return {'volume': translate_volume_detail_view(context, vol)} def delete(self, req, id): """Delete a volume.""" @@ -108,11 +108,11 @@ class VolumeController(object): def index(self, req): """Returns a summary list of volumes.""" - return self._items(req, 
entity_maker=_translate_volume_summary_view) + return self._items(req, entity_maker=translate_volume_summary_view) def detail(self, req): """Returns a detailed list of volumes.""" - return self._items(req, entity_maker=_translate_volume_detail_view) + return self._items(req, entity_maker=translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" @@ -140,7 +140,7 @@ class VolumeController(object): # Work around problem that instance is lazy-loaded... new_volume['instance'] = None - retval = _translate_volume_detail_view(context, new_volume) + retval = translate_volume_detail_view(context, new_volume) return {'volume': retval} diff --git a/nova/compute/api.py b/nova/compute/api.py index 432658bbb..a48a5bc98 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -149,7 +149,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None): + reservation_id=None, vsa_id=None): """Verify all the input parameters regardless of the provisioning strategy being performed.""" @@ -241,7 +241,8 @@ class API(base.Base): 'availability_zone': availability_zone, 'os_type': os_type, 'architecture': architecture, - 'vm_mode': vm_mode} + 'vm_mode': vm_mode, + 'vsa_id': vsa_id} return (num_instances, base_options) @@ -381,7 +382,8 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None): + reservation_id=None, block_device_mapping=None, + vsa_id=None): """ Provision the instances by sending off a series of single instance requests to the Schedulers. 
This is fine for trival @@ -402,7 +404,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id) + reservation_id, vsa_id) instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) diff --git a/nova/db/api.py b/nova/db/api.py index b7c5700e5..9147f136b 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', 'Template string to be used to generate snapshot names') - +flags.DEFINE_string('vsa_name_template', 'vsa-%08x', + 'Template string to be used to generate VSA names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') @@ -509,6 +510,13 @@ def instance_get_all_by_project(context, project_id): return IMPL.instance_get_all_by_project(context, project_id) +def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): + """Get all instance spawned by a given VSA belonging to a project.""" + return IMPL.instance_get_all_by_project_and_vsa(context, + project_id, + vsa_id) + + def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) @@ -914,6 +922,16 @@ def volume_get_all_by_project(context, project_id): return IMPL.volume_get_all_by_project(context, project_id) +def volume_get_all_assigned_to_vsa(context, vsa_id): + """Get all volumes assigned to particular VSA.""" + return IMPL.volume_get_all_assigned_to_vsa(context, vsa_id) + + +def volume_get_all_assigned_from_vsa(context, vsa_id): + """Get all volumes created from particular VSA.""" + return IMPL.volume_get_all_assigned_from_vsa(context, vsa_id) + + def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) @@ -1422,3 +1440,71 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) + + +#################### + + +def drive_type_create(context, values): + """Creates drive type record.""" + return IMPL.drive_type_create(context, values) + + +def drive_type_update(context, name, values): + """Updates drive type record.""" + return IMPL.drive_type_update(context, name, values) + + +def drive_type_destroy(context, name): + """Deletes drive type record.""" + return IMPL.drive_type_destroy(context, name) + + +def drive_type_get(context, drive_type_id): + """Get drive type record by id.""" + return IMPL.drive_type_get(context, drive_type_id) + + +def drive_type_get_by_name(context, name): + """Get drive type record by name.""" + return IMPL.drive_type_get_by_name(context, name) + + +def drive_type_get_all(context, visible=None): + """Returns all (or only visible) drive types.""" + return IMPL.drive_type_get_all(context, visible) + + +def vsa_create(context, values): + """Creates Virtual Storage Array record.""" + return IMPL.vsa_create(context, values) + + +def vsa_update(context, vsa_id, values): + """Updates Virtual Storage Array record.""" + return IMPL.vsa_update(context, vsa_id, values) + + +def vsa_destroy(context, vsa_id): + """Deletes Virtual Storage Array record.""" + return IMPL.vsa_destroy(context, vsa_id) + + +def vsa_get(context, vsa_id): + """Get Virtual Storage Array 
record by ID.""" + return IMPL.vsa_get(context, vsa_id) + + +def vsa_get_all(context): + """Get all Virtual Storage Array records.""" + return IMPL.vsa_get_all(context) + + +def vsa_get_all_by_project(context, project_id): + """Get all Virtual Storage Array records by project ID.""" + return IMPL.vsa_get_all_by_project(context, project_id) + + +def vsa_get_vc_ips_list(context, vsa_id): + """Retrieves IPs of instances associated with Virtual Storage Array.""" + return IMPL.vsa_get_vc_ips_list(context, vsa_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a831516a8..aa5a6e052 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1217,6 +1217,35 @@ def instance_get_all_by_project(context, project_id): all() +@require_context +def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter_by(project_id=project_id).\ + filter_by(vsa_id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_admin_context +def instance_get_all_by_vsa(context, vsa_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter_by(vsa_id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + @require_context def instance_get_all_by_reservation(context, reservation_id): session = get_session() @@ -2018,12 +2047,14 @@ def volume_get(context, volume_id, session=None): if is_admin_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ filter_by(deleted=False).\ @@ -2039,6 +2070,7 @@ def volume_get_all(context): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2048,6 +2080,7 @@ def volume_get_all_by_host(context, host): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2057,6 +2090,7 @@ def volume_get_all_by_host(context, host): def volume_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.Volume).\ + options(joinedload('drive_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() @@ -2065,6 +2099,28 @@ def volume_get_all_by_instance(context, instance_id): return result +@require_admin_context +def volume_get_all_assigned_to_vsa(context, vsa_id): + session = get_session() + result = session.query(models.Volume).\ + options(joinedload('drive_type')).\ + filter_by(to_vsa_id=vsa_id).\ + filter_by(deleted=False).\ + all() + return result + + +@require_admin_context +def 
volume_get_all_assigned_from_vsa(context, vsa_id): + session = get_session() + result = session.query(models.Volume).\ + options(joinedload('drive_type')).\ + filter_by(from_vsa_id=vsa_id).\ + filter_by(deleted=False).\ + all() + return result + + @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) @@ -2072,6 +2128,7 @@ def volume_get_all_by_project(context, project_id): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2084,6 +2141,7 @@ def volume_get_instance(context, volume_id): filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) @@ -3286,3 +3344,236 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, "deleted": 0}) spec_ref.save(session=session) return specs + + + #################### + + +@require_admin_context +def drive_type_create(context, values): + """ + Creates drive type record. + """ + try: + drive_type_ref = models.DriveTypes() + drive_type_ref.update(values) + drive_type_ref.save() + except Exception, e: + raise exception.DBError(e) + return drive_type_ref + + +@require_admin_context +def drive_type_update(context, name, values): + """ + Updates drive type record. + """ + session = get_session() + with session.begin(): + drive_type_ref = drive_type_get_by_name(context, name, session=session) + drive_type_ref.update(values) + drive_type_ref.save(session=session) + return drive_type_ref + + +@require_admin_context +def drive_type_destroy(context, name): + """ + Deletes drive type record. + """ + session = get_session() + drive_type_ref = session.query(models.DriveTypes).\ + filter_by(name=name) + records = drive_type_ref.delete() + if records == 0: + raise exception.VirtualDiskTypeNotFoundByName(name=name) + else: + return drive_type_ref + + +@require_context +def drive_type_get(context, drive_type_id, session=None): + """ + Get drive type record by id. + """ + if not session: + session = get_session() + + result = session.query(models.DriveTypes).\ + filter_by(id=drive_type_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + if not result: + raise exception.VirtualDiskTypeNotFound(id=drive_type_id) + + return result + + +@require_context +def drive_type_get_by_name(context, name, session=None): + """ + Get drive type record by name. + """ + if not session: + session = get_session() + + result = session.query(models.DriveTypes).\ + filter_by(name=name).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + if not result: + raise exception.VirtualDiskTypeNotFoundByName(name=name) + + return result + + +@require_context +def drive_type_get_all(context, visible=False): + """ + Returns all (or only visible) drive types. + """ + session = get_session() + if not visible: + drive_types = session.query(models.DriveTypes).\ + filter_by(deleted=can_read_deleted(context)).\ + order_by("name").\ + all() + else: + drive_types = session.query(models.DriveTypes).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(visible=True).\ + order_by("name").\ + all() + return drive_types + + + #################### + + +@require_admin_context +def vsa_create(context, values): + """ + Creates Virtual Storage Array record. 
+ """ + try: + vsa_ref = models.VirtualStorageArray() + vsa_ref.update(values) + vsa_ref.save() + except Exception, e: + raise exception.DBError(e) + return vsa_ref + + +@require_admin_context +def vsa_update(context, vsa_id, values): + """ + Updates Virtual Storage Array record. + """ + session = get_session() + with session.begin(): + vsa_ref = vsa_get(context, vsa_id, session=session) + vsa_ref.update(values) + vsa_ref.save(session=session) + return vsa_ref + + +@require_admin_context +def vsa_destroy(context, vsa_id): + """ + Deletes Virtual Storage Array record. + """ + session = get_session() + with session.begin(): + #vsa_ref = vsa_get(context, vsa_id, session=session) + #vsa_ref.delete(session=session) + session.query(models.VirtualStorageArray).\ + filter_by(id=vsa_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def vsa_get(context, vsa_id, session=None): + """ + Get Virtual Storage Array record by ID. + """ + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(project_id=context.project_id).\ + filter_by(id=vsa_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.VirtualStorageArrayNotFound(id=vsa_id) + + return result + + +@require_admin_context +def vsa_get_all(context): + """ + Get all Virtual Storage Array records. + """ + session = get_session() + return session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def vsa_get_all_by_project(context, project_id): + """ + Get all Virtual Storage Array records by project ID. + """ + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def vsa_get_vc_ips_list(context, vsa_id): + """ + Retrieves IPs of instances associated with Virtual Storage Array. 
+ """ + result = [] + session = get_session() + vc_instances = session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter_by(vsa_id=vsa_id).\ + filter_by(deleted=False).\ + all() + for vc_instance in vc_instances: + if vc_instance['fixed_ips']: + for fixed in vc_instance['fixed_ips']: + # insert the [floating,fixed] (if exists) in the head, + # otherwise append the [none,fixed] in the tail + ip = {} + ip['fixed'] = fixed['address'] + if fixed['floating_ips']: + ip['floating'] = fixed['floating_ips'][0]['address'] + result.append(ip) + + return result + + #################### diff --git a/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py new file mode 100644 index 000000000..7fc8f955c --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Text, Boolean, ForeignKey + +from nova import log as logging + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of tables . 
+# + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +vsa_id = Column('vsa_id', Integer(), nullable=True) +to_vsa_id = Column('to_vsa_id', Integer(), nullable=True) +from_vsa_id = Column('from_vsa_id', Integer(), nullable=True) +drive_type_id = Column('drive_type_id', Integer(), nullable=True) + + +# New Tables +# + +virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + +drive_types = Table('drive_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('type', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size_gb', Integer(), nullable=False), + Column('rpm', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('capabilities', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('visible', Boolean(create_constraint=True, name=None)), + ) + +#vsa_disk_association = Table('vsa_disk_association', meta, +# Column('created_at', DateTime(timezone=False)), +# Column('updated_at', DateTime(timezone=False)), +# Column('deleted_at', DateTime(timezone=False)), +# Column('deleted', Boolean(create_constraint=True, name=None)), +# Column('id', Integer(), primary_key=True, nullable=False), +# Column('drive_type_id', Integer(), ForeignKey('drive_types.id')), +# Column('vsa_id', Integer(), ForeignKey('virtual_storage_arrays.id')), +# Column('disk_num', Integer(), nullable=False), +# ) + +#new_tables = (virtual_storage_arrays, drive_types, vsa_disk_association) +new_tables = (virtual_storage_arrays, drive_types) + +# +# Tables to alter +# + + +def upgrade(migrate_engine): + + from 
nova import context + from nova import db + from nova import flags + + FLAGS = flags.FLAGS + + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in new_tables: + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + instances.create_column(vsa_id) + volumes.create_column(to_vsa_id) + volumes.create_column(from_vsa_id) + volumes.create_column(drive_type_id) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances.drop_column(vsa_id) + volumes.drop_column(to_vsa_id) + volumes.drop_column(from_vsa_id) + volumes.drop_column(drive_type_id) + + for table in new_tables: + table.drop() diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index d9e303599..9b64671a3 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -64,7 +64,8 @@ def db_version(): 'users', 'user_project_association', 'user_project_role_association', 'user_role_association', - 'volumes'): + 'volumes', + 'virtual_storage_arrays', 'drive_types'): assert table in meta.tables return db_version_control(1) except AssertionError: diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index d29d3d6f1..7f2e9d39c 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -247,6 +247,43 @@ class Instance(BASE, NovaBase): # assert(state in ['nostate', 'running', 'blocked', 'paused', # 'shutdown', 'shutoff', 'crashed']) + vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'), + nullable=True) + + +class VirtualStorageArray(BASE, NovaBase): + """ + Represents a virtual storage array supplying block storage to instances. 
+ """ + __tablename__ = 'virtual_storage_arrays' + + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.vsa_name_template % self.id + + # User editable field for display in user-facing UIs + display_name = Column(String(255)) + display_description = Column(String(255)) + + project_id = Column(String(255)) + availability_zone = Column(String(255)) + + instance_type_id = Column(Integer, ForeignKey('instance_types.id')) + image_ref = Column(String(255)) + vc_count = Column(Integer, default=0) # number of requested VC instances + vol_count = Column(Integer, default=0) # total number of BE volumes + status = Column(String(255)) + + #admin_pass = Column(String(255)) + + #disks = relationship(VsaDiskAssociation, + # backref=backref('vsa', uselist=False), + # foreign_keys=id, + # primaryjoin='and_(VsaDiskAssociation.vsa_id == ' + # 'VirtualStorageArray.id)') + class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" @@ -277,6 +314,12 @@ class InstanceTypes(BASE, NovaBase): primaryjoin='and_(Instance.instance_type_id == ' 'InstanceTypes.id)') + vsas = relationship(VirtualStorageArray, + backref=backref('vsa_instance_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(VirtualStorageArray.instance_type_id' + ' == InstanceTypes.id)') + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" @@ -316,6 +359,57 @@ class Volume(BASE, NovaBase): provider_location = Column(String(255)) provider_auth = Column(String(255)) + to_vsa_id = Column(Integer, + ForeignKey('virtual_storage_arrays.id'), nullable=True) + from_vsa_id = Column(Integer, + ForeignKey('virtual_storage_arrays.id'), nullable=True) + drive_type_id = Column(Integer, + ForeignKey('drive_types.id'), nullable=True) + + +class DriveTypes(BASE, NovaBase): + """Represents the known drive types (storage media).""" + __tablename__ = 'drive_types' + + id = Column(Integer, primary_key=True, autoincrement=True) + + """ + @property + def name(self): + if self.capabilities: + return FLAGS.drive_type_template_long % \ + (self.type, str(self.size_gb), self.rpm, self.capabilities) + else: + return FLAGS.drive_type_template_short % \ + (self.type, str(self.size_gb), self.rpm) + """ + + name = Column(String(255), unique=True) + type = Column(String(255)) + size_gb = Column(Integer) + rpm = Column(String(255)) + capabilities = Column(String(255)) + + visible = Column(Boolean, default=True) + + volumes = relationship(Volume, + backref=backref('drive_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(Volume.drive_type_id == ' + 'DriveTypes.id)') + +# +#class VsaDiskAssociation(BASE, NovaBase): +# """associates drive types with Virtual Storage Arrays.""" +# __tablename__ = 'vsa_disk_association' +# +# id = Column(Integer, primary_key=True, autoincrement=True) +# +# drive_type_id = Column(Integer, ForeignKey('drive_types.id')) +# vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id')) +# +# disk_num = Column(Integer, nullable=False) # number of disks + class Quota(BASE, NovaBase): """Represents a single quota override for a project. 
@@ -785,6 +879,7 @@ def register_models(): Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project, Certificate, ConsolePool, Console, Zone, + VirtualStorageArray, DriveTypes, AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: diff --git a/nova/exception.py b/nova/exception.py index ad6c005f8..a3d1a4b3f 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -311,6 +311,10 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class VolumeNotFoundForVsa(VolumeNotFound): + message = _("Volume not found for vsa %(vsa_id)s.") + + class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") @@ -682,3 +686,19 @@ class PasteConfigNotFound(NotFound): class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") + + +class VirtualStorageArrayNotFound(NotFound): + message = _("Virtual Storage Array %(id)d could not be found.") + + +class VirtualStorageArrayNotFoundByName(NotFound): + message = _("Virtual Storage Array %(name)s could not be found.") + + +class VirtualDiskTypeNotFound(NotFound): + message = _("Drive Type %(id)d could not be found.") + + +class VirtualDiskTypeNotFoundByName(NotFound): + message = _("Drive Type %(name)s could not be found.") diff --git a/nova/flags.py b/nova/flags.py index 49355b436..8000eac4a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url', in the form "http://127.0.0.1:8000"') DEFINE_string('ajax_console_proxy_port', 8000, 'port that ajax_console_proxy binds') +DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') DEFINE_bool('fake_network', False, @@ -364,6 +365,32 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') +DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', + 'Manager for vsa') +DEFINE_string('vc_image_name', 'vc_image', + 'the VC image ID (for a VC image that exists in DB Glance)') +#--------------------------------------------------------------------- +# VSA constants and enums +DEFINE_string('default_vsa_instance_type', 'm1.small', + 'default instance type for VSA instances') +DEFINE_integer('max_vcs_in_vsa', 32, + 'maxinum VCs in a VSA') +DEFINE_integer('vsa_part_size_gb', 100, + 'default partition size for shared capacity') + +DEFINE_string('vsa_status_creating', 'creating', + 'VSA creating (not ready yet)') +DEFINE_string('vsa_status_launching', 'launching', + 'Launching VCs (all BE volumes were created)') +DEFINE_string('vsa_status_created', 'created', + 'VSA fully created and ready for use') +DEFINE_string('vsa_status_partial', 'partial', + 'Some BE storage allocations failed') +DEFINE_string('vsa_status_failed', 'failed', + 'Some BE storage allocations failed') +DEFINE_string('vsa_status_deleting', 'deleting', + 'VSA started the deletion procedure') + # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.glance.GlanceImageService', diff --git a/nova/quota.py b/nova/quota.py index 58766e846..46322d60c 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -24,13 +24,13 @@ from nova import flags FLAGS 
= flags.FLAGS -flags.DEFINE_integer('quota_instances', 10, +flags.DEFINE_integer('quota_instances', 100, # 10 'number of instances allowed per project') flags.DEFINE_integer('quota_cores', 20, 'number of instance cores allowed per project') flags.DEFINE_integer('quota_ram', 50 * 1024, 'megabytes of instance ram allowed per project') -flags.DEFINE_integer('quota_volumes', 10, +flags.DEFINE_integer('quota_volumes', 100, # 10 'number of volumes allowed per project') flags.DEFINE_integer('quota_gigabytes', 1000, 'number of volume gigabytes allowed per project') diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py new file mode 100644 index 000000000..4277c0ba8 --- /dev/null +++ b/nova/scheduler/vsa.py @@ -0,0 +1,495 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +VSA Simple Scheduler +""" + +from nova import context +from nova import rpc +from nova import db +from nova import flags +from nova import utils +from nova.volume import api as volume_api +from nova.scheduler import driver +from nova.scheduler import simple +from nova import log as logging + +LOG = logging.getLogger('nova.scheduler.vsa') + +FLAGS = flags.FLAGS +flags.DEFINE_integer('gb_to_bytes_shift', 30, + 'Conversion shift between GB and bytes') +flags.DEFINE_integer('drive_type_approx_capacity_percent', 10, + 'The percentage range for capacity comparison') +flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, + 'The number of unique hosts per storage allocation') +flags.DEFINE_boolean('vsa_select_unique_drives', True, + 'Allow selection of same host for multiple drives') + + +class VsaScheduler(simple.SimpleScheduler): + """Implements Naive Scheduler that tries to find least loaded host.""" + + def __init__(self, *args, **kwargs): + super(VsaScheduler, self).__init__(*args, **kwargs) + self._notify_all_volume_hosts("startup") + + def _notify_all_volume_hosts(self, event): + rpc.cast(context.get_admin_context(), + FLAGS.volume_topic, + {"method": "notification", + "args": {"event": event}}) + + def _compare_names(self, str1, str2): + result = str1.lower() == str2.lower() + # LOG.debug(_("Comparing %(str1)s and %(str2)s. "\ + # "Result %(result)s"), locals()) + return result + + def _compare_sizes_exact_match(self, cap_capacity, size_gb): + cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift + size_gb = int(size_gb) + result = cap_capacity == size_gb + # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\ + # "Result %(result)s"), locals()) + return result + + def _compare_sizes_approxim(self, cap_capacity, size_gb): + cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift + size_gb = int(size_gb) + size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100 + + result = cap_capacity >= size_gb - size_perc and \ + cap_capacity <= size_gb + size_perc + # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. 
"\ + # "Result %(result)s"), locals()) + return result + + def _qosgrp_match(self, drive_type, qos_values): + + # Add more entries for additional comparisons + compare_list = [{'cap1': 'DriveType', + 'cap2': 'type', + 'cmp_func': self._compare_names}, + {'cap1': 'DriveCapacity', + 'cap2': 'size_gb', + 'cmp_func': self._compare_sizes_approxim}] + + for cap in compare_list: + if cap['cap1'] in qos_values.keys() and \ + cap['cap2'] in drive_type.keys() and \ + cap['cmp_func'] is not None and \ + cap['cmp_func'](qos_values[cap['cap1']], + drive_type[cap['cap2']]): + # LOG.debug(_("One of required capabilities found: %s:%s"), + # cap['cap1'], drive_type[cap['cap2']]) + pass + else: + return False + return True + + def _filter_hosts(self, topic, request_spec, host_list=None): + + drive_type = request_spec['drive_type'] + LOG.debug(_("Filter hosts for drive type %(drive_type)s") % locals()) + + if host_list is None: + host_list = self.zone_manager.service_states.iteritems() + + filtered_hosts = [] # returns list of (hostname, capability_dict) + for host, host_dict in host_list: + for service_name, service_dict in host_dict.iteritems(): + if service_name != topic: + continue + + gos_info = service_dict.get('drive_qos_info', {}) + for qosgrp, qos_values in gos_info.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + if qos_values['AvailableCapacity'] > 0: + LOG.debug(_("Adding host %s to the list"), host) + filtered_hosts.append((host, gos_info)) + else: + LOG.debug(_("Host %s has no free capacity. Skip"), + host) + break + + LOG.debug(_("Found hosts %(filtered_hosts)s") % locals()) + return filtered_hosts + + def _allowed_to_use_host(self, host, selected_hosts, unique): + if unique == False or \ + host not in [item[0] for item in selected_hosts]: + return True + else: + return False + + def _add_hostcap_to_list(self, selected_hosts, host, cap): + if host not in [item[0] for item in selected_hosts]: + selected_hosts.append((host, cap)) + + def _alg_least_used_host(self, request_spec, all_hosts, selected_hosts): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + min_used = 0 + + LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ + "%(drive_type)s from %(all_hosts)s"), locals()) + + for (host, capabilities) in all_hosts: + has_enough_capacity = False + used_capacity = 0 + for qosgrp, qos_values in capabilities.iteritems(): + + used_capacity = used_capacity + qos_values['TotalCapacity'] \ + - qos_values['AvailableCapacity'] + + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + if qos_values['FullDrive']['NumFreeDrives'] > 0: + has_enough_capacity = True + matched_qos = qos_values + else: + break + else: + if qos_values['AvailableCapacity'] >= size and \ + (qos_values['PartitionDrive'][ + 'NumFreePartitions'] > 0 or \ + qos_values['FullDrive']['NumFreeDrives'] > 0): + has_enough_capacity = True + matched_qos = qos_values + else: + break + + if has_enough_capacity and \ + self._allowed_to_use_host(host, + selected_hosts, + unique) and \ + (best_host is None or used_capacity < min_used): + + min_used = used_capacity + best_host = host + best_qoscap = matched_qos + best_cap = capabilities + + if best_host: + self._add_hostcap_to_list(selected_hosts, host, best_cap) + LOG.debug(_("Best host found: %(best_host)s. 
"\ + "(used capacity %(min_used)s)"), locals()) + return (best_host, best_qoscap) + + def _alg_most_avail_capacity(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + max_avail = 0 + + LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ + "%(drive_type)s from %(all_hosts)s"), locals()) + + for (host, capabilities) in all_hosts: + for qosgrp, qos_values in capabilities.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + available = qos_values['FullDrive']['NumFreeDrives'] + else: + available = qos_values['AvailableCapacity'] + + if available > max_avail and \ + self._allowed_to_use_host(host, + selected_hosts, + unique): + max_avail = available + best_host = host + best_qoscap = qos_values + best_cap = capabilities + break # go to the next host + + if best_host: + self._add_hostcap_to_list(selected_hosts, host, best_cap) + LOG.debug(_("Best host found: %(best_host)s. "\ + "(available capacity %(max_avail)s)"), locals()) + + return (best_host, best_qoscap) + + def _select_hosts(self, request_spec, all_hosts, selected_hosts=None): + + #self._alg_most_avail_capacity(request_spec, all_hosts, selected_hosts) + + if selected_hosts is None: + selected_hosts = [] + + host = None + if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc: + # try to select from already selected hosts only + LOG.debug(_("Maximum number of hosts selected (%d)"), + len(selected_hosts)) + unique = False + (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + selected_hosts, + selected_hosts, + unique) + + LOG.debug(_("Selected excessive host %(host)s"), locals()) + else: + unique = FLAGS.vsa_select_unique_drives + + if host is None: + # if we've not tried yet (# of sel hosts < max) - unique=True + # or failed to select from selected_hosts - unique=False + # select from all hosts + (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + all_hosts, + selected_hosts, + unique) + LOG.debug(_("Selected host %(host)s"), locals()) + + if host is None: + raise driver.WillNotSchedule(_("No available hosts")) + + return (host, qos_cap) + + def _provision_volume(self, context, vol, vsa_id, availability_zone): + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + now = utils.utcnow() + options = { + 'size': vol['size'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'snapshot_id': None, + 'availability_zone': availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': vol['name'], + 'display_description': vol['description'], + 'to_vsa_id': vsa_id, + 'drive_type_id': vol['drive_ref']['id'], + 'host': vol['host'], + 'scheduled_at': now + } + + size = vol['size'] + host = vol['host'] + name = vol['name'] + LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\ + "host %(host)s"), locals()) + + volume_ref = db.volume_create(context, options) + rpc.cast(context, + db.queue_get_for(context, "volume", vol['host']), + {"method": "create_volume", + "args": {"volume_id": volume_ref['id'], + "snapshot_id": None}}) + + def _check_host_enforcement(self, availability_zone): + if (availability_zone + and ':' in availability_zone + and context.is_admin): + zone, _x, host = availability_zone.partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-volume') + if not 
self.service_is_up(service): + raise driver.WillNotSchedule(_("Host %s not available") % host) + + return host + else: + return None + + def _assign_hosts_to_volumes(self, context, volume_params, forced_host): + + prev_drive_type_id = None + selected_hosts = [] + + LOG.debug(_("volume_params %(volume_params)s") % locals()) + + for vol in volume_params: + LOG.debug(_("Assigning host to volume %s") % vol['name']) + + if forced_host: + vol['host'] = forced_host + vol['capabilities'] = None + continue + + drive_type = vol['drive_ref'] + request_spec = {'size': vol['size'], + 'drive_type': dict(drive_type)} + + if prev_drive_type_id != drive_type['id']: + # generate list of hosts for this drive type + all_hosts = self._filter_hosts("volume", request_spec) + prev_drive_type_id = drive_type['id'] + + (host, qos_cap) = self._select_hosts(request_spec, + all_hosts, selected_hosts) + vol['host'] = host + vol['capabilities'] = qos_cap + self._consume_resource(qos_cap, vol['size'], -1) + + LOG.debug(_("Assigned host %(host)s, capabilities %(qos_cap)s"), + locals()) + + LOG.debug(_("END: volume_params %(volume_params)s") % locals()) + + def schedule_create_volumes(self, context, request_spec, + availability_zone, *_args, **_kwargs): + """Picks hosts for hosting multiple volumes.""" + + num_volumes = request_spec.get('num_volumes') + LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") % + locals()) + + LOG.debug(_("Service states BEFORE %s"), + self.zone_manager.service_states) + + vsa_id = request_spec.get('vsa_id') + volume_params = request_spec.get('volumes') + + host = self._check_host_enforcement(availability_zone) + + try: + self._assign_hosts_to_volumes(context, volume_params, host) + + for vol in volume_params: + self._provision_volume(context, vol, vsa_id, availability_zone) + + LOG.debug(_("Service states AFTER %s"), + self.zone_manager.service_states) + + except: + if vsa_id: + db.vsa_update(context, vsa_id, + dict(status=FLAGS.vsa_status_failed)) + + for vol in volume_params: + if 'capabilities' in vol: + self._consume_resource(vol['capabilities'], + vol['size'], 1) + LOG.debug(_("Service states AFTER %s"), + self.zone_manager.service_states) + raise + + return None + + def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): + """Picks the best host based on requested drive type capability.""" + volume_ref = db.volume_get(context, volume_id) + + host = self._check_host_enforcement(volume_ref['availability_zone']) + if host: + now = utils.utcnow() + db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + return host + + drive_type = volume_ref['drive_type'] + if drive_type is None: + LOG.debug(_("Non-VSA volume %d"), volume_ref['id']) + return super(VsaScheduler, self).schedule_create_volume(context, + volume_id, *_args, **_kwargs) + drive_type = dict(drive_type) + + # otherwise - drive type is loaded + LOG.debug(_("Spawning volume %d with drive type %s"), + volume_ref['id'], drive_type) + + LOG.debug(_("Service states BEFORE %s"), + self.zone_manager.service_states) + + request_spec = {'size': volume_ref['size'], + 'drive_type': drive_type} + hosts = self._filter_hosts("volume", request_spec) + + try: + (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts) + except: + if volume_ref['to_vsa_id']: + db.vsa_update(context, volume_ref['to_vsa_id'], + dict(status=FLAGS.vsa_status_failed)) + raise + #return super(VsaScheduler, self).schedule_create_volume(context, + # volume_id, *_args, **_kwargs) + + if host: + now = utils.utcnow() + 
db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + self._consume_resource(qos_cap, volume_ref['size'], -1) + + LOG.debug(_("Service states AFTER %s"), + self.zone_manager.service_states) + return host + + def _consume_full_drive(self, qos_values, direction): + qos_values['FullDrive']['NumFreeDrives'] += direction + qos_values['FullDrive']['NumOccupiedDrives'] -= direction + + def _consume_partition(self, qos_values, size, direction): + + if qos_values['PartitionDrive']['PartitionSize'] != 0: + partition_size = qos_values['PartitionDrive']['PartitionSize'] + else: + partition_size = size + part_per_drive = qos_values['DriveCapacity'] / partition_size + + if direction == -1 and \ + qos_values['PartitionDrive']['NumFreePartitions'] == 0: + + self._consume_full_drive(qos_values, direction) + qos_values['PartitionDrive']['NumFreePartitions'] += \ + part_per_drive + + qos_values['PartitionDrive']['NumFreePartitions'] += direction + qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction + + if direction == 1 and \ + qos_values['PartitionDrive']['NumFreePartitions'] >= \ + part_per_drive: + + self._consume_full_drive(qos_values, direction) + qos_values['PartitionDrive']['NumFreePartitions'] -= \ + part_per_drive + + def _consume_resource(self, qos_values, size, direction): + if qos_values is None: + LOG.debug(_("No capability selected for volume of size %(size)s"), + locals()) + return + + if size == 0: # full drive match + qos_values['AvailableCapacity'] += direction * \ + qos_values['DriveCapacity'] + self._consume_full_drive(qos_values, direction) + else: + qos_values['AvailableCapacity'] += direction * \ + (size << FLAGS.gb_to_bytes_shift) + self._consume_partition(qos_values, + size << FLAGS.gb_to_bytes_shift, + direction) + return diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index f99e1713d..36e469be3 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -242,7 +242,7 @@ class LibvirtConnTestCase(test.TestCase): return """ - + diff --git a/nova/volume/api.py b/nova/volume/api.py index 7d27abff9..f81222017 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -41,7 +41,9 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, snapshot_id, name, description): + def create(self, context, size, snapshot_id, name, description, + to_vsa_id=None, from_vsa_id=None, drive_type_id=None, + availability_zone=None): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": @@ -50,25 +52,36 @@ class API(base.Base): if not size: size = snapshot['volume_size'] - if quota.allowed_volumes(context, 1, size) < 1: - pid = context.project_id - LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" - " %(size)sG volume") % locals()) - raise quota.QuotaError(_("Volume quota exceeded. You cannot " - "create a volume of size %sG") % size) + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + if to_vsa_id is None: + # VP-TODO: for now don't check quotas for BE volumes + if quota.allowed_volumes(context, 1, size) < 1: + pid = context.project_id + LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" + " %(size)sG volume") % locals()) + raise quota.QuotaError(_("Volume quota exceeded. 
You cannot " + "create a volume of size %sG") % size) options = { 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, 'snapshot_id': snapshot_id, - 'availability_zone': FLAGS.storage_availability_zone, + 'availability_zone': availability_zone, 'status': "creating", 'attach_status': "detached", 'display_name': name, - 'display_description': description} + 'display_description': description, + 'to_vsa_id': to_vsa_id, + 'from_vsa_id': from_vsa_id, + 'drive_type_id': drive_type_id} volume = self.db.volume_create(context, options) + if from_vsa_id is not None: # for FE VSA volumes do nothing + return volume + rpc.cast(context, FLAGS.scheduler_topic, {"method": "create_volume", @@ -89,6 +102,12 @@ class API(base.Base): volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) + + if volume['from_vsa_id'] is not None: + self.db.volume_destroy(context, volume['id']) + LOG.debug(_("volume %d: deleted successfully"), volume['id']) + return + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) @@ -110,6 +129,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_all_by_vsa(self, context, vsa_id, direction): + if direction == "to": + return self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + elif direction == "from": + return self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + else: + raise exception.ApiError(_("Unsupported vol assignment type %s"), + direction) + def get_snapshot(self, context, snapshot_id): rv = self.db.snapshot_get(context, snapshot_id) return dict(rv.iteritems()) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 23e845deb..ec09325d8 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -501,7 +501,15 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - self._run_iscsiadm(iscsi_properties, ('--op', 'new')) + # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept + # multiple args for iscsi-command. Like in --op new. Hence + # using a local version here which does the same thing + (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'new', + '-m', 'node', + '-T', iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal']) + # self._run_iscsiadm(iscsi_properties, ('--op', 'new')) + # zadara-end if iscsi_properties.get('auth_method'): self._iscsiadm_update(iscsi_properties, @@ -553,7 +561,15 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) + # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept + # multiple args for iscsi-command. Like in --op delete. 
Hence + # using a local version here which does the same thing + (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'delete', + '-m', 'node', + '-T', iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal']) + #self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) + # zadara-end def check_for_export(self, context, volume_id): """Make sure volume is exported.""" diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 798bd379a..3e2892fee 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,6 +42,7 @@ intact. """ +import time from nova import context from nova import exception @@ -49,6 +50,7 @@ from nova import flags from nova import log as logging from nova import manager from nova import utils +from nova import rpc LOG = logging.getLogger('nova.volume.manager') @@ -58,22 +60,40 @@ flags.DEFINE_string('storage_availability_zone', 'availability zone of this service') flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') +flags.DEFINE_string('vsa_volume_driver', 'nova.volume.san.ZadaraVsaDriver', + 'Driver to use for FE/BE volume creation with VSA') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') +flags.DEFINE_integer('volume_state_interval', 60, + 'Interval in seconds for querying volumes status') class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" - def __init__(self, volume_driver=None, *args, **kwargs): + def __init__(self, volume_driver=None, vsa_volume_driver=None, + *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" if not volume_driver: volume_driver = FLAGS.volume_driver self.driver = utils.import_object(volume_driver) + if not vsa_volume_driver: + vsa_volume_driver = FLAGS.vsa_volume_driver + self.vsadriver = utils.import_object(vsa_volume_driver) super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) # NOTE(vish): Implementation specific db handling is done # by the driver. self.driver.db = self.db + self.vsadriver.db = self.db + self._last_volume_stats = [] + #self._last_host_check = 0 + + def _get_driver(self, volume_ref): + if volume_ref['to_vsa_id'] is None and \ + volume_ref['from_vsa_id'] is None: + return self.driver + else: + return self.vsadriver def init_host(self): """Do any initialization that needs to be run if this is a @@ -84,10 +104,15 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: if volume['status'] in ['available', 'in-use']: - self.driver.ensure_export(ctxt, volume) + driver = self._get_driver(volume) + driver.ensure_export(ctxt, volume) else: LOG.info(_("volume %s: skipping export"), volume['name']) + def create_volumes(self, context, request_spec, availability_zone): + LOG.info(_("create_volumes called with req=%(request_spec)s, "\ + "availability_zone=%(availability_zone)s"), locals()) + def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() @@ -101,28 +126,31 @@ class VolumeManager(manager.SchedulerDependentManager): # before passing it to the driver. 
volume_ref['host'] = self.host + driver = self._get_driver(volume_ref) try: vol_name = volume_ref['name'] vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) if snapshot_id == None: - model_update = self.driver.create_volume(volume_ref) + model_update = driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot( + model_update = driver.create_volume_from_snapshot( volume_ref, snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - model_update = self.driver.create_export(context, volume_ref) + model_update = driver.create_export(context, volume_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) - except Exception: + # except Exception: + except: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) + self._notify_vsa(context, volume_ref, 'error') raise now = utils.utcnow() @@ -130,8 +158,20 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'available', 'launched_at': now}) LOG.debug(_("volume %s: created successfully"), volume_ref['name']) + + self._notify_vsa(context, volume_ref, 'available') + return volume_id + def _notify_vsa(self, context, volume_ref, status): + if volume_ref['to_vsa_id'] is not None: + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "vsa_volume_created", + "args": {"vol_id": volume_ref['id'], + "vsa_id": volume_ref['to_vsa_id'], + "status": status}}) + def delete_volume(self, context, volume_id): """Deletes and unexports volume.""" context = context.elevated() @@ -141,14 +181,15 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) + driver = self._get_driver(volume_ref) try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) - self.driver.remove_export(context, volume_ref) + driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) - self.driver.delete_volume(volume_ref) + driver.delete_volume(volume_ref) except exception.VolumeIsBusy, e: LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) - self.driver.ensure_export(context, volume_ref) + driver.ensure_export(context, volume_ref) self.db.volume_update(context, volume_ref['id'], {'status': 'available'}) return True @@ -171,6 +212,7 @@ class VolumeManager(manager.SchedulerDependentManager): try: snap_name = snapshot_ref['name'] LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + # snapshot-related operations are irrelevant for vsadriver model_update = self.driver.create_snapshot(snapshot_ref) if model_update: self.db.snapshot_update(context, snapshot_ref['id'], @@ -194,6 +236,7 @@ class VolumeManager(manager.SchedulerDependentManager): try: LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + # snapshot-related operations are irrelevant for vsadriver self.driver.delete_snapshot(snapshot_ref) except Exception: self.db.snapshot_update(context, @@ -211,23 +254,75 @@ class VolumeManager(manager.SchedulerDependentManager): Returns path to device.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) + driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: - path = self.driver.local_path(volume_ref) + path = 
driver.local_path(volume_ref) else: - path = self.driver.discover_volume(context, volume_ref) + path = driver.discover_volume(context, volume_ref) return path def remove_compute_volume(self, context, volume_id): """Remove remote volume on compute host.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) + driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: return True else: - self.driver.undiscover_volume(volume_ref) + driver.undiscover_volume(volume_ref) def check_for_export(self, context, instance_id): """Make sure whether volume is exported.""" instance_ref = self.db.instance_get(context, instance_id) for volume in instance_ref['volumes']: - self.driver.check_for_export(context, volume['id']) + driver = self._get_driver(volume) + driver.check_for_export(context, volume['id']) + + def periodic_tasks(self, context=None): + """Tasks to be run at a periodic interval.""" + + error_list = [] + try: + self._report_driver_status() + except Exception as ex: + LOG.warning(_("Error during report_driver_status(): %s"), + unicode(ex)) + error_list.append(ex) + + super(VolumeManager, self).periodic_tasks(context) + + return error_list + + def _volume_stats_changed(self, stat1, stat2): + #LOG.info(_("stat1=%s"), stat1) + #LOG.info(_("stat2=%s"), stat2) + + if len(stat1) != len(stat2): + return True + for (k, v) in stat1.iteritems(): + if (k, v) not in stat2.iteritems(): + return True + return False + + def _report_driver_status(self): + #curr_time = time.time() + #LOG.info(_("Report Volume node status")) + #if curr_time - self._last_host_check > FLAGS.volume_state_interval: + # self._last_host_check = curr_time + + LOG.info(_("Updating volume status")) + + volume_stats = self.vsadriver.get_volume_stats(refresh=True) + if self._volume_stats_changed(self._last_volume_stats, volume_stats): + LOG.info(_("New capabilities found: %s"), volume_stats) + self._last_volume_stats = volume_stats + + # This will grab info about the host and queue it + # to be sent to the Schedulers. 
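+            # Illustrative shape of the stats being published (assumed, based
+            # on what nova.scheduler.vsa.VsaScheduler consumes):
+            #   {'drive_qos_info': {
+            #       'SAS_1000': {'DriveType': 'SAS',
+            #                    'DriveCapacity': 1000 << 30,   # bytes
+            #                    'TotalCapacity': ...,
+            #                    'AvailableCapacity': ...,
+            #                    'FullDrive': {'NumFreeDrives': ...,
+            #                                  'NumOccupiedDrives': ...},
+            #                    'PartitionDrive': {
+            #                        'PartitionSize': ...,
+            #                        'NumFreePartitions': ...,
+            #                        'NumOccupiedPartitions': ...}}}}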
+ self.update_service_capabilities(self._last_volume_stats) + else: + self.update_service_capabilities(None) + + def notification(self, context, event): + LOG.info(_("Notification {%s} received"), event) + self._last_volume_stats = [] diff --git a/nova/volume/san.py b/nova/volume/san.py index 9532c8116..6a962c6f2 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -26,6 +26,7 @@ import paramiko from xml.etree import ElementTree +from nova import context from nova import exception from nova import flags from nova import log as logging @@ -64,12 +65,16 @@ class SanISCSIDriver(ISCSIDriver): # discover_volume is still OK # undiscover_volume is still OK - def _connect_to_ssh(self): + def _connect_to_ssh(self, san_ip=None): + if san_ip: + ssh_ip = san_ip + else: + ssh_ip = FLAGS.san_ip ssh = paramiko.SSHClient() #TODO(justinsb): We need a better SSH key policy ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if FLAGS.san_password: - ssh.connect(FLAGS.san_ip, + ssh.connect(ssh_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, password=FLAGS.san_password) @@ -77,7 +82,7 @@ class SanISCSIDriver(ISCSIDriver): privatekeyfile = os.path.expanduser(FLAGS.san_privatekey) # It sucks that paramiko doesn't support DSA keys privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) - ssh.connect(FLAGS.san_ip, + ssh.connect(ssh_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, pkey=privatekey) @@ -85,9 +90,9 @@ class SanISCSIDriver(ISCSIDriver): raise exception.Error(_("Specify san_password or san_privatekey")) return ssh - def _run_ssh(self, command, check_exit_code=True): + def _run_ssh(self, command, check_exit_code=True, san_ip=None): #TODO(justinsb): SSH connection caching (?) - ssh = self._connect_to_ssh() + ssh = self._connect_to_ssh(san_ip) #TODO(justinsb): Reintroduce the retry hack ret = ssh_execute(ssh, command, check_exit_code=check_exit_code) @@ -583,3 +588,311 @@ class HpSanISCSIDriver(SanISCSIDriver): cliq_args['volumeName'] = volume['name'] self._cliq_run_xml("unassignVolume", cliq_args) + + +class ZadaraVsaDriver(SanISCSIDriver): + """Executes commands relating to Virtual Storage Array volumes. + + There are two types of volumes. Front-end(FE) volumes and Back-end(BE) + volumes. + + FE volumes are nova-volumes that are exported by VSA instance & can be + consumed by user instances. We use SSH to connect into the VSA instance + to execute those steps. + + BE volumes are nova-volumes that are attached as back-end storage for the + VSA instance. + + VSA instance essentially consumes the BE volumes and allows creation of FE + volumes over it. + """ + + """ Volume Driver methods """ + def create_volume(self, volume): + """Creates FE/BE volume.""" + if volume['to_vsa_id']: + self._create_be_volume(volume) + else: + self._create_fe_volume(volume) + + def delete_volume(self, volume): + """Deletes FE/BE volume.""" + if volume['to_vsa_id']: + self._delete_be_volume(volume) + else: + self._delete_fe_volume(volume) + + def local_path(self, volume): + # TODO: Is this needed here? + raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """On bootup synchronously ensures a volume export is available.""" + if volume['to_vsa_id']: + return self._ensure_be_export(context, volume) + + # Not required for FE volumes. 
VSA VM will ensure volume exposure + pass + + def create_export(self, context, volume): + """For first time creates volume export.""" + if volume['to_vsa_id']: + return self._create_be_export(context, volume) + else: + return self._create_fe_export(context, volume) + + def remove_export(self, context, volume): + if volume['to_vsa_id']: + return self._remove_be_export(context, volume) + else: + return self._remove_fe_export(context, volume) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + # skip the flags.san_ip check & do the regular check + + if not (FLAGS.san_password or FLAGS.san_privatekey): + raise exception.Error(_("Specify san_password or san_privatekey")) + + """ Internal BE Volume methods """ + def _create_be_volume(self, volume): + """Creates BE volume.""" + if int(volume['size']) == 0: + sizestr = '0' # indicates full-partition + else: + sizestr = '%s' % (int(volume['size']) << 30) # size in bytes + + # Set the qos-str to default type sas + # TODO - later for this piece we will get the direct qos-group name + # in create_volume and hence this lookup will not be needed + qosstr = 'SAS_1000' + drive_type = volume.get('drive_type') + if drive_type is not None: + # for now just use the qos-type string from the disktypes. + qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) + + self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + 'create_qospart', + '--qos', qosstr, + '--pname', volume['name'], + '--psize', sizestr, + check_exit_code=0) + LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name']) + + def _delete_be_volume(self, volume): + try: + self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + 'delete_partition', + '--pname', volume['name'], + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) + return + + LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) + + def _create_be_export(self, context, volume): + """create BE export for a volume""" + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + return self._common_be_export(context, volume, iscsi_target) + + def _ensure_be_export(self, context, volume): + """ensure BE export for a volume""" + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping ensure_export. 
No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + return self._common_be_export(context, volume, iscsi_target) + + def _common_be_export(self, context, volume, iscsi_target): + """ + Common logic that asks zadara_sncfg to setup iSCSI target/lun for + this volume + """ + (out, err) = self._sync_exec('sudo', + '/var/lib/zadara/bin/zadara_sncfg', + 'create_export', + '--pname', volume['name'], + '--tid', iscsi_target, + check_exit_code=0) + + result_xml = ElementTree.fromstring(out) + response_node = result_xml.find("Sn") + if response_node is None: + msg = "Malformed response from zadara_sncfg" + raise exception.Error(msg) + + sn_ip = response_node.findtext("SnIp") + sn_iqn = response_node.findtext("IqnName") + iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target) + + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + sn_iqn)) + return model_update + + def _remove_be_export(self, context, volume): + """Removes BE export for a volume.""" + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + 'remove_export', + '--pname', volume['name'], + '--tid', iscsi_target, + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) + return + + def _get_qosgroup_summary(self): + """gets the list of qosgroups from Zadara SN""" + (out, err) = self._sync_exec('sudo', + '/var/lib/zadara/bin/zadara_sncfg', + 'get_qosgroups_xml', + check_exit_code=0) + qos_groups = {} + #qos_groups = [] + result_xml = ElementTree.fromstring(out) + for element in result_xml.findall('QosGroup'): + qos_group = {} + # get the name of the group. 
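+            # (group names are assumed to follow the <type>_<size_gb> pattern
+            #  used by _create_be_volume, e.g. 'SAS_1000')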
+ # If we cannot find it, forget this element + group_name = element.findtext("Name") + if not group_name: + continue + + # loop through all child nodes & fill up attributes of this group + for child in element.getchildren(): + # two types of elements - property of qos-group & sub property + # classify them accordingly + if child.text: + qos_group[child.tag] = int(child.text) \ + if child.text.isdigit() else child.text + else: + subelement = {} + for subchild in child.getchildren(): + subelement[subchild.tag] = int(subchild.text) \ + if subchild.text.isdigit() else subchild.text + qos_group[child.tag] = subelement + + # Now add this group to the master qos_groups + qos_groups[group_name] = qos_group + #qos_groups.append(qos_group) + + return qos_groups + + """ Internal FE Volume methods """ + def _vsa_run(self, volume, verb, vsa_args): + """ + Runs a command over SSH to VSA instance and checks for return status + """ + vsa_arg_strings = [] + + if vsa_args: + for k, v in vsa_args.items(): + vsa_arg_strings.append(" --%s %s" % (k, v)) + + # Form the zadara_cfg script that will do the configuration at VSA VM + cmd = "/var/lib/zadara/bin/zadara_cfg.py " + verb + \ + ''.join(vsa_arg_strings) + + # get the list of IP's corresponding to VSA VM's + vsa_ips = self.db.vsa_get_vc_ips_list(context.get_admin_context(), + volume['from_vsa_id']) + if not vsa_ips: + raise exception.Error(_("Cannot Lookup VSA VM's IP")) + return + + # pick the first element in the return's fixed_ip for SSH + vsa_ip = vsa_ips[0]['fixed'] + + (out, _err) = self._run_ssh(cmd, san_ip=vsa_ip) + + # check the xml StatusCode to check fro real status + result_xml = ElementTree.fromstring(out) + + status = result_xml.findtext("StatusCode") + if status != '0': + statusmsg = result_xml.findtext("StatusMessage") + msg = (_('vsa_run failed to ' + verb + ' for ' + volume['name'] + + '. 
Result=' + str(statusmsg))) + raise exception.Error(msg) + + return out, _err + + def _create_fe_volume(self, volume): + """Creates FE volume.""" + vsa_args = {} + vsa_args['volname'] = volume['name'] + if int(volume['size']) == 0: + sizestr = '100M' + else: + sizestr = '%sG' % volume['size'] + vsa_args['volsize'] = sizestr + (out, _err) = self._vsa_run(volume, "create_volume", vsa_args) + + LOG.debug(_("VSA FE create_volume for %s suceeded"), volume['name']) + + def _delete_fe_volume(self, volume): + """Deletes FE volume.""" + vsa_args = {} + vsa_args['volname'] = volume['name'] + (out, _err) = self._vsa_run(volume, "delete_volume", vsa_args) + LOG.debug(_("VSA FE delete_volume for %s suceeded"), volume['name']) + return + + def _create_fe_export(self, context, volume): + """Create FE volume exposure at VSA VM""" + vsa_args = {} + vsa_args['volname'] = volume['name'] + (out, _err) = self._vsa_run(volume, "create_export", vsa_args) + + result_xml = ElementTree.fromstring(out) + response_node = result_xml.find("Vsa") + if response_node is None: + msg = "Malformed response to VSA command " + raise exception.Error(msg) + + LOG.debug(_("VSA create_export for %s suceeded"), volume['name']) + + vsa_ip = response_node.findtext("VsaIp") + vsa_iqn = response_node.findtext("IqnName") + vsa_interface = response_node.findtext("VsaInterface") + iscsi_portal = vsa_ip + ":3260," + vsa_interface + + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + vsa_iqn)) + + return model_update + + def remove_fe_export(self, context, volume): + """Remove FE volume exposure at VSA VM""" + vsa_args = {} + vsa_args['volname'] = volume['name'] + (out, _err) = self._vsa_run(volume, "remove_export", vsa_args) + LOG.debug(_("VSA FE remove_export for %s suceeded"), volume['name']) + return + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first.""" + + drive_info = self._get_qosgroup_summary() + return {'drive_qos_info': drive_info} diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py new file mode 100644 index 000000000..a94a6b7a4 --- /dev/null +++ b/nova/vsa/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.vsa.api import API diff --git a/nova/vsa/api.py b/nova/vsa/api.py new file mode 100644 index 000000000..ed83ff563 --- /dev/null +++ b/nova/vsa/api.py @@ -0,0 +1,407 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to Virtual Storage Arrays (VSAs). +""" + +#import datetime +import sys +import base64 + +from xml.etree import ElementTree +from xml.etree.ElementTree import Element, SubElement + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import quota +from nova import rpc +from nova.db import base + +from nova import compute +from nova import volume +from nova.compute import instance_types +from nova.vsa import drive_types + + +FLAGS = flags.FLAGS +flags.DEFINE_boolean('vsa_multi_vol_creation', True, + 'Ask scheduler to create multiple volumes in one call') + +LOG = logging.getLogger('nova.vsa') + + +class API(base.Base): + """API for interacting with the VSA manager.""" + + def __init__(self, compute_api=None, volume_api=None, **kwargs): + self.compute_api = compute_api or compute.API() + self.volume_api = volume_api or volume.API() + super(API, self).__init__(**kwargs) + + def _get_default_vsa_instance_type(self): + return instance_types.get_instance_type_by_name( + FLAGS.default_vsa_instance_type) + + def _check_storage_parameters(self, context, vsa_name, storage, shared): + """ + Translates storage array of disks to the list of volumes + :param storage: List of dictionaries with following keys: + disk_name, num_disks, size + :param shared: Specifies if storage is dedicated or shared. + For shared storage disks split into partitions + """ + volume_params = [] + for node in storage: + + name = node.get('drive_name', None) + num_disks = node.get('num_drives', 1) + + if name is None: + raise exception.ApiError(_("No drive_name param found in %s"), + node) + + # find DB record for this disk + try: + drive_ref = drive_types.drive_type_get_by_name(context, name) + except exception.NotFound: + raise exception.ApiError(_("Invalid drive type name %s"), + name) + + # if size field present - override disk size specified in DB + size = node.get('size', drive_ref['size_gb']) + + if shared: + part_size = FLAGS.vsa_part_size_gb + total_capacity = num_disks * size + num_volumes = total_capacity / part_size + size = part_size + else: + num_volumes = num_disks + size = 0 # special handling for full drives + + for i in range(num_volumes): + # VP-TODO: potentialy may conflict with previous volumes + volume_name = vsa_name + ("_%s_vol-%d" % (name, i)) + volume = { + 'size': size, + 'snapshot_id': None, + 'name': volume_name, + 'description': 'BE volume for ' + volume_name, + 'drive_ref': drive_ref + } + volume_params.append(volume) + + return volume_params + + def create(self, context, display_name='', display_description='', + vc_count=1, instance_type=None, image_name=None, + availability_zone=None, storage=[], shared=None): + """ + Provision VSA instance with corresponding compute instances + and associated volumes + :param storage: List of dictionaries with following keys: + disk_name, num_disks, size + :param shared: Specifies if storage is dedicated or shared. 
+ For shared storage disks split into partitions + """ + + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + + if instance_type is None: + instance_type = self._get_default_vsa_instance_type() + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + if storage is None: + storage = [] + + if shared is None or shared == 'False' or shared == False: + shared = False + else: + shared = True + + # check if image is ready before starting any work + if image_name is None or image_name == '': + image_name = FLAGS.vc_image_name + try: + image_service = self.compute_api.image_service + vc_image = image_service.show_by_name(context, image_name) + vc_image_href = vc_image['id'] + except exception.ImageNotFound: + raise exception.ApiError(_("Failed to find configured image %s"), + image_name) + + options = { + 'display_name': display_name, + 'display_description': display_description, + 'project_id': context.project_id, + 'availability_zone': availability_zone, + 'instance_type_id': instance_type['id'], + 'image_ref': vc_image_href, + 'vc_count': vc_count, + 'status': FLAGS.vsa_status_creating, + } + LOG.info(_("Creating VSA: %s") % options) + + # create DB entry for VSA instance + try: + vsa_ref = self.db.vsa_create(context, options) + except exception.Error: + raise exception.ApiError(_(sys.exc_info()[1])) + vsa_id = vsa_ref['id'] + vsa_name = vsa_ref['name'] + + # check storage parameters + try: + volume_params = self._check_storage_parameters(context, vsa_name, + storage, shared) + except exception.ApiError: + self.update_vsa_status(context, vsa_id, + status=FLAGS.vsa_status_failed) + raise + + # after creating DB entry, re-check and set some defaults + updates = {} + if (not hasattr(vsa_ref, 'display_name') or + vsa_ref.display_name is None or + vsa_ref.display_name == ''): + updates['display_name'] = display_name = vsa_name + updates['vol_count'] = len(volume_params) + vsa_ref = self.update(context, vsa_id, **updates) + + # create volumes + if FLAGS.vsa_multi_vol_creation: + if len(volume_params) > 0: + #filter_class = 'nova.scheduler.vsa.InstanceTypeFilter' + request_spec = { + 'num_volumes': len(volume_params), + 'vsa_id': vsa_id, + 'volumes': volume_params, + #'filter': filter_class, + } + + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volumes", + "args": {"topic": FLAGS.volume_topic, + "request_spec": request_spec, + "availability_zone": availability_zone}}) + else: + # create BE volumes one-by-one + for vol in volume_params: + try: + vol_name = vol['name'] + vol_size = vol['size'] + LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\ + "volume %(vol_name)s, %(vol_size)d GB"), + locals()) + + vol_ref = self.volume_api.create(context, + vol_size, + vol['snapshot_id'], + vol_name, + vol['description'], + to_vsa_id=vsa_id, + drive_type_id=vol['drive_ref'].get('id'), + availability_zone=availability_zone) + except: + self.update_vsa_status(context, vsa_id, + status=FLAGS.vsa_status_partial) + raise + + if len(volume_params) == 0: + # No BE volumes - ask VSA manager to start VCs + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "create_vsa", + "args": {"vsa_id": vsa_id}}) + + return vsa_ref + + def update_vsa_status(self, context, vsa_id, status): + updates = dict(status=status) + LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"), + locals()) + return self.update(context, vsa_id, **updates) + + def 
update(self, context, vsa_id, **kwargs): + """Updates the VSA instance in the datastore. + + :param context: The security context + :param vsa_id: ID of the VSA instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :returns: None + """ + LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals()) + + vc_count = kwargs.get('vc_count', None) + if vc_count is not None: + # VP-TODO: This request may want to update number of VCs + # Get number of current VCs and add/delete VCs appropriately + vsa = self.get(context, vsa_id) + vc_count = int(vc_count) + if vsa['vc_count'] != vc_count: + self.update_num_vcs(context, vsa, vc_count) + + return self.db.vsa_update(context, vsa_id, kwargs) + + def update_num_vcs(self, context, vsa, vc_count): + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + + old_vc_count = vsa['vc_count'] + if vc_count > old_vc_count: + LOG.debug(_("Adding %d VCs to VSA %s."), + (vc_count - old_vc_count, vsa['name'])) + # VP-TODO: actual code for adding new VCs + + elif vc_count < old_vc_count: + LOG.debug(_("Deleting %d VCs from VSA %s."), + (old_vc_count - vc_count, vsa['name'])) + # VP-TODO: actual code for deleting extra VCs + + def _force_volume_delete(self, ctxt, volume): + """Delete a volume, bypassing the check that it must be available.""" + host = volume['host'] + + if not host: + # Volume not yet assigned to host + # Deleting volume from database and skipping rpc. + self.db.volume_destroy(ctxt, volume['id']) + return + + rpc.cast(ctxt, + self.db.queue_get_for(ctxt, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume['id']}}) + + def delete_be_volumes(self, context, vsa_id, force_delete=True): + + be_volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + for volume in be_volumes: + try: + vol_name = volume['name'] + LOG.info(_("VSA ID %(vsa_id)s: Deleting BE volume "\ + "%(vol_name)s"), locals()) + self.volume_api.delete(context, volume['id']) + except exception.ApiError: + LOG.info(_("Unable to delete volume %s"), volume['name']) + if force_delete: + LOG.info(_("VSA ID %(vsa_id)s: Forced delete. 
BE volume "\ + "%(vol_name)s"), locals()) + self._force_volume_delete(context, volume) + + def delete(self, context, vsa_id): + """Terminate a VSA instance.""" + LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id) + + # allow deletion of volumes in "abnormal" state + + # Delete all FE volumes + fe_volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + for volume in fe_volumes: + try: + vol_name = volume['name'] + LOG.info(_("VSA ID %(vsa_id)s: Deleting FE volume "\ + "%(vol_name)s"), locals()) + self.volume_api.delete(context, volume['id']) + except exception.ApiError: + LOG.info(_("Unable to delete volume %s"), volume['name']) + + # Delete all BE volumes + self.delete_be_volumes(context, vsa_id, force_delete=True) + + # Delete all VC instances + instances = self.db.instance_get_all_by_vsa(context, vsa_id) + for instance in instances: + name = instance['name'] + LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"), + locals()) + self.compute_api.delete(context, instance['id']) + + # Delete VSA instance + self.db.vsa_destroy(context, vsa_id) + + def get(self, context, vsa_id): + rv = self.db.vsa_get(context, vsa_id) + return rv + + def get_all(self, context): + if context.is_admin: + return self.db.vsa_get_all(context) + return self.db.vsa_get_all_by_project(context, context.project_id) + + def generate_user_data(self, context, vsa, volumes): + e_vsa = Element("vsa") + + e_vsa_detail = SubElement(e_vsa, "id") + e_vsa_detail.text = str(vsa['id']) + e_vsa_detail = SubElement(e_vsa, "name") + e_vsa_detail.text = vsa['display_name'] + e_vsa_detail = SubElement(e_vsa, "description") + e_vsa_detail.text = vsa['display_description'] + e_vsa_detail = SubElement(e_vsa, "vc_count") + e_vsa_detail.text = str(vsa['vc_count']) + + e_volumes = SubElement(e_vsa, "volumes") + for volume in volumes: + + loc = volume['provider_location'] + if loc is None: + ip = '' + iscsi_iqn = '' + iscsi_portal = '' + else: + (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ") + (ip, iscsi_portal) = iscsi_target.split(":", 1) + + e_vol = SubElement(e_volumes, "volume") + e_vol_detail = SubElement(e_vol, "id") + e_vol_detail.text = str(volume['id']) + e_vol_detail = SubElement(e_vol, "name") + e_vol_detail.text = volume['name'] + e_vol_detail = SubElement(e_vol, "display_name") + e_vol_detail.text = volume['display_name'] + e_vol_detail = SubElement(e_vol, "size_gb") + e_vol_detail.text = str(volume['size']) + e_vol_detail = SubElement(e_vol, "status") + e_vol_detail.text = volume['status'] + e_vol_detail = SubElement(e_vol, "ip") + e_vol_detail.text = ip + e_vol_detail = SubElement(e_vol, "iscsi_iqn") + e_vol_detail.text = iscsi_iqn + e_vol_detail = SubElement(e_vol, "iscsi_portal") + e_vol_detail.text = iscsi_portal + e_vol_detail = SubElement(e_vol, "lun") + e_vol_detail.text = '0' + e_vol_detail = SubElement(e_vol, "sn_host") + e_vol_detail.text = volume['host'] + + _xml = ElementTree.tostring(e_vsa) + return base64.b64encode(_xml) diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py new file mode 100644 index 000000000..6c61acee4 --- /dev/null +++ b/nova/vsa/connection.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Abstraction of the underlying connection to VC.""" + +from nova.vsa import fake + + +def get_connection(): + # Return an object that is able to talk to VCs + return fake.FakeVcConnection() diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py new file mode 100644 index 000000000..308d21fec --- /dev/null +++ b/nova/vsa/fake.py @@ -0,0 +1,22 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class FakeVcConnection: + + def init_host(self, host): + pass diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py new file mode 100644 index 000000000..a9a9fa2e8 --- /dev/null +++ b/nova/vsa/manager.py @@ -0,0 +1,172 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all processes relating to Virtual Storage Arrays (VSA). 
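+
+The manager listens on the ``vsa_topic`` queue. The volume manager notifies it
+as BE volumes become available (vsa_volume_created); once no volume for a VSA
+is left in the 'creating' state, it either generates the VC user-data and
+launches the VC instances through the compute API, or - if any BE allocation
+failed - deletes the BE volumes and marks the VSA as failed.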
+ +**Related Flags** + +""" + +from nova import log as logging +from nova import manager +from nova import flags +from nova import utils +from nova import exception +from nova import compute +from nova import volume +from nova import vsa +from nova.compute import instance_types + + +FLAGS = flags.FLAGS +flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection', + 'Driver to use for controlling VSAs') + +LOG = logging.getLogger('nova.vsa.manager') + + +class VsaManager(manager.SchedulerDependentManager): + """Manages Virtual Storage Arrays (VSAs).""" + + def __init__(self, vsa_driver=None, *args, **kwargs): + if not vsa_driver: + vsa_driver = FLAGS.vsa_driver + self.driver = utils.import_object(vsa_driver) + self.compute_manager = utils.import_object(FLAGS.compute_manager) + + self.compute_api = compute.API() + self.volume_api = volume.API() + self.vsa_api = vsa.API() + + super(VsaManager, self).__init__(*args, **kwargs) + + def init_host(self): + self.driver.init_host(host=self.host) + super(VsaManager, self).init_host() + + @exception.wrap_exception() + def create_vsa(self, context, vsa_id): + """Called by API if there were no BE volumes assigned""" + LOG.debug(_("Create call received for VSA %s"), vsa_id) + + vsa_id = int(vsa_id) # just in case + + try: + vsa = self.vsa_api.get(context, vsa_id) + except Exception as ex: + msg = _("Failed to find VSA %(vsa_id)d") % locals() + LOG.exception(msg) + return + + return self._start_vcs(context, vsa) + + @exception.wrap_exception() + def vsa_volume_created(self, context, vol_id, vsa_id, status): + """Callback for volume creations""" + LOG.debug(_("VSA ID %(vsa_id)s: Volume %(vol_id)s created. "\ + "Status %(status)s"), locals()) + vsa_id = int(vsa_id) # just in case + + # Get all volumes for this VSA + # check if any of them still in creating phase + volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + for volume in volumes: + if volume['status'] == 'creating': + vol_name = volume['name'] + vol_disp_name = volume['display_name'] + LOG.debug(_("Volume %(vol_name)s (%(vol_disp_name)s) still "\ + "in creating phase - wait"), locals()) + return + + try: + vsa = self.vsa_api.get(context, vsa_id) + except Exception as ex: + msg = _("Failed to find VSA %(vsa_id)d") % locals() + LOG.exception(msg) + return + + if len(volumes) != vsa['vol_count']: + LOG.debug(_("VSA ID %d: Not all volumes are created (%d of %d)"), + vsa_id, len(volumes), vsa['vol_count']) + return + + # all volumes created (successfully or not) + return self._start_vcs(context, vsa, volumes) + + def _start_vcs(self, context, vsa, volumes=[]): + """Start VCs for VSA """ + + vsa_id = vsa['id'] + if vsa['status'] == FLAGS.vsa_status_creating: + self.vsa_api.update_vsa_status(context, vsa_id, + FLAGS.vsa_status_launching) + else: + return + + # in _separate_ loop go over all volumes and mark as "attached" + has_failed_volumes = False + for volume in volumes: + vol_name = volume['name'] + vol_disp_name = volume['display_name'] + status = volume['status'] + LOG.info(_("VSA ID %(vsa_id)d: Volume %(vol_name)s "\ + "(%(vol_disp_name)s) is in %(status)s state"), + locals()) + if status == 'available': + try: + # self.volume_api.update(context, volume['id'], + # dict(attach_status="attached")) + pass + except Exception as ex: + msg = _("Failed to update attach status for volume " + "%(vol_name)s. 
%(ex)s") % locals() + LOG.exception(msg) + else: + has_failed_volumes = True + + if has_failed_volumes: + LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) + self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True) + self.vsa_api.update_vsa_status(context, vsa_id, + FLAGS.vsa_status_failed) + return + + # create user-data record for VC + storage_data = self.vsa_api.generate_user_data(context, vsa, volumes) + + instance_type = instance_types.get_instance_type( + vsa['instance_type_id']) + + # now start the VC instance + + vc_count = vsa['vc_count'] + LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"), + locals()) + vc_instances = self.compute_api.create(context, + instance_type, # vsa['vsa_instance_type'], + vsa['image_ref'], + min_count=1, + max_count=vc_count, + display_name='vc-' + vsa['display_name'], + display_description='VC for VSA ' + vsa['display_name'], + availability_zone=vsa['availability_zone'], + user_data=storage_data, + vsa_id=vsa_id) + + self.vsa_api.update_vsa_status(context, vsa_id, + FLAGS.vsa_status_created) -- cgit From f6844960dd062154244c706283cf1916ee7194ff Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 15 Jul 2011 18:11:13 -0700 Subject: added missing instance_get_all_by_vsa --- nova/db/api.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 9147f136b..fde229099 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -522,6 +522,11 @@ def instance_get_all_by_host(context, host): return IMPL.instance_get_all_by_host(context, host) +def instance_get_all_by_vsa(context, vsa_id): + """Get all instance belonging to a VSA.""" + return IMPL.instance_get_all_by_vsa(context, vsa_id) + + def instance_get_all_by_reservation(context, reservation_id): """Get all instance belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) -- cgit From d340d7e90e245c79182906d603aec57d086cca1f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 15 Jul 2011 18:25:37 -0700 Subject: added missing drive_types.py --- nova/vsa/drive_types.py | 106 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 nova/vsa/drive_types.py (limited to 'nova') diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py new file mode 100644 index 000000000..b8cb66b22 --- /dev/null +++ b/nova/vsa/drive_types.py @@ -0,0 +1,106 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to Virtual Storage Arrays (VSAs). 
+""" + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import quota +from nova import rpc +from nova.db import base + +from nova import compute +from nova import volume +from nova.compute import instance_types + + +FLAGS = flags.FLAGS +flags.DEFINE_string('drive_type_template_short', '%s_%sGB_%sRPM', + 'Template string for generation of drive type name') +flags.DEFINE_string('drive_type_template_long', '%s_%sGB_%sRPM_%s', + 'Template string for generation of drive type name') + + +LOG = logging.getLogger('nova.drive_types') + + +def _generate_default_drive_name(type, size_gb, rpm, capabilities): + if capabilities is None or capabilities == '': + return FLAGS.drive_type_template_short % \ + (type, str(size_gb), rpm) + else: + return FLAGS.drive_type_template_long % \ + (type, str(size_gb), rpm, capabilities) + + +def drive_type_create(context, type, size_gb, rpm, + capabilities='', visible=True, name=None): + if name is None: + name = _generate_default_drive_name(type, size_gb, rpm, + capabilities) + LOG.debug(_("Creating drive type %(name)s: "\ + "%(type)s %(size_gb)s %(rpm)s %(capabilities)s"), locals()) + + values = { + 'type': type, + 'size_gb': size_gb, + 'rpm': rpm, + 'capabilities': capabilities, + 'visible': visible, + 'name': name + } + return db.drive_type_create(context, values) + + +def drive_type_update(context, name, **kwargs): + LOG.debug(_("Updating drive type %(name)s: "), locals()) + return db.drive_type_update(context, name, kwargs) + + +def drive_type_rename(context, name, new_name=None): + + if new_name is None or \ + new_name == '': + disk = db.drive_type_get_by_name(context, name) + new_name = _generate_default_drive_name(disk['type'], + disk['size_gb'], disk['rpm'], disk['capabilities']) + + LOG.debug(_("Renaming drive type %(name)s to %(new_name)s"), locals()) + + values = dict(name=new_name) + return db.drive_type_update(context, name, values) + + +def drive_type_delete(context, name): + LOG.debug(_("Deleting drive type %(name)s"), locals()) + db.drive_type_destroy(context, name) + + +def drive_type_get(context, id): + return db.drive_type_get(context, id) + + +def drive_type_get_by_name(context, name): + return db.drive_type_get_by_name(context, name) + + +def drive_type_get_all(context, visible=None): + return db.drive_type_get_all(context, visible) -- cgit From cc7c1c49cb15d39445e94c248697d62f63a014a7 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 18 Jul 2011 08:59:00 -0700 Subject: Added auth info to XML --- nova/vsa/api.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova') diff --git a/nova/vsa/api.py b/nova/vsa/api.py index ed83ff563..853816477 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -368,6 +368,10 @@ class API(base.Base): e_vsa_detail.text = vsa['display_description'] e_vsa_detail = SubElement(e_vsa, "vc_count") e_vsa_detail.text = str(vsa['vc_count']) + e_vsa_detail = SubElement(e_vsa, "auth_user") + e_vsa_detail.text = str(context.user.name) + e_vsa_detail = SubElement(e_vsa, "auth_access_key") + e_vsa_detail.text = str(context.user.access) e_volumes = SubElement(e_vsa, "volumes") for volume in volumes: -- cgit From 15bbaf8bbdd48231f9ce98e4d8867b0477b44645 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 18 Jul 2011 09:57:31 -0700 Subject: localization changes. Removed vsa params from volume cloud API. 
Alex changes --- nova/api/ec2/cloud.py | 19 ++----------------- nova/scheduler/vsa.py | 8 ++++---- nova/vsa/api.py | 11 +++++++---- nova/vsa/drive_types.py | 8 -------- nova/vsa/manager.py | 6 ++++-- 5 files changed, 17 insertions(+), 35 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 786ceaccc..e31b755de 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -730,26 +730,12 @@ class CloudController(object): snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) - to_vsa_id = kwargs.get('to_vsa_id', None) - if to_vsa_id: - to_vsa_id = ec2utils.ec2_id_to_id(to_vsa_id) - - from_vsa_id = kwargs.get('from_vsa_id', None) - if from_vsa_id: - from_vsa_id = ec2utils.ec2_id_to_id(from_vsa_id) - - if to_vsa_id or from_vsa_id: - LOG.audit(_("Create volume of %s GB associated with VSA "\ - "(to: %d, from: %d)"), - size, to_vsa_id, from_vsa_id, context=context) - volume = self.volume_api.create( context, size=size, snapshot_id=snapshot_id, name=kwargs.get('display_name'), - description=kwargs.get('display_description'), - to_vsa_id=to_vsa_id, from_vsa_id=from_vsa_id) + description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. @@ -864,8 +850,7 @@ class CloudController(object): def describe_vsas(self, context, vsa_id=None, status=None, availability_zone=None, **kwargs): -# LOG.debug(_("vsa_id=%s, status=%s, az=%s"), -# (vsa_id, status, availability_zone)) + LOG.audit(_("Describe VSAs")) result = [] vsas = [] if vsa_id is not None: diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 4277c0ba8..260545746 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -96,8 +96,8 @@ class VsaScheduler(simple.SimpleScheduler): cap['cmp_func'] is not None and \ cap['cmp_func'](qos_values[cap['cap1']], drive_type[cap['cap2']]): - # LOG.debug(_("One of required capabilities found: %s:%s"), - # cap['cap1'], drive_type[cap['cap2']]) + # LOG.debug(("One of required capabilities found: %s:%s"), + # cap['cap1'], drive_type[cap['cap2']]) pass else: return False @@ -416,8 +416,8 @@ class VsaScheduler(simple.SimpleScheduler): drive_type = dict(drive_type) # otherwise - drive type is loaded - LOG.debug(_("Spawning volume %d with drive type %s"), - volume_ref['id'], drive_type) + LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ + "%(drive_type)s"), locals()) LOG.debug(_("Service states BEFORE %s"), self.zone_manager.service_states) diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 853816477..7ce643aab 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -275,15 +275,18 @@ class API(base.Base): " Setting to default"), vc_count) vc_count = FLAGS.max_vcs_in_vsa + vsa_name = vsa['name'] old_vc_count = vsa['vc_count'] if vc_count > old_vc_count: - LOG.debug(_("Adding %d VCs to VSA %s."), - (vc_count - old_vc_count, vsa['name'])) + add_cnt = vc_count - old_vc_count + LOG.debug(_("Adding %(add_cnt)d VCs to VSA %(vsa_name)s."), + locals()) # VP-TODO: actual code for adding new VCs elif vc_count < old_vc_count: - LOG.debug(_("Deleting %d VCs from VSA %s."), - (old_vc_count - vc_count, vsa['name'])) + del_cnt = old_vc_count - vc_count + LOG.debug(_("Deleting %(add_cnt)d VCs from VSA %(vsa_name)s."), + locals()) # VP-TODO: actual code for deleting extra VCs def _force_volume_delete(self, ctxt, volume): diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 
b8cb66b22..781206cdf 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -23,14 +23,6 @@ from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota -from nova import rpc -from nova.db import base - -from nova import compute -from nova import volume -from nova.compute import instance_types - FLAGS = flags.FLAGS flags.DEFINE_string('drive_type_template_short', '%s_%sGB_%sRPM', diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index a9a9fa2e8..c67358672 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -101,8 +101,10 @@ class VsaManager(manager.SchedulerDependentManager): return if len(volumes) != vsa['vol_count']: - LOG.debug(_("VSA ID %d: Not all volumes are created (%d of %d)"), - vsa_id, len(volumes), vsa['vol_count']) + cvol_real = len(volumes) + cvol_exp = vsa['vol_count'] + LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\ + "(%(cvol_real)d of %(cvol_exp)d)"), locals()) return # all volumes created (successfully or not) -- cgit From 3983bca4c9528d286b4e154956ceb749b4875274 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 18 Jul 2011 14:00:19 -0700 Subject: VSA schedulers reorg --- nova/scheduler/vsa.py | 267 ++++++++++++++++++++++------------------- nova/scheduler/zone_manager.py | 4 +- 2 files changed, 147 insertions(+), 124 deletions(-) (limited to 'nova') diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 260545746..c6517d9d5 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -41,16 +41,22 @@ flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, flags.DEFINE_boolean('vsa_select_unique_drives', True, 'Allow selection of same host for multiple drives') +def BYTES_TO_GB(bytes): + return bytes >> FLAGS.gb_to_bytes_shift + +def GB_TO_BYTES(gb): + return gb << FLAGS.gb_to_bytes_shift + class VsaScheduler(simple.SimpleScheduler): - """Implements Naive Scheduler that tries to find least loaded host.""" + """Implements Scheduler for volume placement.""" def __init__(self, *args, **kwargs): super(VsaScheduler, self).__init__(*args, **kwargs) self._notify_all_volume_hosts("startup") def _notify_all_volume_hosts(self, event): - rpc.cast(context.get_admin_context(), + rpc.fanout_cast(context.get_admin_context(), FLAGS.volume_topic, {"method": "notification", "args": {"event": event}}) @@ -62,7 +68,7 @@ class VsaScheduler(simple.SimpleScheduler): return result def _compare_sizes_exact_match(self, cap_capacity, size_gb): - cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift + cap_capacity = BYTES_TO_GB(int(cap_capacity)) size_gb = int(size_gb) result = cap_capacity == size_gb # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. 
"\ @@ -70,7 +76,7 @@ class VsaScheduler(simple.SimpleScheduler): return result def _compare_sizes_approxim(self, cap_capacity, size_gb): - cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift + cap_capacity = BYTES_TO_GB(int(cap_capacity)) size_gb = int(size_gb) size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100 @@ -106,7 +112,7 @@ class VsaScheduler(simple.SimpleScheduler): def _filter_hosts(self, topic, request_spec, host_list=None): drive_type = request_spec['drive_type'] - LOG.debug(_("Filter hosts for drive type %(drive_type)s") % locals()) + LOG.debug(_("Filter hosts for drive type %s"), drive_type['name']) if host_list is None: host_list = self.zone_manager.service_states.iteritems() @@ -121,14 +127,15 @@ class VsaScheduler(simple.SimpleScheduler): for qosgrp, qos_values in gos_info.iteritems(): if self._qosgrp_match(drive_type, qos_values): if qos_values['AvailableCapacity'] > 0: - LOG.debug(_("Adding host %s to the list"), host) + # LOG.debug(_("Adding host %s to the list"), host) filtered_hosts.append((host, gos_info)) else: LOG.debug(_("Host %s has no free capacity. Skip"), host) break - LOG.debug(_("Found hosts %(filtered_hosts)s") % locals()) + host_names = [item[0] for item in filtered_hosts] + LOG.debug(_("Filter hosts: %s"), host_names) return filtered_hosts def _allowed_to_use_host(self, host, selected_hosts, unique): @@ -142,104 +149,13 @@ class VsaScheduler(simple.SimpleScheduler): if host not in [item[0] for item in selected_hosts]: selected_hosts.append((host, cap)) - def _alg_least_used_host(self, request_spec, all_hosts, selected_hosts): - size = request_spec['size'] - drive_type = request_spec['drive_type'] - best_host = None - best_qoscap = None - best_cap = None - min_used = 0 - - LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ - "%(drive_type)s from %(all_hosts)s"), locals()) - - for (host, capabilities) in all_hosts: - has_enough_capacity = False - used_capacity = 0 - for qosgrp, qos_values in capabilities.iteritems(): - - used_capacity = used_capacity + qos_values['TotalCapacity'] \ - - qos_values['AvailableCapacity'] - - if self._qosgrp_match(drive_type, qos_values): - # we found required qosgroup - - if size == 0: # full drive match - if qos_values['FullDrive']['NumFreeDrives'] > 0: - has_enough_capacity = True - matched_qos = qos_values - else: - break - else: - if qos_values['AvailableCapacity'] >= size and \ - (qos_values['PartitionDrive'][ - 'NumFreePartitions'] > 0 or \ - qos_values['FullDrive']['NumFreeDrives'] > 0): - has_enough_capacity = True - matched_qos = qos_values - else: - break - - if has_enough_capacity and \ - self._allowed_to_use_host(host, - selected_hosts, - unique) and \ - (best_host is None or used_capacity < min_used): - - min_used = used_capacity - best_host = host - best_qoscap = matched_qos - best_cap = capabilities - - if best_host: - self._add_hostcap_to_list(selected_hosts, host, best_cap) - LOG.debug(_("Best host found: %(best_host)s. 
"\ - "(used capacity %(min_used)s)"), locals()) - return (best_host, best_qoscap) - - def _alg_most_avail_capacity(self, request_spec, all_hosts, + def host_selection_algorithm(self, request_spec, all_hosts, selected_hosts, unique): - size = request_spec['size'] - drive_type = request_spec['drive_type'] - best_host = None - best_qoscap = None - best_cap = None - max_avail = 0 - - LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ - "%(drive_type)s from %(all_hosts)s"), locals()) - - for (host, capabilities) in all_hosts: - for qosgrp, qos_values in capabilities.iteritems(): - if self._qosgrp_match(drive_type, qos_values): - # we found required qosgroup - - if size == 0: # full drive match - available = qos_values['FullDrive']['NumFreeDrives'] - else: - available = qos_values['AvailableCapacity'] - - if available > max_avail and \ - self._allowed_to_use_host(host, - selected_hosts, - unique): - max_avail = available - best_host = host - best_qoscap = qos_values - best_cap = capabilities - break # go to the next host - - if best_host: - self._add_hostcap_to_list(selected_hosts, host, best_cap) - LOG.debug(_("Best host found: %(best_host)s. "\ - "(available capacity %(max_avail)s)"), locals()) - - return (best_host, best_qoscap) + """Must override this method for VSA scheduler to work.""" + raise NotImplementedError(_("Must implement host selection mechanism")) def _select_hosts(self, request_spec, all_hosts, selected_hosts=None): - #self._alg_most_avail_capacity(request_spec, all_hosts, selected_hosts) - if selected_hosts is None: selected_hosts = [] @@ -249,7 +165,7 @@ class VsaScheduler(simple.SimpleScheduler): LOG.debug(_("Maximum number of hosts selected (%d)"), len(selected_hosts)) unique = False - (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + (host, qos_cap) = self.host_selection_algorithm(request_spec, selected_hosts, selected_hosts, unique) @@ -262,12 +178,10 @@ class VsaScheduler(simple.SimpleScheduler): # if we've not tried yet (# of sel hosts < max) - unique=True # or failed to select from selected_hosts - unique=False # select from all hosts - (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + (host, qos_cap) = self.host_selection_algorithm(request_spec, all_hosts, selected_hosts, unique) - LOG.debug(_("Selected host %(host)s"), locals()) - if host is None: raise driver.WillNotSchedule(_("No available hosts")) @@ -329,8 +243,11 @@ class VsaScheduler(simple.SimpleScheduler): LOG.debug(_("volume_params %(volume_params)s") % locals()) + i = 1 for vol in volume_params: - LOG.debug(_("Assigning host to volume %s") % vol['name']) + name = vol['name'] + LOG.debug(_("%(i)d: Volume %(name)s"), locals()) + i += 1 if forced_host: vol['host'] = forced_host @@ -352,22 +269,19 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = qos_cap self._consume_resource(qos_cap, vol['size'], -1) - LOG.debug(_("Assigned host %(host)s, capabilities %(qos_cap)s"), - locals()) - - LOG.debug(_("END: volume_params %(volume_params)s") % locals()) + # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), locals()) def schedule_create_volumes(self, context, request_spec, availability_zone, *_args, **_kwargs): """Picks hosts for hosting multiple volumes.""" + LOG.debug(_("Service states BEFORE %s"), + self.zone_manager.service_states) + num_volumes = request_spec.get('num_volumes') LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") % locals()) - LOG.debug(_("Service states BEFORE %s"), - self.zone_manager.service_states) - vsa_id = 
request_spec.get('vsa_id') volume_params = request_spec.get('volumes') @@ -381,7 +295,6 @@ class VsaScheduler(simple.SimpleScheduler): LOG.debug(_("Service states AFTER %s"), self.zone_manager.service_states) - except: if vsa_id: db.vsa_update(context, vsa_id, @@ -415,13 +328,12 @@ class VsaScheduler(simple.SimpleScheduler): volume_id, *_args, **_kwargs) drive_type = dict(drive_type) - # otherwise - drive type is loaded - LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ - "%(drive_type)s"), locals()) - LOG.debug(_("Service states BEFORE %s"), self.zone_manager.service_states) + LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ + "%(drive_type)s"), locals()) + request_spec = {'size': volume_ref['size'], 'drive_type': drive_type} hosts = self._filter_hosts("volume", request_spec) @@ -487,9 +399,118 @@ class VsaScheduler(simple.SimpleScheduler): qos_values['DriveCapacity'] self._consume_full_drive(qos_values, direction) else: - qos_values['AvailableCapacity'] += direction * \ - (size << FLAGS.gb_to_bytes_shift) - self._consume_partition(qos_values, - size << FLAGS.gb_to_bytes_shift, - direction) + qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size) + self._consume_partition(qos_values, GB_TO_BYTES(size), direction) return + + +class VsaSchedulerLeastUsedHost(VsaScheduler): + """ + Implements VSA scheduler to select the host with least used capacity + of particular type. + """ + + def __init__(self, *args, **kwargs): + super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + min_used = 0 + + for (host, capabilities) in all_hosts: + + has_enough_capacity = False + used_capacity = 0 + for qosgrp, qos_values in capabilities.iteritems(): + + used_capacity = used_capacity + qos_values['TotalCapacity'] \ + - qos_values['AvailableCapacity'] + + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + if qos_values['FullDrive']['NumFreeDrives'] > 0: + has_enough_capacity = True + matched_qos = qos_values + else: + break + else: + if qos_values['AvailableCapacity'] >= size and \ + (qos_values['PartitionDrive'][ + 'NumFreePartitions'] > 0 or \ + qos_values['FullDrive']['NumFreeDrives'] > 0): + has_enough_capacity = True + matched_qos = qos_values + else: + break + + if has_enough_capacity and \ + self._allowed_to_use_host(host, + selected_hosts, + unique) and \ + (best_host is None or used_capacity < min_used): + + min_used = used_capacity + best_host = host + best_qoscap = matched_qos + best_cap = capabilities + + if best_host: + self._add_hostcap_to_list(selected_hosts, best_host, best_cap) + min_used = BYTES_TO_GB(min_used) + LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\ + "(used capacity %(min_used)s)"), locals()) + return (best_host, best_qoscap) + + +class VsaSchedulerMostAvailCapacity(VsaScheduler): + """ + Implements VSA scheduler to select the host with most available capacity + of one particular type. 
+ """ + + def __init__(self, *args, **kwargs): + super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + max_avail = 0 + + for (host, capabilities) in all_hosts: + for qosgrp, qos_values in capabilities.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + available = qos_values['FullDrive']['NumFreeDrives'] + else: + available = qos_values['AvailableCapacity'] + + if available > max_avail and \ + self._allowed_to_use_host(host, + selected_hosts, + unique): + max_avail = available + best_host = host + best_qoscap = qos_values + best_cap = capabilities + break # go to the next host + + if best_host: + self._add_hostcap_to_list(selected_hosts, best_host, best_cap) + type_str = "drives" if size == 0 else "bytes" + LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "\ + "(available %(max_avail)s %(type_str)s)"), locals()) + + return (best_host, best_qoscap) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index efdac06e1..b23bdbf85 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -196,8 +196,10 @@ class ZoneManager(object): def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" + # logging.debug(_("Received %(service_name)s service update from " + # "%(host)s: %(capabilities)s") % locals()) logging.debug(_("Received %(service_name)s service update from " - "%(host)s: %(capabilities)s") % locals()) + "%(host)s") % locals()) service_caps = self.service_states.get(host, {}) capabilities["timestamp"] = utils.utcnow() # Reported time service_caps[service_name] = capabilities -- cgit From 9e74803d5eb8a70ba829ac0569f1cd6cd372a6f2 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 22 Jul 2011 15:14:29 -0700 Subject: Reverted volume driver part --- nova/api/ec2/cloud.py | 19 +- nova/api/openstack/contrib/drive_types.py | 55 ++-- .../openstack/contrib/virtual_storage_arrays.py | 77 +++--- nova/db/api.py | 10 +- nova/db/sqlalchemy/api.py | 19 +- nova/scheduler/vsa.py | 5 +- .../api/openstack/contrib/test_drive_types.py | 192 +++++++++++++ nova/tests/api/openstack/contrib/test_vsa.py | 239 ++++++++++++++++ nova/tests/test_drive_types.py | 146 ++++++++++ nova/volume/driver.py | 220 +++++++++++++++ nova/volume/manager.py | 81 ++---- nova/volume/san.py | 308 --------------------- nova/vsa/api.py | 2 +- nova/vsa/drive_types.py | 27 +- 14 files changed, 938 insertions(+), 462 deletions(-) create mode 100644 nova/tests/api/openstack/contrib/test_drive_types.py create mode 100644 nova/tests/api/openstack/contrib/test_vsa.py create mode 100644 nova/tests/test_drive_types.py (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e31b755de..7d0ce360f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -889,12 +889,15 @@ class CloudController(object): "%(rpm)s %(capabilities)s %(visible)s"), locals()) - rv = drive_types.drive_type_create(context, type, size_gb, rpm, - capabilities, visible, name) + rv = drive_types.create(context, type, size_gb, rpm, + capabilities, visible, name) return {'driveTypeSet': [dict(rv)]} def update_drive_type(self, context, name, **kwargs): LOG.audit(_("Update Drive Type %s"), 
name) + + dtype = drive_types.get_by_name(context, name) + updatable_fields = ['type', 'size_gb', 'rpm', @@ -906,16 +909,18 @@ class CloudController(object): kwargs[field] is not None and \ kwargs[field] != '': changes[field] = kwargs[field] + if changes: - drive_types.drive_type_update(context, name, **changes) + drive_types.update(context, dtype['id'], **changes) return True def rename_drive_type(self, context, name, new_name): - drive_types.drive_type_rename(context, name, new_name) + drive_types.rename(context, name, new_name) return True def delete_drive_type(self, context, name): - drive_types.drive_type_delete(context, name) + dtype = drive_types.get_by_name(context, name) + drive_types.delete(context, dtype['id']) return True def describe_drive_types(self, context, names=None, visible=True): @@ -923,11 +928,11 @@ class CloudController(object): drives = [] if names is not None: for name in names: - drive = drive_types.drive_type_get_by_name(context, name) + drive = drive_types.get_by_name(context, name) if drive['visible'] == visible: drives.append(drive) else: - drives = drive_types.drive_type_get_all(context, visible) + drives = drive_types.get_all(context, visible) # VP-TODO: Change it later to EC2 compatible func (output) diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index 85b3170cb..590eaaec0 100644 --- a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -21,6 +21,7 @@ from webob import exc from nova.vsa import drive_types +from nova import exception from nova import db from nova import quota from nova import log as logging @@ -32,6 +33,19 @@ from nova.api.openstack import wsgi LOG = logging.getLogger("nova.api.drive_types") +def _drive_type_view(drive): + """Maps keys for drive types view.""" + d = {} + + d['id'] = drive['id'] + d['displayName'] = drive['name'] + d['type'] = drive['type'] + d['size'] = drive['size_gb'] + d['rpm'] = drive['rpm'] + d['capabilities'] = drive['capabilities'] + return d + + class DriveTypeController(object): """The Drive Type API controller for the OpenStack API.""" @@ -47,25 +61,13 @@ class DriveTypeController(object): "capabilities", ]}}} - def _drive_type_view(self, context, drive): - """Maps keys for drive types view.""" - d = {} - - d['id'] = drive['id'] - d['displayName'] = drive['name'] - d['type'] = drive['type'] - d['size'] = drive['size_gb'] - d['rpm'] = drive['rpm'] - d['capabilities'] = drive['capabilities'] - return d - def index(self, req): """Returns a list of drive types.""" context = req.environ['nova.context'] - drive_types = drive_types.drive_type_get_all(context) - limited_list = common.limited(drive_types, req) - res = [self._drive_type_view(context, drive) for drive in limited_list] + dtypes = drive_types.get_all(context) + limited_list = common.limited(dtypes, req) + res = [_drive_type_view(drive) for drive in limited_list] return {'drive_types': res} def show(self, req, id): @@ -73,11 +75,11 @@ class DriveTypeController(object): context = req.environ['nova.context'] try: - drive = drive_types.drive_type_get(context, id) + drive = drive_types.get(context, id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'drive_type': self._drive_type_view(context, drive)} + return {'drive_type': _drive_type_view(drive)} def create(self, req, body): """Creates a new drive type.""" @@ -97,14 +99,14 @@ class DriveTypeController(object): LOG.audit(_("Create drive type %(name)s for "\ "%(type)s:%(size)s:%(rpm)s"), 
locals(), context=context) - new_drive = drive_types.drive_type_create(context, - type=type, - size_gb=size, - rpm=rpm, - capabilities=capabilities, - name=name) + new_drive = drive_types.create(context, + type=type, + size_gb=size, + rpm=rpm, + capabilities=capabilities, + name=name) - return {'drive_type': self._drive_type_view(context, new_drive)} + return {'drive_type': _drive_type_view(new_drive)} def delete(self, req, id): """Deletes a drive type.""" @@ -113,11 +115,10 @@ class DriveTypeController(object): LOG.audit(_("Delete drive type with id: %s"), id, context=context) try: - drive = drive_types.drive_type_get(context, id) - drive_types.drive_type_delete(context, drive['name']) + drive_types.delete(context, id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + # return exc.HTTPAccepted() class Drive_types(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index eca2d68dd..3c1362f0c 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -39,6 +39,29 @@ FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api.vsa") +def _vsa_view(context, vsa, details=False): + """Map keys for vsa summary/detailed view.""" + d = {} + + d['id'] = vsa.get('id') + d['name'] = vsa.get('name') + d['displayName'] = vsa.get('display_name') + d['displayDescription'] = vsa.get('display_description') + + d['createTime'] = vsa.get('created_at') + d['status'] = vsa.get('status') + + if 'vsa_instance_type' in vsa: + d['vcType'] = vsa['vsa_instance_type'].get('name', None) + else: + d['vcType'] = None + + d['vcCount'] = vsa.get('vc_count') + d['driveCount'] = vsa.get('vol_count') + + return d + + class VsaController(object): """The Virtual Storage Array API controller for the OpenStack API.""" @@ -61,34 +84,12 @@ class VsaController(object): self.vsa_api = vsa.API() super(VsaController, self).__init__() - def _vsa_view(self, context, vsa, details=False): - """Map keys for vsa summary/detailed view.""" - d = {} - - d['id'] = vsa['id'] - d['name'] = vsa['name'] - d['displayName'] = vsa['display_name'] - d['displayDescription'] = vsa['display_description'] - - d['createTime'] = vsa['created_at'] - d['status'] = vsa['status'] - - if vsa['vsa_instance_type']: - d['vcType'] = vsa['vsa_instance_type'].get('name', None) - else: - d['vcType'] = None - - d['vcCount'] = vsa['vc_count'] - d['driveCount'] = vsa['vol_count'] - - return d - def _items(self, req, details): """Return summary or detailed list of VSAs.""" context = req.environ['nova.context'] vsas = self.vsa_api.get_all(context) limited_list = common.limited(vsas, req) - res = [self._vsa_view(context, vsa, details) for vsa in limited_list] + res = [_vsa_view(context, vsa, details) for vsa in limited_list] return {'vsaSet': res} def index(self, req): @@ -108,24 +109,20 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'vsa': self._vsa_view(context, vsa, details=True)} + return {'vsa': _vsa_view(context, vsa, details=True)} def create(self, req, body): """Create a new VSA.""" context = req.environ['nova.context'] - if not body: + if not body or 'vsa' not in body: + LOG.debug(_("No body provided"), context=context) return faults.Fault(exc.HTTPUnprocessableEntity()) vsa = body['vsa'] display_name = vsa.get('displayName') - display_description = vsa.get('displayDescription') - storage = 
vsa.get('storage') - shared = vsa.get('shared') vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type) - availability_zone = vsa.get('placement', {}).get('AvailabilityZone') - try: instance_type = instance_types.get_instance_type_by_name(vc_type) except exception.NotFound: @@ -134,15 +131,17 @@ class VsaController(object): LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"), locals(), context=context) - result = self.vsa_api.create(context, - display_name=display_name, - display_description=display_description, - storage=storage, - shared=shared, - instance_type=instance_type, - availability_zone=availability_zone) + args = dict(display_name=display_name, + display_description=vsa.get('displayDescription'), + instance_type=instance_type, + storage=vsa.get('storage'), + shared=vsa.get('shared'), + availability_zone=vsa.get('placement', {}).\ + get('AvailabilityZone')) - return {'vsa': self._vsa_view(context, result, details=True)} + result = self.vsa_api.create(context, **args) + + return {'vsa': _vsa_view(context, result, details=True)} def delete(self, req, id): """Delete a VSA.""" @@ -154,7 +153,7 @@ class VsaController(object): self.vsa_api.delete(context, vsa_id=id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + # return exc.HTTPAccepted() class VsaVolumeDriveController(volumes.VolumeController): diff --git a/nova/db/api.py b/nova/db/api.py index fde229099..a3a6d47c4 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1455,14 +1455,14 @@ def drive_type_create(context, values): return IMPL.drive_type_create(context, values) -def drive_type_update(context, name, values): +def drive_type_update(context, drive_type_id, values): """Updates drive type record.""" - return IMPL.drive_type_update(context, name, values) + return IMPL.drive_type_update(context, drive_type_id, values) -def drive_type_destroy(context, name): +def drive_type_destroy(context, drive_type_id): """Deletes drive type record.""" - return IMPL.drive_type_destroy(context, name) + return IMPL.drive_type_destroy(context, drive_type_id) def drive_type_get(context, drive_type_id): @@ -1475,7 +1475,7 @@ def drive_type_get_by_name(context, name): return IMPL.drive_type_get_by_name(context, name) -def drive_type_get_all(context, visible=None): +def drive_type_get_all(context, visible): """Returns all (or only visible) drive types.""" return IMPL.drive_type_get_all(context, visible) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index aa5a6e052..c08524265 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3364,31 +3364,30 @@ def drive_type_create(context, values): @require_admin_context -def drive_type_update(context, name, values): +def drive_type_update(context, drive_type_id, values): """ Updates drive type record. """ session = get_session() with session.begin(): - drive_type_ref = drive_type_get_by_name(context, name, session=session) + drive_type_ref = drive_type_get(context, drive_type_id, + session=session) drive_type_ref.update(values) drive_type_ref.save(session=session) return drive_type_ref @require_admin_context -def drive_type_destroy(context, name): +def drive_type_destroy(context, drive_type_id): """ Deletes drive type record. 
""" session = get_session() drive_type_ref = session.query(models.DriveTypes).\ - filter_by(name=name) + filter_by(id=drive_type_id) records = drive_type_ref.delete() if records == 0: - raise exception.VirtualDiskTypeNotFoundByName(name=name) - else: - return drive_type_ref + raise exception.VirtualDiskTypeNotFound(id=drive_type_id) @require_context @@ -3428,20 +3427,20 @@ def drive_type_get_by_name(context, name, session=None): @require_context -def drive_type_get_all(context, visible=False): +def drive_type_get_all(context, visible): """ Returns all (or only visible) drive types. """ session = get_session() - if not visible: + if visible: drive_types = session.query(models.DriveTypes).\ filter_by(deleted=can_read_deleted(context)).\ + filter_by(visible=True).\ order_by("name").\ all() else: drive_types = session.query(models.DriveTypes).\ filter_by(deleted=can_read_deleted(context)).\ - filter_by(visible=True).\ order_by("name").\ all() return drive_types diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index c6517d9d5..059afce68 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -41,9 +41,11 @@ flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, flags.DEFINE_boolean('vsa_select_unique_drives', True, 'Allow selection of same host for multiple drives') + def BYTES_TO_GB(bytes): return bytes >> FLAGS.gb_to_bytes_shift + def GB_TO_BYTES(gb): return gb << FLAGS.gb_to_bytes_shift @@ -269,7 +271,8 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = qos_cap self._consume_resource(qos_cap, vol['size'], -1) - # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), locals()) + # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), + # locals()) def schedule_create_volumes(self, context, request_spec, availability_zone, *_args, **_kwargs): diff --git a/nova/tests/api/openstack/contrib/test_drive_types.py b/nova/tests/api/openstack/contrib/test_drive_types.py new file mode 100644 index 000000000..2f7d327d3 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_drive_types.py @@ -0,0 +1,192 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob + +#from nova import compute +from nova.vsa import drive_types +from nova import exception +from nova import context +from nova import test +from nova import log as logging +from nova.tests.api.openstack import fakes + +from nova.api.openstack.contrib.drive_types import _drive_type_view + +LOG = logging.getLogger('nova.tests.api.openstack.drive_types') + +last_param = {} + + +def _get_default_drive_type(): + param = { + 'name': 'Test drive type', + 'type': 'SATA', + 'size_gb': 123, + 'rpm': '7200', + 'capabilities': '', + 'visible': True + } + return param + + +def _create(context, **param): + global last_param + LOG.debug(_("_create: %s"), param) + param['id'] = 123 + last_param = param + return param + + +def _delete(context, id): + global last_param + last_param = dict(id=id) + + LOG.debug(_("_delete: %s"), locals()) + + +def _get(context, id): + global last_param + last_param = dict(id=id) + + LOG.debug(_("_get: %s"), locals()) + if id != '123': + raise exception.NotFound + + dtype = _get_default_drive_type() + dtype['id'] = id + return dtype + + +def _get_all(context, visible=True): + LOG.debug(_("_get_all: %s"), locals()) + dtype = _get_default_drive_type() + dtype['id'] = 123 + return [dtype] + + +class DriveTypesApiTest(test.TestCase): + def setUp(self): + super(DriveTypesApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(drive_types, "create", _create) + self.stubs.Set(drive_types, "delete", _delete) + self.stubs.Set(drive_types, "get", _get) + self.stubs.Set(drive_types, "get_all", _get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(DriveTypesApiTest, self).tearDown() + + def test_drive_types_api_create(self): + global last_param + last_param = {} + + dtype = _get_default_drive_type() + dtype['id'] = 123 + + body = dict(drive_type=_drive_type_view(dtype)) + req = webob.Request.blank('/v1.1/zadr-drive_types') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + # Compare if parameters were correctly passed to stub + for k, v in last_param.iteritems(): + self.assertEqual(last_param[k], dtype[k]) + + resp_dict = json.loads(resp.body) + + # Compare response + self.assertTrue('drive_type' in resp_dict) + resp_dtype = resp_dict['drive_type'] + self.assertEqual(resp_dtype, _drive_type_view(dtype)) + + def test_drive_types_api_delete(self): + global last_param + last_param = {} + + dtype_id = 123 + req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['id']), str(dtype_id)) + + def test_drive_types_show(self): + global last_param + last_param = {} + + dtype_id = 123 + req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['id']), str(dtype_id)) + + resp_dict = json.loads(resp.body) + + # Compare response + self.assertTrue('drive_type' in resp_dict) + resp_dtype = resp_dict['drive_type'] + exp_dtype = 
_get_default_drive_type() + exp_dtype['id'] = dtype_id + exp_dtype_view = _drive_type_view(exp_dtype) + for k, v in exp_dtype_view.iteritems(): + self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) + + def test_drive_types_show_invalid_id(self): + global last_param + last_param = {} + + dtype_id = 234 + req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['id']), str(dtype_id)) + + def test_drive_types_index(self): + + req = webob.Request.blank('/v1.1/zadr-drive_types') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('drive_types' in resp_dict) + resp_dtypes = resp_dict['drive_types'] + self.assertEqual(len(resp_dtypes), 1) + + resp_dtype = resp_dtypes.pop() + exp_dtype = _get_default_drive_type() + exp_dtype['id'] = 123 + exp_dtype_view = _drive_type_view(exp_dtype) + for k, v in exp_dtype_view.iteritems(): + self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py new file mode 100644 index 000000000..bc0b7eaa6 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -0,0 +1,239 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob + +#from nova import compute +from nova import vsa +from nova import exception +from nova import context +from nova import test +from nova import log as logging +from nova.tests.api.openstack import fakes + +from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view + +LOG = logging.getLogger('nova.tests.api.openstack.vsa') + +last_param = {} + + +def _get_default_vsa_param(): + return { + 'display_name': 'Test_VSA_name', + 'display_description': 'Test_VSA_description', + 'vc_count': 1, + 'instance_type': 'm1.small', + 'image_name': None, + 'availability_zone': None, + 'storage': [], + 'shared': False + } + + +def stub_vsa_create(self, context, **param): + global last_param + LOG.debug(_("_create: param=%s"), param) + param['id'] = 123 + param['name'] = 'Test name' + last_param = param + return param + + +def stub_vsa_delete(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_delete: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + +def stub_vsa_get(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_get: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + param = _get_default_vsa_param() + param['id'] = vsa_id + return param + + +def stub_vsa_get_all(self, context): + LOG.debug(_("_get_all: %s"), locals()) + param = _get_default_vsa_param() + param['id'] = 123 + return [param] + + +class VSAApiTest(test.TestCase): + def setUp(self): + super(VSAApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(vsa.api.API, "create", stub_vsa_create) + self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) + self.stubs.Set(vsa.api.API, "get", stub_vsa_get) + self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAApiTest, self).tearDown() + + def test_vsa_api_create(self): + global last_param + last_param = {} + + vsa = {"displayName": "VSA Test Name", + "displayDescription": "VSA Test Desc"} + body = dict(vsa=vsa) + req = webob.Request.blank('/v1.1/zadr-vsa') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + # Compare if parameters were correctly passed to stub + self.assertEqual(last_param['display_name'], "VSA Test Name") + self.assertEqual(last_param['display_description'], "VSA Test Desc") + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName']) + self.assertEqual(resp_dict['vsa']['displayDescription'], + vsa['displayDescription']) + + def test_vsa_api_create_no_body(self): + req = webob.Request.blank('/v1.1/zadr-vsa') + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + + def test_vsa_api_delete(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + 
self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_api_delete_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_api_show(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['id'], str(vsa_id)) + + def test_vsa_api_show_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_api_index(self): + req = webob.Request.blank('/v1.1/zadr-vsa') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + def test_vsa_api_detail(self): + req = webob.Request.blank('/v1.1/zadr-vsa/detail') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + +class VSAVolumeDriveApiTest(test.TestCase): + def setUp(self): + super(VSAVolumeDriveApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(vsa.api.API, "create", stub_vsa_create) + self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) + self.stubs.Set(vsa.api.API, "get", stub_vsa_get) + self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAVolumeDriveApiTest, self).tearDown() diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py new file mode 100644 index 000000000..8534bcde5 --- /dev/null +++ b/nova/tests/test_drive_types.py @@ -0,0 +1,146 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for drive types codecode +""" +import time + +from nova import context +from nova import flags +from nova import log as logging +from nova import test +from nova.vsa import drive_types + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa') + + +class DriveTypesTestCase(test.TestCase): + """Test cases for driver types code""" + def setUp(self): + super(DriveTypesTestCase, self).setUp() + self.cntx = context.RequestContext(None, None) + self.cntx_admin = context.get_admin_context() + self._dtype = self._create_drive_type() + + def tearDown(self): + self._dtype = None + + def _create_drive_type(self): + """Create a volume object.""" + dtype = {} + dtype['type'] = 'SATA' + dtype['size_gb'] = 150 + dtype['rpm'] = 5000 + dtype['capabilities'] = None + dtype['visible'] = True + + LOG.debug(_("Drive Type created %s"), dtype) + return dtype + + def test_drive_type_create_delete(self): + dtype = self._dtype + prev_all_dtypes = drive_types.get_all(self.cntx_admin, False) + + new = drive_types.create(self.cntx_admin, **dtype) + for k, v in dtype.iteritems(): + self.assertEqual(v, new[k], 'one of fields doesnt match') + + new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + self.assertNotEqual(len(prev_all_dtypes), + len(new_all_dtypes), + 'drive type was not created') + + drive_types.delete(self.cntx_admin, new['id']) + new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + self.assertEqual(prev_all_dtypes, + new_all_dtypes, + 'drive types was not deleted') + + def test_drive_type_check_name_generation(self): + dtype = self._dtype + new = drive_types.create(self.cntx_admin, **dtype) + expected_name = FLAGS.drive_type_template_short % \ + (dtype['type'], dtype['size_gb'], dtype['rpm']) + self.assertEqual(new['name'], expected_name, + 'name was not generated correctly') + + dtype['capabilities'] = 'SEC' + new2 = drive_types.create(self.cntx_admin, **dtype) + expected_name = FLAGS.drive_type_template_long % \ + (dtype['type'], dtype['size_gb'], dtype['rpm'], + dtype['capabilities']) + self.assertEqual(new2['name'], expected_name, + 'name was not generated correctly') + + drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.cntx_admin, new2['id']) + + def test_drive_type_create_delete_invisible(self): + dtype = self._dtype + dtype['visible'] = False + prev_all_dtypes = drive_types.get_all(self.cntx_admin, True) + new = drive_types.create(self.cntx_admin, **dtype) + + new_all_dtypes = drive_types.get_all(self.cntx_admin, True) + self.assertEqual(prev_all_dtypes, new_all_dtypes) + + new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + self.assertNotEqual(prev_all_dtypes, new_all_dtypes) + + drive_types.delete(self.cntx_admin, new['id']) + + def test_drive_type_rename_update(self): + dtype = self._dtype + dtype['capabilities'] = None + + new = drive_types.create(self.cntx_admin, **dtype) + for k, v in dtype.iteritems(): + self.assertEqual(v, new[k], 'one of fields doesnt match') + + new_name = 'NEW_DRIVE_NAME' + new = drive_types.rename(self.cntx_admin, new['name'], new_name) + self.assertEqual(new['name'], new_name) + + new = drive_types.rename(self.cntx_admin, new_name) + expected_name = FLAGS.drive_type_template_short % \ + (dtype['type'], dtype['size_gb'], dtype['rpm']) + self.assertEqual(new['name'], expected_name) + + changes = {'rpm': 7200} + new = drive_types.update(self.cntx_admin, new['id'], **changes) + for k, v in 
changes.iteritems(): + self.assertEqual(v, new[k], 'one of fields doesnt match') + + drive_types.delete(self.cntx_admin, new['id']) + + def test_drive_type_get(self): + dtype = self._dtype + new = drive_types.create(self.cntx_admin, **dtype) + + new2 = drive_types.get(self.cntx_admin, new['id']) + for k, v in new2.iteritems(): + self.assertEqual(str(new[k]), str(new2[k]), + 'one of fields doesnt match') + + new2 = drive_types.get_by_name(self.cntx_admin, new['name']) + for k, v in new.iteritems(): + self.assertEqual(str(new[k]), str(new2[k]), + 'one of fields doesnt match') + + drive_types.delete(self.cntx_admin, new['id']) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ec09325d8..b93fc1d92 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -22,6 +22,7 @@ Drivers for volumes. import time import os +from xml.etree import ElementTree from nova import exception from nova import flags @@ -208,6 +209,11 @@ class VolumeDriver(object): """Make sure volume is exported.""" raise NotImplementedError() + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first.""" + return None + class AOEDriver(VolumeDriver): """Implements AOE specific volume commands.""" @@ -809,3 +815,217 @@ class LoggingVolumeDriver(VolumeDriver): if match: matches.append(entry) return matches + + +class ZadaraBEDriver(ISCSIDriver): + """Performs actions to configure Zadara BE module.""" + + def _not_vsa_be_volume(self, volume): + """Returns True if volume is not VSA BE volume.""" + if volume['to_vsa_id'] is None: + LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name']) + return True + else: + return False + + def check_for_setup_error(self): + """No setup necessary for Zadara BE.""" + pass + + """ Volume Driver methods """ + def create_volume(self, volume): + """Creates BE volume.""" + if self._not_vsa_be_volume(volume): + return super(ZadaraBEDriver, self).create_volume(volume) + + if int(volume['size']) == 0: + sizestr = '0' # indicates full-partition + else: + sizestr = '%s' % (int(volume['size']) << 30) # size in bytes + + # Set the qos-str to default type sas + qosstr = 'SAS_1000' + drive_type = volume.get('drive_type') + if drive_type is not None: + qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) + + try: + self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + 'create_qospart', + '--qos', qosstr, + '--pname', volume['name'], + '--psize', sizestr, + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE create_volume for %s failed"), volume['name']) + raise + + LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name']) + + def delete_volume(self, volume): + """Deletes BE volume.""" + if self._not_vsa_be_volume(volume): + return super(ZadaraBEDriver, self).delete_volume(volume) + + try: + self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + 'delete_partition', + '--pname', volume['name'], + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) + return + + LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) + + def local_path(self, volume): + if self._not_vsa_be_volume(volume): + return super(ZadaraBEDriver, self).local_path(volume) + + raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """ensure BE export for a volume""" + if self._not_vsa_be_volume(volume): + return super(ZadaraBEDriver, 
self).ensure_export(context, volume) + + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping ensure_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + ret = self._common_be_export(context, volume, iscsi_target) + except exception.ProcessExecutionError: + return + return ret + + def create_export(self, context, volume): + """create BE export for a volume""" + if self._not_vsa_be_volume(volume): + return super(ZadaraBEDriver, self).create_export(context, volume) + + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + try: + ret = self._common_be_export(context, volume, iscsi_target) + except exception.ProcessExecutionError: + raise + return ret + + def remove_export(self, context, volume): + """Removes BE export for a volume.""" + if self._not_vsa_be_volume(volume): + return super(ZadaraBEDriver, self).remove_export(context, volume) + + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + 'remove_export', + '--pname', volume['name'], + '--tid', iscsi_target, + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) + return + + def create_snapshot(self, snapshot): + """Nothing required for snapshot""" + if self._not_vsa_be_volume(snapshot): + return super(ZadaraBEDriver, self).create_snapshot(snapshot) + + pass + + def delete_snapshot(self, snapshot): + """Nothing required to delete a snapshot""" + if self._not_vsa_be_volume(snapshot): + return super(ZadaraBEDriver, self).delete_snapshot(snapshot) + + pass + + """ Internal BE Volume methods """ + def _common_be_export(self, context, volume, iscsi_target): + """ + Common logic that asks zadara_sncfg to setup iSCSI target/lun for + this volume + """ + (out, err) = self._sync_exec('sudo', + '/var/lib/zadara/bin/zadara_sncfg', + 'create_export', + '--pname', volume['name'], + '--tid', iscsi_target, + check_exit_code=0) + + result_xml = ElementTree.fromstring(out) + response_node = result_xml.find("Sn") + if response_node is None: + msg = "Malformed response from zadara_sncfg" + raise exception.Error(msg) + + sn_ip = response_node.findtext("SnIp") + sn_iqn = response_node.findtext("IqnName") + iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target) + + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + sn_iqn)) + return model_update + + def _get_qosgroup_summary(self): + """gets the list of qosgroups from Zadara BE""" + try: + (out, err) = self._sync_exec('sudo', + '/var/lib/zadara/bin/zadara_sncfg', + 'get_qosgroups_xml', + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("Failed to retrieve QoS info")) + return {} + + qos_groups = {} + result_xml = ElementTree.fromstring(out) + for element in result_xml.findall('QosGroup'): + qos_group = {} + # get the name of the group.
+ # If we cannot find it, forget this element + group_name = element.findtext("Name") + if not group_name: + continue + + # loop through all child nodes & fill up attributes of this group + for child in element.getchildren(): + # two types of elements - property of qos-group & sub property + # classify them accordingly + if child.text: + qos_group[child.tag] = int(child.text) \ + if child.text.isdigit() else child.text + else: + subelement = {} + for subchild in child.getchildren(): + subelement[subchild.tag] = int(subchild.text) \ + if subchild.text.isdigit() else subchild.text + qos_group[child.tag] = subelement + + # Now add this group to the master qos_groups + qos_groups[group_name] = qos_group + + return qos_groups + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first.""" + + drive_info = self._get_qosgroup_summary() + return {'drive_qos_info': drive_info} diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 3e2892fee..d2c36e96f 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,7 +42,7 @@ intact. """ -import time +# import time from nova import context from nova import exception @@ -60,41 +60,27 @@ flags.DEFINE_string('storage_availability_zone', 'availability zone of this service') flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') -flags.DEFINE_string('vsa_volume_driver', 'nova.volume.san.ZadaraVsaDriver', - 'Driver to use for FE/BE volume creation with VSA') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') -flags.DEFINE_integer('volume_state_interval', 60, - 'Interval in seconds for querying volumes status') +# flags.DEFINE_integer('volume_state_interval', 60, +# 'Interval in seconds for querying volumes status') class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" - def __init__(self, volume_driver=None, vsa_volume_driver=None, - *args, **kwargs): + def __init__(self, volume_driver=None, *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" if not volume_driver: volume_driver = FLAGS.volume_driver self.driver = utils.import_object(volume_driver) - if not vsa_volume_driver: - vsa_volume_driver = FLAGS.vsa_volume_driver - self.vsadriver = utils.import_object(vsa_volume_driver) super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) # NOTE(vish): Implementation specific db handling is done # by the driver. self.driver.db = self.db - self.vsadriver.db = self.db self._last_volume_stats = [] #self._last_host_check = 0 - def _get_driver(self, volume_ref): - if volume_ref['to_vsa_id'] is None and \ - volume_ref['from_vsa_id'] is None: - return self.driver - else: - return self.vsadriver - def init_host(self): """Do any initialization that needs to be run if this is a standalone service.""" @@ -104,8 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: if volume['status'] in ['available', 'in-use']: - driver = self._get_driver(volume) - driver.ensure_export(ctxt, volume) + self.driver.ensure_export(ctxt, volume) else: LOG.info(_("volume %s: skipping export"), volume['name']) @@ -126,28 +111,26 @@ class VolumeManager(manager.SchedulerDependentManager): # before passing it to the driver. 
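The qos-group parser added to ZadaraBEDriver above is easier to follow against a concrete input. The XML layout below is only an assumption inferred from the parser itself (the patch does not document zadara_sncfg's actual output); the snippet replays the same folding logic in isolation:

from xml.etree import ElementTree

# Hypothetical 'get_qosgroups_xml' output, shaped to match what the parser expects.
sample = ("<Sns><QosGroup>"
          "<Name>SAS_1000</Name>"
          "<AvailableCapacity>800</AvailableCapacity>"
          "<Drives><Total>4</Total><Used>1</Used></Drives>"
          "</QosGroup></Sns>")

qos_groups = {}
for element in ElementTree.fromstring(sample).findall('QosGroup'):
    qos_group = {}
    group_name = element.findtext("Name")
    if not group_name:
        continue
    for child in list(element):
        if child.text:
            # scalar property of the qos-group
            qos_group[child.tag] = (int(child.text)
                                    if child.text.isdigit() else child.text)
        else:
            # element with sub-properties becomes a nested dict
            qos_group[child.tag] = dict(
                (sub.tag, int(sub.text) if sub.text.isdigit() else sub.text)
                for sub in list(child))
    qos_groups[group_name] = qos_group

# qos_groups == {'SAS_1000': {'Name': 'SAS_1000', 'AvailableCapacity': 800,
#                             'Drives': {'Total': 4, 'Used': 1}}}

get_volume_stats() then hands this dict to the schedulers as {'drive_qos_info': qos_groups}.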
volume_ref['host'] = self.host - driver = self._get_driver(volume_ref) try: vol_name = volume_ref['name'] vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) if snapshot_id == None: - model_update = driver.create_volume(volume_ref) + model_update = self.driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = driver.create_volume_from_snapshot( + model_update = self.driver.create_volume_from_snapshot( volume_ref, snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - model_update = driver.create_export(context, volume_ref) + model_update = self.driver.create_export(context, volume_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) - # except Exception: - except: + except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) self._notify_vsa(context, volume_ref, 'error') @@ -181,15 +164,14 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) - driver = self._get_driver(volume_ref) try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) - driver.remove_export(context, volume_ref) + self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) - driver.delete_volume(volume_ref) + self.driver.delete_volume(volume_ref) except exception.VolumeIsBusy, e: LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) - driver.ensure_export(context, volume_ref) + self.driver.ensure_export(context, volume_ref) self.db.volume_update(context, volume_ref['id'], {'status': 'available'}) return True @@ -212,7 +194,6 @@ class VolumeManager(manager.SchedulerDependentManager): try: snap_name = snapshot_ref['name'] LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) - # snapshot-related operations are irrelevant for vsadriver model_update = self.driver.create_snapshot(snapshot_ref) if model_update: self.db.snapshot_update(context, snapshot_ref['id'], @@ -236,7 +217,6 @@ class VolumeManager(manager.SchedulerDependentManager): try: LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) - # snapshot-related operations are irrelevant for vsadriver self.driver.delete_snapshot(snapshot_ref) except Exception: self.db.snapshot_update(context, @@ -254,29 +234,26 @@ class VolumeManager(manager.SchedulerDependentManager): Returns path to device.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: - path = driver.local_path(volume_ref) + path = self.driver.local_path(volume_ref) else: - path = driver.discover_volume(context, volume_ref) + path = self.driver.discover_volume(context, volume_ref) return path def remove_compute_volume(self, context, volume_id): """Remove remote volume on compute host.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: return True else: - driver.undiscover_volume(volume_ref) + self.driver.undiscover_volume(volume_ref) def check_for_export(self, context, instance_id): """Make sure whether volume is exported.""" instance_ref = self.db.instance_get(context, instance_id) for volume 
in instance_ref['volumes']: - driver = self._get_driver(volume) - driver.check_for_export(context, volume['id']) + self.driver.check_for_export(context, volume['id']) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" @@ -310,18 +287,20 @@ class VolumeManager(manager.SchedulerDependentManager): #if curr_time - self._last_host_check > FLAGS.volume_state_interval: # self._last_host_check = curr_time - LOG.info(_("Updating volume status")) - - volume_stats = self.vsadriver.get_volume_stats(refresh=True) - if self._volume_stats_changed(self._last_volume_stats, volume_stats): - LOG.info(_("New capabilities found: %s"), volume_stats) - self._last_volume_stats = volume_stats - - # This will grab info about the host and queue it - # to be sent to the Schedulers. - self.update_service_capabilities(self._last_volume_stats) - else: - self.update_service_capabilities(None) + volume_stats = self.driver.get_volume_stats(refresh=True) + if volume_stats: + LOG.info(_("Checking volume capabilities")) + + if self._volume_stats_changed(self._last_volume_stats, volume_stats): + + LOG.info(_("New capabilities found: %s"), volume_stats) + self._last_volume_stats = volume_stats + + # This will grab info about the host and queue it + # to be sent to the Schedulers. + self.update_service_capabilities(self._last_volume_stats) + else: + self.update_service_capabilities(None) def notification(self, context, event): LOG.info(_("Notification {%s} received"), event) diff --git a/nova/volume/san.py b/nova/volume/san.py index 6a962c6f2..be7869ac7 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -588,311 +588,3 @@ class HpSanISCSIDriver(SanISCSIDriver): cliq_args['volumeName'] = volume['name'] self._cliq_run_xml("unassignVolume", cliq_args) - - -class ZadaraVsaDriver(SanISCSIDriver): - """Executes commands relating to Virtual Storage Array volumes. - - There are two types of volumes. Front-end(FE) volumes and Back-end(BE) - volumes. - - FE volumes are nova-volumes that are exported by VSA instance & can be - consumed by user instances. We use SSH to connect into the VSA instance - to execute those steps. - - BE volumes are nova-volumes that are attached as back-end storage for the - VSA instance. - - VSA instance essentially consumes the BE volumes and allows creation of FE - volumes over it. - """ - - """ Volume Driver methods """ - def create_volume(self, volume): - """Creates FE/BE volume.""" - if volume['to_vsa_id']: - self._create_be_volume(volume) - else: - self._create_fe_volume(volume) - - def delete_volume(self, volume): - """Deletes FE/BE volume.""" - if volume['to_vsa_id']: - self._delete_be_volume(volume) - else: - self._delete_fe_volume(volume) - - def local_path(self, volume): - # TODO: Is this needed here? - raise exception.Error(_("local_path not supported")) - - def ensure_export(self, context, volume): - """On bootup synchronously ensures a volume export is available.""" - if volume['to_vsa_id']: - return self._ensure_be_export(context, volume) - - # Not required for FE volumes. 
VSA VM will ensure volume exposure - pass - - def create_export(self, context, volume): - """For first time creates volume export.""" - if volume['to_vsa_id']: - return self._create_be_export(context, volume) - else: - return self._create_fe_export(context, volume) - - def remove_export(self, context, volume): - if volume['to_vsa_id']: - return self._remove_be_export(context, volume) - else: - return self._remove_fe_export(context, volume) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met""" - # skip the flags.san_ip check & do the regular check - - if not (FLAGS.san_password or FLAGS.san_privatekey): - raise exception.Error(_("Specify san_password or san_privatekey")) - - """ Internal BE Volume methods """ - def _create_be_volume(self, volume): - """Creates BE volume.""" - if int(volume['size']) == 0: - sizestr = '0' # indicates full-partition - else: - sizestr = '%s' % (int(volume['size']) << 30) # size in bytes - - # Set the qos-str to default type sas - # TODO - later for this piece we will get the direct qos-group name - # in create_volume and hence this lookup will not be needed - qosstr = 'SAS_1000' - drive_type = volume.get('drive_type') - if drive_type is not None: - # for now just use the qos-type string from the disktypes. - qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) - - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', - 'create_qospart', - '--qos', qosstr, - '--pname', volume['name'], - '--psize', sizestr, - check_exit_code=0) - LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name']) - - def _delete_be_volume(self, volume): - try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', - 'delete_partition', - '--pname', volume['name'], - check_exit_code=0) - except exception.ProcessExecutionError: - LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) - return - - LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) - - def _create_be_export(self, context, volume): - """create BE export for a volume""" - self._ensure_iscsi_targets(context, volume['host']) - iscsi_target = self.db.volume_allocate_iscsi_target(context, - volume['id'], - volume['host']) - return self._common_be_export(context, volume, iscsi_target) - - def _ensure_be_export(self, context, volume): - """ensure BE export for a volume""" - try: - iscsi_target = self.db.volume_get_iscsi_target_num(context, - volume['id']) - except exception.NotFound: - LOG.info(_("Skipping ensure_export. 
No iscsi_target " + - "provisioned for volume: %d"), volume['id']) - return - - return self._common_be_export(context, volume, iscsi_target) - - def _common_be_export(self, context, volume, iscsi_target): - """ - Common logic that asks zadara_sncfg to setup iSCSI target/lun for - this volume - """ - (out, err) = self._sync_exec('sudo', - '/var/lib/zadara/bin/zadara_sncfg', - 'create_export', - '--pname', volume['name'], - '--tid', iscsi_target, - check_exit_code=0) - - result_xml = ElementTree.fromstring(out) - response_node = result_xml.find("Sn") - if response_node is None: - msg = "Malformed response from zadara_sncfg" - raise exception.Error(msg) - - sn_ip = response_node.findtext("SnIp") - sn_iqn = response_node.findtext("IqnName") - iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target) - - model_update = {} - model_update['provider_location'] = ("%s %s" % - (iscsi_portal, - sn_iqn)) - return model_update - - def _remove_be_export(self, context, volume): - """Removes BE export for a volume.""" - try: - iscsi_target = self.db.volume_get_iscsi_target_num(context, - volume['id']) - except exception.NotFound: - LOG.info(_("Skipping remove_export. No iscsi_target " + - "provisioned for volume: %d"), volume['id']) - return - - try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', - 'remove_export', - '--pname', volume['name'], - '--tid', iscsi_target, - check_exit_code=0) - except exception.ProcessExecutionError: - LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) - return - - def _get_qosgroup_summary(self): - """gets the list of qosgroups from Zadara SN""" - (out, err) = self._sync_exec('sudo', - '/var/lib/zadara/bin/zadara_sncfg', - 'get_qosgroups_xml', - check_exit_code=0) - qos_groups = {} - #qos_groups = [] - result_xml = ElementTree.fromstring(out) - for element in result_xml.findall('QosGroup'): - qos_group = {} - # get the name of the group. 
- # If we cannot find it, forget this element - group_name = element.findtext("Name") - if not group_name: - continue - - # loop through all child nodes & fill up attributes of this group - for child in element.getchildren(): - # two types of elements - property of qos-group & sub property - # classify them accordingly - if child.text: - qos_group[child.tag] = int(child.text) \ - if child.text.isdigit() else child.text - else: - subelement = {} - for subchild in child.getchildren(): - subelement[subchild.tag] = int(subchild.text) \ - if subchild.text.isdigit() else subchild.text - qos_group[child.tag] = subelement - - # Now add this group to the master qos_groups - qos_groups[group_name] = qos_group - #qos_groups.append(qos_group) - - return qos_groups - - """ Internal FE Volume methods """ - def _vsa_run(self, volume, verb, vsa_args): - """ - Runs a command over SSH to VSA instance and checks for return status - """ - vsa_arg_strings = [] - - if vsa_args: - for k, v in vsa_args.items(): - vsa_arg_strings.append(" --%s %s" % (k, v)) - - # Form the zadara_cfg script that will do the configuration at VSA VM - cmd = "/var/lib/zadara/bin/zadara_cfg.py " + verb + \ - ''.join(vsa_arg_strings) - - # get the list of IP's corresponding to VSA VM's - vsa_ips = self.db.vsa_get_vc_ips_list(context.get_admin_context(), - volume['from_vsa_id']) - if not vsa_ips: - raise exception.Error(_("Cannot Lookup VSA VM's IP")) - return - - # pick the first element in the return's fixed_ip for SSH - vsa_ip = vsa_ips[0]['fixed'] - - (out, _err) = self._run_ssh(cmd, san_ip=vsa_ip) - - # check the xml StatusCode to check fro real status - result_xml = ElementTree.fromstring(out) - - status = result_xml.findtext("StatusCode") - if status != '0': - statusmsg = result_xml.findtext("StatusMessage") - msg = (_('vsa_run failed to ' + verb + ' for ' + volume['name'] + - '. 
Result=' + str(statusmsg))) - raise exception.Error(msg) - - return out, _err - - def _create_fe_volume(self, volume): - """Creates FE volume.""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] - vsa_args['volsize'] = sizestr - (out, _err) = self._vsa_run(volume, "create_volume", vsa_args) - - LOG.debug(_("VSA FE create_volume for %s suceeded"), volume['name']) - - def _delete_fe_volume(self, volume): - """Deletes FE volume.""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - (out, _err) = self._vsa_run(volume, "delete_volume", vsa_args) - LOG.debug(_("VSA FE delete_volume for %s suceeded"), volume['name']) - return - - def _create_fe_export(self, context, volume): - """Create FE volume exposure at VSA VM""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - (out, _err) = self._vsa_run(volume, "create_export", vsa_args) - - result_xml = ElementTree.fromstring(out) - response_node = result_xml.find("Vsa") - if response_node is None: - msg = "Malformed response to VSA command " - raise exception.Error(msg) - - LOG.debug(_("VSA create_export for %s suceeded"), volume['name']) - - vsa_ip = response_node.findtext("VsaIp") - vsa_iqn = response_node.findtext("IqnName") - vsa_interface = response_node.findtext("VsaInterface") - iscsi_portal = vsa_ip + ":3260," + vsa_interface - - model_update = {} - model_update['provider_location'] = ("%s %s" % - (iscsi_portal, - vsa_iqn)) - - return model_update - - def remove_fe_export(self, context, volume): - """Remove FE volume exposure at VSA VM""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - (out, _err) = self._vsa_run(volume, "remove_export", vsa_args) - LOG.debug(_("VSA FE remove_export for %s suceeded"), volume['name']) - return - - def get_volume_stats(self, refresh=False): - """Return the current state of the volume service. 
If 'refresh' is - True, run the update first.""" - - drive_info = self._get_qosgroup_summary() - return {'drive_qos_info': drive_info} diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 7ce643aab..b366b6587 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -79,7 +79,7 @@ class API(base.Base): # find DB record for this disk try: - drive_ref = drive_types.drive_type_get_by_name(context, name) + drive_ref = drive_types.get_by_name(context, name) except exception.NotFound: raise exception.ApiError(_("Invalid drive type name %s"), name) diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 781206cdf..5bec96047 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -43,8 +43,8 @@ def _generate_default_drive_name(type, size_gb, rpm, capabilities): (type, str(size_gb), rpm, capabilities) -def drive_type_create(context, type, size_gb, rpm, - capabilities='', visible=True, name=None): +def create(context, type, size_gb, rpm, capabilities='', + visible=True, name=None): if name is None: name = _generate_default_drive_name(type, size_gb, rpm, capabilities) @@ -62,12 +62,12 @@ def drive_type_create(context, type, size_gb, rpm, return db.drive_type_create(context, values) -def drive_type_update(context, name, **kwargs): - LOG.debug(_("Updating drive type %(name)s: "), locals()) - return db.drive_type_update(context, name, kwargs) +def update(context, id, **kwargs): + LOG.debug(_("Updating drive type with id %(id)s"), locals()) + return db.drive_type_update(context, id, kwargs) -def drive_type_rename(context, name, new_name=None): +def rename(context, name, new_name=None): if new_name is None or \ new_name == '': @@ -78,21 +78,22 @@ def drive_type_rename(context, name, new_name=None): LOG.debug(_("Renaming drive type %(name)s to %(new_name)s"), locals()) values = dict(name=new_name) - return db.drive_type_update(context, name, values) + dtype = db.drive_type_get_by_name(context, name) + return db.drive_type_update(context, dtype['id'], values) -def drive_type_delete(context, name): - LOG.debug(_("Deleting drive type %(name)s"), locals()) - db.drive_type_destroy(context, name) +def delete(context, id): + LOG.debug(_("Deleting drive type %d"), id) + db.drive_type_destroy(context, id) -def drive_type_get(context, id): +def get(context, id): return db.drive_type_get(context, id) -def drive_type_get_by_name(context, name): +def get_by_name(context, name): return db.drive_type_get_by_name(context, name) -def drive_type_get_all(context, visible=None): +def get_all(context, visible=True): return db.drive_type_get_all(context, visible) -- cgit From 0750370553c3ce40fdd5e88d9616ddb0fbeedbc1 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 22 Jul 2011 15:22:05 -0700 Subject: pep8-compliant. Prior to merge with 1305 --- nova/volume/manager.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index d2c36e96f..348dab782 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -291,15 +291,16 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_stats: LOG.info(_("Checking volume capabilities")) - if self._volume_stats_changed(self._last_volume_stats, volume_stats): - + if self._volume_stats_changed(self._last_volume_stats, + volume_stats): LOG.info(_("New capabilities found: %s"), volume_stats) self._last_volume_stats = volume_stats - + # This will grab info about the host and queue it # to be sent to the Schedulers. 
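_volume_stats_changed() is called here but its body is not visible in these hunks; a minimal sketch consistent with the call sites (name and behaviour assumed, the in-tree helper may compare entries field by field) is:

def _volume_stats_changed(self, old_stats, new_stats):
    """Assumed helper: True when the capabilities to advertise differ."""
    return old_stats != new_stats

Either way, the effect of the branch is that update_service_capabilities() re-sends the full stats only when something changed and sends None otherwise, so the scheduler fanout is suppressed for unchanged capabilities.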
self.update_service_capabilities(self._last_volume_stats) else: + # avoid repeating fanouts self.update_service_capabilities(None) def notification(self, context, event): -- cgit From 61781dae931ced36db0f2735da474d0bd38a53cf Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 22 Jul 2011 20:25:32 -0700 Subject: more unittest changes --- .../openstack/contrib/virtual_storage_arrays.py | 4 + nova/tests/api/openstack/contrib/test_vsa.py | 238 +++++++++++++++++++-- nova/virt/libvirt/netutils.py | 1 - 3 files changed, 224 insertions(+), 19 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 3c1362f0c..6139b494e 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -353,6 +353,10 @@ class VsaDriveController(VsaVolumeDriveController): """Update a drive. Should be done through VSA APIs""" return faults.Fault(exc.HTTPBadRequest()) + def delete(self, req, vsa_id, id): + """Delete a volume. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + class VsaVPoolController(object): """The vPool VSA API controller for the OpenStack API.""" diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index bc0b7eaa6..c3150fa9c 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -15,18 +15,26 @@ import json import stubout +import unittest import webob -#from nova import compute -from nova import vsa + from nova import exception +from nova import flags +from nova import vsa +from nova import db +from nova import volume from nova import context from nova import test from nova import log as logging +from nova.api import openstack from nova.tests.api.openstack import fakes +import nova.wsgi from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view +FLAGS = flags.FLAGS + LOG = logging.getLogger('nova.tests.api.openstack.vsa') last_param = {} @@ -103,7 +111,7 @@ class VSAApiTest(test.TestCase): self.stubs.UnsetAll() super(VSAApiTest, self).tearDown() - def test_vsa_api_create(self): + def test_vsa_create(self): global last_param last_param = {} @@ -128,7 +136,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp_dict['vsa']['displayDescription'], vsa['displayDescription']) - def test_vsa_api_create_no_body(self): + def test_vsa_create_no_body(self): req = webob.Request.blank('/v1.1/zadr-vsa') req.method = 'POST' req.body = json.dumps({}) @@ -137,7 +145,7 @@ class VSAApiTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 422) - def test_vsa_api_delete(self): + def test_vsa_delete(self): global last_param last_param = {} @@ -149,7 +157,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp.status_int, 200) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) - def test_vsa_api_delete_invalid_id(self): + def test_vsa_delete_invalid_id(self): global last_param last_param = {} @@ -161,7 +169,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp.status_int, 404) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) - def test_vsa_api_show(self): + def test_vsa_show(self): global last_param last_param = {} @@ -176,7 +184,7 @@ class VSAApiTest(test.TestCase): self.assertTrue('vsa' in resp_dict) self.assertEqual(resp_dict['vsa']['id'], str(vsa_id)) - def test_vsa_api_show_invalid_id(self): + def test_vsa_show_invalid_id(self): 
global last_param last_param = {} @@ -187,7 +195,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp.status_int, 404) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) - def test_vsa_api_index(self): + def test_vsa_index(self): req = webob.Request.blank('/v1.1/zadr-vsa') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) @@ -202,7 +210,7 @@ class VSAApiTest(test.TestCase): resp_vsa = resp_vsas.pop() self.assertEqual(resp_vsa['id'], 123) - def test_vsa_api_detail(self): + def test_vsa_detail(self): req = webob.Request.blank('/v1.1/zadr-vsa/detail') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) @@ -218,22 +226,216 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp_vsa['id'], 123) -class VSAVolumeDriveApiTest(test.TestCase): - def setUp(self): - super(VSAVolumeDriveApiTest, self).setUp() +def _get_default_volume_param(): + return { + 'id': 123, + 'status': 'available', + 'size': 100, + 'availability_zone': 'nova', + 'created_at': None, + 'attach_status': 'detached', + 'display_name': 'Default vol name', + 'display_description': 'Default vol description', + 'from_vsa_id': None, + 'to_vsa_id': None, + } + + +def stub_volume_create(self, context, size, snapshot_id, name, description, + **param): + LOG.debug(_("_create: param=%s"), size) + vol = _get_default_volume_param() + for k, v in param.iteritems(): + vol[k] = v + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + return vol + + +def stub_volume_update(self, context, **param): + LOG.debug(_("_volume_update: param=%s"), param) + pass + + +def stub_volume_delete(self, context, **param): + LOG.debug(_("_volume_delete: param=%s"), param) + pass + + +def stub_volume_get(self, context, volume_id): + LOG.debug(_("_volume_get: volume_id=%s"), volume_id) + vol = _get_default_volume_param() + vol['id'] = volume_id + if volume_id == '234': + vol['from_vsa_id'] = 123 + if volume_id == '345': + vol['to_vsa_id'] = 123 + return vol + + +def stub_volume_get_notfound(self, context, volume_id): + raise exception.NotFound + + +def stub_volume_get_all_by_vsa(self, context, vsa_id, direction): + vol = stub_volume_get(self, context, '123') + vol['%s_vsa_id' % direction] = vsa_id + return [vol] + + +def return_vsa(context, vsa_id): + return {'id': vsa_id} + + +class VSAVolumeApiTest(test.TestCase): + + def setUp(self, test_obj=None, test_objs=None): + super(VSAVolumeApiTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) - self.stubs.Set(vsa.api.API, "create", stub_vsa_create) - self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) - self.stubs.Set(vsa.api.API, "get", stub_vsa_get) - self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + self.stubs.Set(nova.db.api, 'vsa_get', return_vsa) + + self.stubs.Set(volume.api.API, "create", stub_volume_create) + self.stubs.Set(volume.api.API, "update", stub_volume_update) + self.stubs.Set(volume.api.API, "delete", stub_volume_delete) + self.stubs.Set(volume.api.API, "get_all_by_vsa", + stub_volume_get_all_by_vsa) + self.stubs.Set(volume.api.API, "get", stub_volume_get) self.context = context.get_admin_context() + self.test_obj = test_obj if test_obj else "volume" + self.test_objs = test_objs if test_objs else "volumes" + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAVolumeApiTest, self).tearDown() + + def 
test_vsa_volume_create(self): + vol = {"size": 100, + "displayName": "VSA Volume Test Name", + "displayDescription": "VSA Volume Test Desc"} + body = {self.test_obj: vol} + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + resp = req.get_response(fakes.wsgi_app()) + + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + self.assertTrue(self.test_obj in resp_dict) + self.assertEqual(resp_dict[self.test_obj]['size'], + vol['size']) + self.assertEqual(resp_dict[self.test_obj]['displayName'], + vol['displayName']) + self.assertEqual(resp_dict[self.test_obj]['displayDescription'], + vol['displayDescription']) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_create_no_body(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 422) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_index(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_detail(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/detail' % \ + self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_show_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + + def test_vsa_volume_update(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + update = {"status": "available"} + body = {self.test_obj: update} + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'PUT' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def 
test_vsa_volume_delete_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 404) + else: + self.assertEqual(resp.status_int, 400) + + +class VSADriveApiTest(VSAVolumeApiTest): + def setUp(self): + super(VSADriveApiTest, self).setUp(test_obj="drive", + test_objs="drives") def tearDown(self): self.stubs.UnsetAll() - super(VSAVolumeDriveApiTest, self).tearDown() + super(VSADriveApiTest, self).tearDown() diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index c0d808cd3..041eacb2d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -99,7 +99,6 @@ def get_network_info(instance): if network['dns2']: mapping['dns'].append(network['dns2']) - if FLAGS.use_ipv6: mapping['ip6s'] = [ip6_dict()] mapping['gateway6'] = network['gateway_v6'] -- cgit From d963e25906b75a48c75b6e589deb2a53f75d6ee3 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Fri, 22 Jul 2011 20:29:37 -0700 Subject: Config-Drive happiness, minus smoketest --- nova/api/openstack/create_instance_helper.py | 7 +- nova/api/openstack/views/servers.py | 4 + nova/compute/api.py | 21 +++- .../versions/035_add_config_drive_to_instances.py | 43 +++++++ nova/db/sqlalchemy/models.py | 1 + nova/scheduler/simple.py | 1 + nova/tests/api/openstack/test_servers.py | 135 ++++++++++++++++++++- nova/tests/test_compute.py | 14 +++ nova/virt/libvirt.xml.template | 7 ++ nova/virt/libvirt/connection.py | 51 ++++++-- 10 files changed, 263 insertions(+), 21 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/035_add_config_drive_to_instances.py (limited to 'nova') diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 2654e3c40..fe92bae2e 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -96,6 +96,7 @@ class CreateInstanceHelper(object): locals()) raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + config_drive = body['server'].get('config_drive') personality = body['server'].get('personality') injected_files = [] @@ -130,6 +131,7 @@ class CreateInstanceHelper(object): extra_values = { 'instance_type': inst_type, 'image_ref': image_href, + 'config_drive': config_drive, 'password': password} return (extra_values, @@ -148,7 +150,8 @@ class CreateInstanceHelper(object): zone_blob=zone_blob, reservation_id=reservation_id, min_count=min_count, - max_count=max_count)) + max_count=max_count, + config_drive=config_drive,)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: @@ -160,6 +163,8 @@ class CreateInstanceHelper(object): def _handle_quota_error(self, error): """ Reraise quota errors as api-specific http exceptions + + """ if error.code == "OnsetFileLimitExceeded": expl = _("Personality file limit exceeded") diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ab7e8da61..961932a4e 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -164,6 +164,7 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) + self._build_config_drive(response, inst) def _build_links(self, response, inst): href = self.generate_href(inst["id"]) @@ -182,6 
+183,9 @@ class ViewBuilderV11(ViewBuilder): response["server"]["links"] = links + def _build_config_drive(self, response, inst): + response['server']['config_drive'] = inst.get('config_drive') + def generate_href(self, server_id): """Create an url that refers to a specific server id.""" return os.path.join(self.base_url, "servers", str(server_id)) diff --git a/nova/compute/api.py b/nova/compute/api.py index 67aa3c20f..8adcbbef6 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -150,7 +150,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None): + reservation_id=None, config_drive=None,): """Verify all the input parameters regardless of the provisioning strategy being performed.""" @@ -181,6 +181,11 @@ class API(base.Base): (image_service, image_id) = nova.image.get_image_service(image_href) image = image_service.show(context, image_id) + config_drive_id = None + if config_drive and config_drive is not True: + # config_drive is volume id + config_drive, config_drive_id = None, config_drive + os_type = None if 'properties' in image and 'os_type' in image['properties']: os_type = image['properties']['os_type'] @@ -208,6 +213,8 @@ class API(base.Base): image_service.show(context, kernel_id) if ramdisk_id: image_service.show(context, ramdisk_id) + if config_drive_id: + image_service.show(context, config_drive_id) self.ensure_default_security_group(context) @@ -226,6 +233,8 @@ class API(base.Base): 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', + 'config_drive_id': config_drive_id or '', + 'config_drive': config_drive or '', 'state': 0, 'state_description': 'scheduling', 'user_id': context.user_id, @@ -397,7 +406,8 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None): + reservation_id=None, block_device_mapping=None, + config_drive=None): """Provision the instances by passing the whole request to the Scheduler for execution. Returns a Reservation ID related to the creation of all of these instances.""" @@ -409,7 +419,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id) + reservation_id, config_drive) self._ask_scheduler_to_create_instance(context, base_options, instance_type, zone_blob, @@ -426,7 +436,8 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None): + reservation_id=None, block_device_mapping=None, + config_drive=None,): """ Provision the instances by sending off a series of single instance requests to the Schedulers. 
This is fine for trival @@ -447,7 +458,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id) + reservation_id, config_drive) block_device_mapping = block_device_mapping or [] instances = [] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/035_add_config_drive_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/035_add_config_drive_to_instances.py new file mode 100644 index 000000000..65ea012dd --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/035_add_config_drive_to_instances.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + +from nova import utils + + +meta = MetaData() + +instances = Table("instances", meta, + Column("id", Integer(), primary_key=True, nullable=False)) +config_drive_column = Column("config_drive", String(255)) # matches image_ref + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances.create_column(config_drive_column) + + rows = migrate_engine.execute(instances.select()) + for row in rows: + instance_config_drive = None # pre-existing instances don't have one. 
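The config_drive plumbing added to nova/compute/api.py above overloads a single field: a literal True requests a blank drive, while any other truthy value is treated as the id of an image to use as the drive. A small, self-contained restatement of that convention (the helper name and asserts are illustrative, not part of the patch):

def split_config_drive(value):
    # Mirrors the branch above: True -> build an empty config drive;
    # any other truthy value -> treat it as the image id to fetch.
    if value and value is not True:
        return None, value          # (config_drive, config_drive_id)
    return value, None

assert split_config_drive(True) == (True, None)
assert split_config_drive('2') == (None, '2')
assert split_config_drive(None) == (None, None)

The libvirt change later in this series then either pulls config_drive_id through the image cache or formats a 64MB msdos drive when config_drive is True.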
+ migrate_engine.execute(instances.update()\ + .where(instances.c.id == row[0])\ + .values(config_drive=instance_config_drive)) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances.drop_column(config_drive_column) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index c1150f7ca..73ad1f011 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -238,6 +238,7 @@ class Instance(BASE, NovaBase): uuid = Column(String(36)) root_device_name = Column(String(255)) + config_drive = Column(String(255)) # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index fc1b3142a..61c76b35d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -41,6 +41,7 @@ class SimpleScheduler(chance.ChanceScheduler): def _schedule_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" + instance_ref = db.instance_get(context, instance_id) if (instance_ref['availability_zone'] and ':' in instance_ref['availability_zone'] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3fc38b73c..6a1eb8c87 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -775,11 +775,14 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 400) self.assertTrue(res.body.find('marker param') > -1) - def _setup_for_create_instance(self): + def _setup_for_create_instance(self, instance_db_stub=None): """Shared implementation for tests below that create instance""" def instance_create(context, inst): - return {'id': 1, 'display_name': 'server_test', - 'uuid': FAKE_UUID} + if instance_db_stub: + return instance_db_stub + else: + return {'id': 1, 'display_name': 'server_test', + 'uuid': FAKE_UUID,} def server_update(context, id, params): return instance_create(context, id) @@ -997,6 +1000,132 @@ class ServersTest(test.TestCase): self.assertEqual(flavor_ref, server['flavorRef']) self.assertEqual(image_href, server['imageRef']) + def test_create_instance_with_config_drive_v1_1(self): + db_stub = {'id': 100, 'display_name': 'config_drive_test', + 'uuid': FAKE_UUID, 'config_drive': True} + self._setup_for_create_instance(instance_db_stub=db_stub) + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'metadata': { + 'hello': 'world', + 'open': 'stack', + }, + 'personality': {}, + 'config_drive': True, + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + server = json.loads(res.body)['server'] + self.assertEqual(100, server['id']) + self.assertTrue(server['config_drive']) + + def test_create_instance_with_config_drive_as_id_v1_1(self): + db_stub = {'id': 100, 'display_name': 'config_drive_test', + 'uuid': FAKE_UUID, 'config_drive': 2} + self._setup_for_create_instance(instance_db_stub=db_stub) + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'metadata': { + 'hello': 
'world', + 'open': 'stack', + }, + 'personality': {}, + 'config_drive': 2, + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + server = json.loads(res.body)['server'] + self.assertEqual(100, server['id']) + self.assertTrue(server['config_drive']) + self.assertEqual(2, server['config_drive']) + + def test_create_instance_with_bad_config_drive_v1_1(self): + db_stub = {'id': 100, 'display_name': 'config_drive_test', + 'uuid': FAKE_UUID, 'config_drive': 'asdf'} + self._setup_for_create_instance(instance_db_stub=db_stub) + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'metadata': { + 'hello': 'world', + 'open': 'stack', + }, + 'personality': {}, + 'config_drive': 'asdf', + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_without_config_drive_v1_1(self): + db_stub = {'id': 100, 'display_name': 'config_drive_test', + 'uuid': FAKE_UUID, 'config_drive': None} + self._setup_for_create_instance(instance_db_stub=db_stub) + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'metadata': { + 'hello': 'world', + 'open': 'stack', + }, + 'personality': {}, + 'config_drive': True, + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + server = json.loads(res.body)['server'] + self.assertEqual(100, server['id']) + self.assertFalse(server['config_drive']) + def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 5d59b628a..bc681706d 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -162,6 +162,20 @@ class ComputeTestCase(test.TestCase): db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) + def test_create_instance_associates_config_drive(self): + """Make sure create associates a config drive.""" + + instance_id = self._create_instance(params={'config_drive': True,}) + + try: + self.compute.run_instance(self.context, instance_id) + instances = db.instance_get_all(context.get_admin_context()) + instance = instances[0] + + self.assertTrue(instance.config_drive) + finally: + db.instance_destroy(self.context, instance_id) + def test_default_hostname_generator(self): cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'), ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')] diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index e1a683da8..4422f349f 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -55,6 +55,13 @@ #else + #if $getVar('config', False) + + + + + + #end if #if $getVar('rescue', False) diff --git a/nova/virt/libvirt/connection.py 
b/nova/virt/libvirt/connection.py index 342dea98f..ad97dc796 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -123,6 +123,8 @@ flags.DEFINE_string('qemu_img', 'qemu-img', 'binary to use for qemu-img commands') flags.DEFINE_bool('start_guests_on_host_boot', False, 'Whether to restart guests when the host reboots') +flags.DEFINE_string('default_local_format', None, + 'Default filesystem format for local drives') def get_connection(read_only): @@ -586,6 +588,8 @@ class LibvirtConnection(driver.ComputeDriver): block_device_mapping = block_device_mapping or [] self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) + + # This is where things actually get built. self._create_image(instance, xml, network_info=network_info, block_device_mapping=block_device_mapping) domain = self._create_new_domain(xml) @@ -763,10 +767,15 @@ class LibvirtConnection(driver.ComputeDriver): if size: disk.extend(target, size) - def _create_local(self, target, local_gb): + def _create_local(self, target, local_size, prefix='G', fs_format=None): """Create a blank image of specified size""" - utils.execute('truncate', target, '-s', "%dG" % local_gb) - # TODO(vish): should we format disk by default? + + if not fs_format: + fs_format = FLAGS.default_local_format + + utils.execute('truncate', target, '-s', "%d%c" % (local_size, prefix)) + if fs_format: + utils.execute('mkfs', '-t', fs_format, target) def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, network_info=None, block_device_mapping=None): @@ -859,6 +868,18 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'lxc': target_partition = None + else: + if inst['config_drive_id']: + fname = '%08x' % int(inst['config_drive_id']) + self._cache_image(fn=self._fetch_image, + target=basepath('config'), + fname=fname, + image_id=inst['config_drive_id'], + user=user, + project=project) + elif inst['config_drive']: + self._create_local(basepath('config'), 64, prefix="M", + fs_format='msdos') # 64MB if inst['key_data']: key = str(inst['key_data']) @@ -903,17 +924,23 @@ class LibvirtConnection(driver.ComputeDriver): searchList=[{'interfaces': nets, 'use_ipv6': FLAGS.use_ipv6}])) - if key or net: + if any((key, net, inst['metadata'])): inst_name = inst['name'] - img_id = inst.image_ref - if key: - LOG.info(_('instance %(inst_name)s: injecting key into' - ' image %(img_id)s') % locals()) - if net: - LOG.info(_('instance %(inst_name)s: injecting net into' - ' image %(img_id)s') % locals()) + + if inst['config_drive']: # Should be True or None by now. + injection_path = basepath('config') + img_id = 'config-drive' + else: + injection_path = basepath('disk') + img_id = inst.image_ref + + for injection in ('metadata', 'key', 'net'): + if locals()[injection]: + LOG.info(_('instance %(inst_name)s: injecting ' + '%(injection)s into image %(img_id)s' + % locals())) try: - disk.inject_data(basepath('disk'), key, net, + disk.inject_data(injection_path, key, net, inst['metadata'], partition=target_partition, nbd=FLAGS.use_cow_images) -- cgit From fb755ae05b0b6a7b3701614c8d702e8a24ff380c Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Sun, 24 Jul 2011 00:07:00 -0700 Subject: some cosmetic changes.
Prior to merge proposal --- nova/tests/test_vsa.py | 185 +++++++++++++++++++++++++++++++++++++++++++++++++ nova/vsa/api.py | 44 +++++++----- 2 files changed, 212 insertions(+), 17 deletions(-) create mode 100644 nova/tests/test_vsa.py (limited to 'nova') diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py new file mode 100644 index 000000000..859fe3325 --- /dev/null +++ b/nova/tests/test_vsa.py @@ -0,0 +1,185 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import stubout +import base64 + +from xml.etree import ElementTree +from xml.etree.ElementTree import Element, SubElement + +from nova import exception +from nova import flags +from nova import vsa +from nova import db +from nova import context +from nova import test +from nova import log as logging +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa') + + +def fake_drive_type_get_by_name(context, name): + drive_type = { + 'id': 1, + 'name': name, + 'type': name.split('_')[0], + 'size_gb': int(name.split('_')[1]), + 'rpm': name.split('_')[2], + 'capabilities': '', + 'visible': True} + return drive_type + + +class VsaTestCase(test.TestCase): + + def setUp(self): + super(VsaTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + + self.context_non_admin = context.RequestContext(None, None) + self.context = context.get_admin_context() + + def fake_show_by_name(meh, context, name): + if name == 'wrong_image_name': + LOG.debug(_("Test: Emulate wrong VSA name. 
Raise")) + raise exception.ImageNotFound + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, 'show_by_name', + fake_show_by_name) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaTestCase, self).tearDown() + + def test_vsa_create_delete_defaults(self): + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['display_name'], param['display_name']) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_delete_check_in_db(self): + vsa_list1 = self.vsa_api.get_all(self.context) + vsa_ref = self.vsa_api.create(self.context) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + + self.vsa_api.delete(self.context, vsa_ref['id']) + vsa_list3 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list3), len(vsa_list2) - 1) + + def test_vsa_create_delete_high_vc_count(self): + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_wrong_image_name(self): + param = {'image_name': 'wrong_image_name'} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_db_error(self): + + def fake_vsa_create(context, options): + LOG.debug(_("Test: Emulate DB error. Raise")) + raise exception.Error + + self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create) + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context) + + def test_vsa_create_wrong_storage_params(self): + vsa_list1 = self.vsa_api.get_all(self.context) + param = {'storage': [{'stub': 1}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + + param = {'storage': [{'drive_name': 'wrong name'}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_with_storage(self, multi_vol_creation=True): + """Test creation of VSA with BE storage""" + + FLAGS.vsa_multi_vol_creation = multi_vol_creation + + self.stubs.Set(nova.vsa.drive_types, 'get_by_name', + fake_drive_type_get_by_name) + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 3) + self.vsa_api.delete(self.context, vsa_ref['id']) + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}], + 'shared': True} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 15) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_with_storage_single_volumes(self): + self.test_vsa_create_with_storage(multi_vol_creation=False) + + def test_vsa_update(self): + vsa_ref = self.vsa_api.create(self.context) + + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + + param = {'vc_count': 2} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], 2) + + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_generate_user_data(self): + 
self.stubs.Set(nova.vsa.drive_types, 'get_by_name', + fake_drive_type_get_by_name) + + FLAGS.vsa_multi_vol_creation = False + param = {'display_name': 'VSA name test', + 'display_description': 'VSA desc test', + 'vc_count': 2, + 'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + volumes = db.volume_get_all_assigned_to_vsa(self.context, + vsa_ref['id']) + + user_data = self.vsa_api.generate_user_data(self.context, + vsa_ref, + volumes) + user_data = base64.b64decode(user_data) + + LOG.debug(_("Test: user_data = %s"), user_data) + + elem = ElementTree.fromstring(user_data) + self.assertEqual(elem.findtext('name'), + param['display_name']) + self.assertEqual(elem.findtext('description'), + param['display_description']) + self.assertEqual(elem.findtext('vc_count'), + str(param['vc_count'])) + + self.vsa_api.delete(self.context, vsa_ref['id']) diff --git a/nova/vsa/api.py b/nova/vsa/api.py index b366b6587..80637cc9e 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -74,15 +74,15 @@ class API(base.Base): num_disks = node.get('num_drives', 1) if name is None: - raise exception.ApiError(_("No drive_name param found in %s"), - node) + raise exception.ApiError(_("No drive_name param found in %s") + % node) # find DB record for this disk try: drive_ref = drive_types.get_by_name(context, name) except exception.NotFound: - raise exception.ApiError(_("Invalid drive type name %s"), - name) + raise exception.ApiError(_("Invalid drive type name %s") + % name) # if size field present - override disk size specified in DB size = node.get('size', drive_ref['size_gb']) @@ -149,8 +149,8 @@ class API(base.Base): vc_image = image_service.show_by_name(context, image_name) vc_image_href = vc_image['id'] except exception.ImageNotFound: - raise exception.ApiError(_("Failed to find configured image %s"), - image_name) + raise exception.ApiError(_("Failed to find configured image %s") + % image_name) options = { 'display_name': display_name, @@ -258,34 +258,42 @@ class API(base.Base): """ LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals()) + updatable_fields = ['status', 'vc_count', 'vol_count', + 'display_name', 'display_description'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + vc_count = kwargs.get('vc_count', None) if vc_count is not None: # VP-TODO: This request may want to update number of VCs # Get number of current VCs and add/delete VCs appropriately vsa = self.get(context, vsa_id) vc_count = int(vc_count) + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + if vsa['vc_count'] != vc_count: self.update_num_vcs(context, vsa, vc_count) + changes['vc_count'] = vc_count - return self.db.vsa_update(context, vsa_id, kwargs) + return self.db.vsa_update(context, vsa_id, changes) def update_num_vcs(self, context, vsa, vc_count): - if vc_count > FLAGS.max_vcs_in_vsa: - LOG.warning(_("Requested number of VCs (%d) is too high."\ - " Setting to default"), vc_count) - vc_count = FLAGS.max_vcs_in_vsa - vsa_name = vsa['name'] - old_vc_count = vsa['vc_count'] + old_vc_count = int(vsa['vc_count']) if vc_count > old_vc_count: add_cnt = vc_count - old_vc_count - LOG.debug(_("Adding %(add_cnt)d VCs to VSA %(vsa_name)s."), + LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."), locals()) # VP-TODO: actual code for adding new VCs elif vc_count < old_vc_count: del_cnt = 
old_vc_count - vc_count - LOG.debug(_("Deleting %(add_cnt)d VCs from VSA %(vsa_name)s."), + LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."), locals()) # VP-TODO: actual code for deleting extra VCs @@ -372,9 +380,11 @@ class API(base.Base): e_vsa_detail = SubElement(e_vsa, "vc_count") e_vsa_detail.text = str(vsa['vc_count']) e_vsa_detail = SubElement(e_vsa, "auth_user") - e_vsa_detail.text = str(context.user.name) + if context.user is not None: + e_vsa_detail.text = str(context.user.name) e_vsa_detail = SubElement(e_vsa, "auth_access_key") - e_vsa_detail.text = str(context.user.access) + if context.user is not None: + e_vsa_detail.text = str(context.user.access) e_volumes = SubElement(e_vsa, "volumes") for volume in volumes: -- cgit From c500eac4589e9cb22e5e71b900164a151290ec03 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 25 Jul 2011 16:26:23 -0700 Subject: some cleanup. VSA flag status changes. returned some files --- nova/CA/newcerts/.placeholder | 0 nova/CA/private/.placeholder | 0 nova/CA/projects/.gitignore | 1 + nova/CA/projects/.placeholder | 0 nova/CA/reqs/.gitignore | 1 + nova/CA/reqs/.placeholder | 0 nova/api/ec2/cloud.py | 1 - nova/api/openstack/contrib/drive_types.py | 2 +- .../openstack/contrib/virtual_storage_arrays.py | 19 +++++++++++-------- .../migrate_repo/versions/036_add_vsa_data.py | 1 + nova/flags.py | 14 -------------- nova/scheduler/vsa.py | 6 ++++-- nova/tests/api/openstack/contrib/test_vsa.py | 4 +++- nova/tests/test_drive_types.py | 1 + nova/volume/driver.py | 21 +++------------------ nova/vsa/__init__.py | 1 + nova/vsa/api.py | 22 ++++++++++++++++------ nova/vsa/connection.py | 1 + nova/vsa/drive_types.py | 1 + nova/vsa/fake.py | 1 + nova/vsa/manager.py | 12 ++++++------ 21 files changed, 52 insertions(+), 57 deletions(-) create mode 100644 nova/CA/newcerts/.placeholder create mode 100644 nova/CA/private/.placeholder create mode 100644 nova/CA/projects/.gitignore create mode 100644 nova/CA/projects/.placeholder create mode 100644 nova/CA/reqs/.gitignore create mode 100644 nova/CA/reqs/.placeholder (limited to 'nova') diff --git a/nova/CA/newcerts/.placeholder b/nova/CA/newcerts/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/nova/CA/private/.placeholder b/nova/CA/private/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/nova/CA/projects/.gitignore b/nova/CA/projects/.gitignore new file mode 100644 index 000000000..72e8ffc0d --- /dev/null +++ b/nova/CA/projects/.gitignore @@ -0,0 +1 @@ +* diff --git a/nova/CA/projects/.placeholder b/nova/CA/projects/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/nova/CA/reqs/.gitignore b/nova/CA/reqs/.gitignore new file mode 100644 index 000000000..72e8ffc0d --- /dev/null +++ b/nova/CA/reqs/.gitignore @@ -0,0 +1 @@ +* diff --git a/nova/CA/reqs/.placeholder b/nova/CA/reqs/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 56a5850f6..6fc74c92a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -902,7 +902,6 @@ class CloudController(object): image_name = kwargs.get('image_name') availability_zone = kwargs.get('placement', {}).get( 'AvailabilityZone') - #storage = ast.literal_eval(kwargs.get('storage', '[]')) storage = kwargs.get('storage', []) shared = kwargs.get('shared', False) diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index 590eaaec0..6454fd81f 100644 --- 
a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -118,7 +119,6 @@ class DriveTypeController(object): drive_types.delete(context, id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - # return exc.HTTPAccepted() class Drive_types(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 6139b494e..68a00fd7d 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -153,7 +154,6 @@ class VsaController(object): self.vsa_api.delete(context, vsa_id=id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - # return exc.HTTPAccepted() class VsaVolumeDriveController(volumes.VolumeController): @@ -193,6 +193,7 @@ class VsaVolumeDriveController(volumes.VolumeController): d = translation(context, vol) d['vsaId'] = vol[self.direction] + d['name'] = vol['name'] return d def _check_volume_ownership(self, context, vsa_id, id): @@ -265,15 +266,17 @@ class VsaVolumeDriveController(volumes.VolumeController): return faults.Fault(exc.HTTPBadRequest()) vol = body[self.object] - updatable_fields = ['display_name', - 'display_description', - 'status', - 'provider_location', - 'provider_auth'] + updatable_fields = [{'displayName': 'display_name'}, + {'displayDescription': 'display_description'}, + {'status': 'status'}, + {'providerLocation': 'provider_location'}, + {'providerAuth': 'provider_auth'}] changes = {} for field in updatable_fields: - if field in vol: - changes[field] = vol[field] + key = field.keys()[0] + val = field[key] + if key in vol: + changes[val] = vol[key] obj = self.object LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"), diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py index 7fc8f955c..5d2e56a7e 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. 
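The VsaVolumeDriveController.update() change above translates the camelCase keys accepted by the OpenStack API (displayName, providerLocation, ...) into the snake_case column names stored for volumes. A minimal, hypothetical sketch of the same translation using a single mapping dict; the field names are taken from the diff, the helper itself is illustrative and not part of this patch:

    # Hypothetical helper: map external (camelCase) volume attributes to
    # internal column names before calling volume_update.
    VOLUME_FIELD_MAP = {
        'displayName': 'display_name',
        'displayDescription': 'display_description',
        'status': 'status',
        'providerLocation': 'provider_location',
        'providerAuth': 'provider_auth',
    }

    def extract_volume_changes(vol):
        """Return only recognized fields, renamed to their column names."""
        return dict((internal, vol[external])
                    for external, internal in VOLUME_FIELD_MAP.items()
                    if external in vol)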
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/flags.py b/nova/flags.py index 8000eac4a..9f5965919 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -378,20 +378,6 @@ DEFINE_integer('max_vcs_in_vsa', 32, DEFINE_integer('vsa_part_size_gb', 100, 'default partition size for shared capacity') -DEFINE_string('vsa_status_creating', 'creating', - 'VSA creating (not ready yet)') -DEFINE_string('vsa_status_launching', 'launching', - 'Launching VCs (all BE volumes were created)') -DEFINE_string('vsa_status_created', 'created', - 'VSA fully created and ready for use') -DEFINE_string('vsa_status_partial', 'partial', - 'Some BE storage allocations failed') -DEFINE_string('vsa_status_failed', 'failed', - 'Some BE storage allocations failed') -DEFINE_string('vsa_status_deleting', 'deleting', - 'VSA started the deletion procedure') - - # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for images.') diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 059afce68..6931afc2b 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -24,6 +25,7 @@ from nova import rpc from nova import db from nova import flags from nova import utils +from nova.vsa.api import VsaState from nova.volume import api as volume_api from nova.scheduler import driver from nova.scheduler import simple @@ -301,7 +303,7 @@ class VsaScheduler(simple.SimpleScheduler): except: if vsa_id: db.vsa_update(context, vsa_id, - dict(status=FLAGS.vsa_status_failed)) + dict(status=VsaState.FAILED)) for vol in volume_params: if 'capabilities' in vol: @@ -346,7 +348,7 @@ class VsaScheduler(simple.SimpleScheduler): except: if volume_ref['to_vsa_id']: db.vsa_update(context, volume_ref['to_vsa_id'], - dict(status=FLAGS.vsa_status_failed)) + dict(status=VsaState.FAILED)) raise #return super(VsaScheduler, self).schedule_create_volume(context, # volume_id, *_args, **_kwargs) diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index c3150fa9c..3c9136e14 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -234,6 +234,7 @@ def _get_default_volume_param(): 'availability_zone': 'nova', 'created_at': None, 'attach_status': 'detached', + 'name': 'vol name', 'display_name': 'Default vol name', 'display_description': 'Default vol description', 'from_vsa_id': None, @@ -386,7 +387,8 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_update(self): obj_num = 234 if self.test_objs == "volumes" else 345 - update = {"status": "available"} + update = {"status": "available", + "displayName": "Test Display name"} body = {self.test_obj: update} req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ (self.test_objs, obj_num)) diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py index 8534bcde5..e91c41321 100644 --- a/nova/tests/test_drive_types.py +++ b/nova/tests/test_drive_types.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. 
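The vsa_status_* flags deleted above are superseded by the VsaState constants added to nova/vsa/api.py later in this series, so status values become class attributes instead of configurable strings. A minimal sketch of the intended usage; the helper names are illustrative, only VsaState and db.vsa_update come from the patch:

    from nova import db
    from nova.vsa.api import VsaState

    def vsa_is_ready(vsa):
        # Status values are class attributes rather than FLAGS entries,
        # so they can no longer be overridden from a configuration file.
        return vsa['status'] == VsaState.CREATED

    def mark_vsa_failed(context, vsa_id):
        # Mirrors what the VSA scheduler does when BE allocation fails.
        db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))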
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/volume/driver.py b/nova/volume/driver.py index b93fc1d92..2e3da57b2 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -507,15 +507,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept - # multiple args for iscsi-command. Like in --op new. Hence - # using a local version here which does the same thing - (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'new', - '-m', 'node', - '-T', iscsi_properties['target_iqn'], - '-p', iscsi_properties['target_portal']) - # self._run_iscsiadm(iscsi_properties, ('--op', 'new')) - # zadara-end + self._run_iscsiadm(iscsi_properties, ('--op', 'new')) if iscsi_properties.get('auth_method'): self._iscsiadm_update(iscsi_properties, @@ -567,15 +559,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept - # multiple args for iscsi-command. Like in --op delete. Hence - # using a local version here which does the same thing - (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'delete', - '-m', 'node', - '-T', iscsi_properties['target_iqn'], - '-p', iscsi_properties['target_portal']) - #self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) - # zadara-end + self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) def check_for_export(self, context, volume_id): """Make sure volume is exported.""" @@ -916,6 +900,7 @@ class ZadaraBEDriver(ISCSIDriver): ret = self._common_be_export(context, volume, iscsi_target) except: raise exception.ProcessExecutionError + return ret def remove_export(self, context, volume): """Removes BE export for a volume.""" diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py index a94a6b7a4..779b7fb65 100644 --- a/nova/vsa/__init__.py +++ b/nova/vsa/__init__.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 80637cc9e..99793efa3 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -19,12 +20,10 @@ Handles all requests relating to Virtual Storage Arrays (VSAs). 
""" -#import datetime import sys import base64 from xml.etree import ElementTree -from xml.etree.ElementTree import Element, SubElement from nova import db from nova import exception @@ -47,6 +46,15 @@ flags.DEFINE_boolean('vsa_multi_vol_creation', True, LOG = logging.getLogger('nova.vsa') +class VsaState: + CREATING = 'creating' # VSA creating (not ready yet) + LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) + CREATED = 'created' # VSA fully created and ready for use + PARTIAL = 'partial' # Some BE storage allocations failed + FAILED = 'failed' # Some BE storage allocations failed + DELETING = 'deleting' # VSA started the deletion procedure + + class API(base.Base): """API for interacting with the VSA manager.""" @@ -160,7 +168,7 @@ class API(base.Base): 'instance_type_id': instance_type['id'], 'image_ref': vc_image_href, 'vc_count': vc_count, - 'status': FLAGS.vsa_status_creating, + 'status': VsaState.CREATING, } LOG.info(_("Creating VSA: %s") % options) @@ -178,7 +186,7 @@ class API(base.Base): storage, shared) except exception.ApiError: self.update_vsa_status(context, vsa_id, - status=FLAGS.vsa_status_failed) + status=VsaState.FAILED) raise # after creating DB entry, re-check and set some defaults @@ -227,7 +235,7 @@ class API(base.Base): availability_zone=availability_zone) except: self.update_vsa_status(context, vsa_id, - status=FLAGS.vsa_status_partial) + status=VsaState.PARTIAL) raise if len(volume_params) == 0: @@ -369,7 +377,9 @@ class API(base.Base): return self.db.vsa_get_all_by_project(context, context.project_id) def generate_user_data(self, context, vsa, volumes): - e_vsa = Element("vsa") + SubElement = ElementTree.SubElement + + e_vsa = ElementTree.Element("vsa") e_vsa_detail = SubElement(e_vsa, "id") e_vsa_detail.text = str(vsa['id']) diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py index 6c61acee4..5de8021a7 100644 --- a/nova/vsa/connection.py +++ b/nova/vsa/connection.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 5bec96047..86ff76b96 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py index 308d21fec..d96138255 100644 --- a/nova/vsa/fake.py +++ b/nova/vsa/fake.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index c67358672..1390f8146 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -30,6 +31,7 @@ from nova import exception from nova import compute from nova import volume from nova import vsa +from nova.vsa.api import VsaState from nova.compute import instance_types @@ -114,9 +116,9 @@ class VsaManager(manager.SchedulerDependentManager): """Start VCs for VSA """ vsa_id = vsa['id'] - if vsa['status'] == FLAGS.vsa_status_creating: + if vsa['status'] == VsaState.CREATING: self.vsa_api.update_vsa_status(context, vsa_id, - FLAGS.vsa_status_launching) + VsaState.LAUNCHING) else: return @@ -144,8 +146,7 @@ class VsaManager(manager.SchedulerDependentManager): if has_failed_volumes: LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True) - self.vsa_api.update_vsa_status(context, vsa_id, - FLAGS.vsa_status_failed) + self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED) return # create user-data record for VC @@ -170,5 +171,4 @@ class VsaManager(manager.SchedulerDependentManager): user_data=storage_data, vsa_id=vsa_id) - self.vsa_api.update_vsa_status(context, vsa_id, - FLAGS.vsa_status_created) + self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) -- cgit From a0a3f0157d6f4e8563a5a1e4ee1bde92388f25fc Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 25 Jul 2011 16:58:09 -0700 Subject: volume name change. some cleanup --- nova/db/sqlalchemy/models.py | 20 -------------------- nova/flags.py | 1 - nova/vsa/api.py | 12 ++++++++---- 3 files changed, 8 insertions(+), 25 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fbc8e9e19..42b97867d 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -279,14 +279,6 @@ class VirtualStorageArray(BASE, NovaBase): vol_count = Column(Integer, default=0) # total number of BE volumes status = Column(String(255)) - #admin_pass = Column(String(255)) - - #disks = relationship(VsaDiskAssociation, - # backref=backref('vsa', uselist=False), - # foreign_keys=id, - # primaryjoin='and_(VsaDiskAssociation.vsa_id == ' - # 'VirtualStorageArray.id)') - class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" @@ -401,18 +393,6 @@ class DriveTypes(BASE, NovaBase): primaryjoin='and_(Volume.drive_type_id == ' 'DriveTypes.id)') -# -#class VsaDiskAssociation(BASE, NovaBase): -# """associates drive types with Virtual Storage Arrays.""" -# __tablename__ = 'vsa_disk_association' -# -# id = Column(Integer, primary_key=True, autoincrement=True) -# -# drive_type_id = Column(Integer, ForeignKey('drive_types.id')) -# vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id')) -# -# disk_num = Column(Integer, nullable=False) # number of disks - class Quota(BASE, NovaBase): """Represents a single quota override for a project. 
diff --git a/nova/flags.py b/nova/flags.py index 9f5965919..c192b5281 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -369,7 +369,6 @@ DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', 'Manager for vsa') DEFINE_string('vc_image_name', 'vc_image', 'the VC image ID (for a VC image that exists in DB Glance)') -#--------------------------------------------------------------------- # VSA constants and enums DEFINE_string('default_vsa_instance_type', 'm1.small', 'default instance type for VSA instances') diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 99793efa3..9b2750d82 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -67,7 +67,8 @@ class API(base.Base): return instance_types.get_instance_type_by_name( FLAGS.default_vsa_instance_type) - def _check_storage_parameters(self, context, vsa_name, storage, shared): + def _check_storage_parameters(self, context, vsa_name, storage, + shared, first_index=0): """ Translates storage array of disks to the list of volumes :param storage: List of dictionaries with following keys: @@ -105,13 +106,16 @@ class API(base.Base): size = 0 # special handling for full drives for i in range(num_volumes): - # VP-TODO: potentialy may conflict with previous volumes - volume_name = vsa_name + ("_%s_vol-%d" % (name, i)) + # volume_name = vsa_name + ("_%s_vol-%d" % (name, i)) + volume_name = "drive-%03d" % first_index + first_index += 1 + volume_desc = 'BE volume for VSA %s type %s' % \ + (vsa_name, name) volume = { 'size': size, 'snapshot_id': None, 'name': volume_name, - 'description': 'BE volume for ' + volume_name, + 'description': volume_desc, 'drive_ref': drive_ref } volume_params.append(volume) -- cgit From a72f2e29e2a35791a1c53f4f606948572ab52280 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 26 Jul 2011 13:25:34 -0700 Subject: VSA volume creation/deletion changes --- nova/db/sqlalchemy/api.py | 1 + nova/tests/test_vsa.py | 5 +- nova/tests/test_vsa_volumes.py | 108 +++++++++++++++++++++++++++++++++++++++++ nova/volume/api.py | 12 ++++- 4 files changed, 122 insertions(+), 4 deletions(-) create mode 100644 nova/tests/test_vsa_volumes.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 3b14f114a..50037e259 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2205,6 +2205,7 @@ def volume_update(context, volume_id, values): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) + return volume_ref ################### diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 859fe3325..8e4d58960 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -60,8 +60,9 @@ class VsaTestCase(test.TestCase): raise exception.ImageNotFound return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} - self.stubs.Set(nova.image.fake._FakeImageService, 'show_by_name', - fake_show_by_name) + self.stubs.Set(nova.image.fake._FakeImageService, + 'show_by_name', + fake_show_by_name) def tearDown(self): self.stubs.UnsetAll() diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py new file mode 100644 index 000000000..0facd3b1b --- /dev/null +++ b/nova/tests/test_vsa_volumes.py @@ -0,0 +1,108 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import stubout + +from nova import exception +from nova import flags +from nova import vsa +from nova import volume +from nova import db +from nova import context +from nova import test +from nova import log as logging +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa.volumes') + + +def _default_volume_param(): + return { + 'size': 1, + 'snapshot_id': None, + 'name': 'Test volume name', + 'description': 'Test volume desc name' + } + + +class VsaVolumesTestCase(test.TestCase): + + def setUp(self): + super(VsaVolumesTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + self.volume_api = volume.API() + + self.context_non_admin = context.RequestContext(None, None) + self.context = context.get_admin_context() + + def fake_show_by_name(meh, context, name): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, + 'show_by_name', + fake_show_by_name) + + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.vsa_id = vsa_ref['id'] + + def tearDown(self): + self.vsa_api.delete(self.context, self.vsa_id) + self.stubs.UnsetAll() + super(VsaVolumesTestCase, self).tearDown() + + def test_vsa_volume_create_delete(self): + """ Check if volume properly created and deleted. """ + vols1 = self.volume_api.get_all_by_vsa(self.context, + self.vsa_id, "from") + volume_param = _default_volume_param() + volume_param['from_vsa_id'] = self.vsa_id + volume_ref = self.volume_api.create(self.context, **volume_param) + + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'available') + + vols2 = self.volume_api.get_all_by_vsa(self.context, + self.vsa_id, "from") + self.assertEqual(len(vols1) + 1, len(vols2)) + + self.volume_api.delete(self.context, volume_ref['id']) + vols3 = self.volume_api.get_all_by_vsa(self.context, + self.vsa_id, "from") + self.assertEqual(len(vols3) + 1, len(vols2)) + + def test_vsa_volume_delete_nonavail_volume(self): + """ Check volume deleton in different states. 
""" + volume_param = _default_volume_param() + volume_param['from_vsa_id'] = self.vsa_id + volume_ref = self.volume_api.create(self.context, **volume_param) + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'in-use'}) + self.assertRaises(exception.ApiError, + self.volume_api.delete, + self.context, volume_ref['id']) + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'error'}) + self.volume_api.delete(self.context, volume_ref['id']) diff --git a/nova/volume/api.py b/nova/volume/api.py index df55e9dc3..6b220cc54 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -80,6 +80,10 @@ class API(base.Base): volume = self.db.volume_create(context, options) if from_vsa_id is not None: # for FE VSA volumes do nothing + now = utils.utcnow() + volume = self.db.volume_update(context, + volume['id'], {'status': 'available', + 'launched_at': now}) return volume rpc.cast(context, @@ -100,14 +104,18 @@ class API(base.Base): def delete(self, context, volume_id): volume = self.get(context, volume_id) - if volume['status'] != "available": - raise exception.ApiError(_("Volume status must be available")) if volume['from_vsa_id'] is not None: + if volume['status'] == "in-use": + raise exception.ApiError(_("Volume is in use. "\ + "Detach it first")) self.db.volume_destroy(context, volume['id']) LOG.debug(_("volume %d: deleted successfully"), volume['id']) return + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) -- cgit From 336b2703ef90fcd7b422434434c9967880b97204 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 26 Jul 2011 13:28:23 -0700 Subject: pep8 compliance --- nova/tests/test_vsa_volumes.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index 0facd3b1b..e1d4cd756 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -67,7 +67,7 @@ class VsaVolumesTestCase(test.TestCase): def test_vsa_volume_create_delete(self): """ Check if volume properly created and deleted. """ - vols1 = self.volume_api.get_all_by_vsa(self.context, + vols1 = self.volume_api.get_all_by_vsa(self.context, self.vsa_id, "from") volume_param = _default_volume_param() volume_param['from_vsa_id'] = self.vsa_id @@ -82,7 +82,7 @@ class VsaVolumesTestCase(test.TestCase): self.assertEqual(volume_ref['status'], 'available') - vols2 = self.volume_api.get_all_by_vsa(self.context, + vols2 = self.volume_api.get_all_by_vsa(self.context, self.vsa_id, "from") self.assertEqual(len(vols1) + 1, len(vols2)) @@ -90,7 +90,7 @@ class VsaVolumesTestCase(test.TestCase): vols3 = self.volume_api.get_all_by_vsa(self.context, self.vsa_id, "from") self.assertEqual(len(vols3) + 1, len(vols2)) - + def test_vsa_volume_delete_nonavail_volume(self): """ Check volume deleton in different states. 
""" volume_param = _default_volume_param() -- cgit From 401de172b86a13010885e70bc78351e72a7dfde3 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 27 Jul 2011 22:49:16 -0700 Subject: prior to nova-1336 merge --- nova/scheduler/vsa.py | 77 ++-- nova/tests/api/openstack/test_extensions.py | 7 +- nova/tests/scheduler/test_vsa_scheduler.py | 616 ++++++++++++++++++++++++++++ nova/tests/test_vsa.py | 2 + nova/tests/test_vsa_volumes.py | 23 +- nova/volume/driver.py | 4 +- nova/vsa/api.py | 41 +- nova/vsa/manager.py | 2 +- 8 files changed, 689 insertions(+), 83 deletions(-) create mode 100644 nova/tests/scheduler/test_vsa_scheduler.py (limited to 'nova') diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 6931afc2b..f66ce989c 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -65,40 +65,29 @@ class VsaScheduler(simple.SimpleScheduler): {"method": "notification", "args": {"event": event}}) - def _compare_names(self, str1, str2): - result = str1.lower() == str2.lower() - # LOG.debug(_("Comparing %(str1)s and %(str2)s. "\ - # "Result %(result)s"), locals()) - return result - - def _compare_sizes_exact_match(self, cap_capacity, size_gb): - cap_capacity = BYTES_TO_GB(int(cap_capacity)) - size_gb = int(size_gb) - result = cap_capacity == size_gb - # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\ - # "Result %(result)s"), locals()) - return result - - def _compare_sizes_approxim(self, cap_capacity, size_gb): - cap_capacity = BYTES_TO_GB(int(cap_capacity)) - size_gb = int(size_gb) - size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100 - - result = cap_capacity >= size_gb - size_perc and \ - cap_capacity <= size_gb + size_perc - # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\ - # "Result %(result)s"), locals()) - return result - def _qosgrp_match(self, drive_type, qos_values): + def _compare_names(str1, str2): + result = str1.lower() == str2.lower() + return result + + def _compare_sizes_approxim(cap_capacity, size_gb): + cap_capacity = BYTES_TO_GB(int(cap_capacity)) + size_gb = int(size_gb) + size_perc = size_gb * \ + FLAGS.drive_type_approx_capacity_percent / 100 + + result = cap_capacity >= size_gb - size_perc and \ + cap_capacity <= size_gb + size_perc + return result + # Add more entries for additional comparisons compare_list = [{'cap1': 'DriveType', 'cap2': 'type', - 'cmp_func': self._compare_names}, + 'cmp_func': _compare_names}, {'cap1': 'DriveCapacity', 'cap2': 'size_gb', - 'cmp_func': self._compare_sizes_approxim}] + 'cmp_func': _compare_sizes_approxim}] for cap in compare_list: if cap['cap1'] in qos_values.keys() and \ @@ -106,20 +95,23 @@ class VsaScheduler(simple.SimpleScheduler): cap['cmp_func'] is not None and \ cap['cmp_func'](qos_values[cap['cap1']], drive_type[cap['cap2']]): - # LOG.debug(("One of required capabilities found: %s:%s"), - # cap['cap1'], drive_type[cap['cap2']]) pass else: return False return True + def _get_service_states(self): + return self.zone_manager.service_states + def _filter_hosts(self, topic, request_spec, host_list=None): + LOG.debug(_("_filter_hosts: %(request_spec)s"), locals()) + drive_type = request_spec['drive_type'] LOG.debug(_("Filter hosts for drive type %s"), drive_type['name']) if host_list is None: - host_list = self.zone_manager.service_states.iteritems() + host_list = self._get_service_states().iteritems() filtered_hosts = [] # returns list of (hostname, capability_dict) for host, host_dict in host_list: @@ -131,7 +123,6 @@ class VsaScheduler(simple.SimpleScheduler): for 
qosgrp, qos_values in gos_info.iteritems(): if self._qosgrp_match(drive_type, qos_values): if qos_values['AvailableCapacity'] > 0: - # LOG.debug(_("Adding host %s to the list"), host) filtered_hosts.append((host, gos_info)) else: LOG.debug(_("Host %s has no free capacity. Skip"), @@ -226,7 +217,7 @@ class VsaScheduler(simple.SimpleScheduler): "args": {"volume_id": volume_ref['id'], "snapshot_id": None}}) - def _check_host_enforcement(self, availability_zone): + def _check_host_enforcement(self, context, availability_zone): if (availability_zone and ':' in availability_zone and context.is_admin): @@ -273,16 +264,10 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = qos_cap self._consume_resource(qos_cap, vol['size'], -1) - # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), - # locals()) - def schedule_create_volumes(self, context, request_spec, availability_zone, *_args, **_kwargs): """Picks hosts for hosting multiple volumes.""" - LOG.debug(_("Service states BEFORE %s"), - self.zone_manager.service_states) - num_volumes = request_spec.get('num_volumes') LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") % locals()) @@ -290,16 +275,13 @@ class VsaScheduler(simple.SimpleScheduler): vsa_id = request_spec.get('vsa_id') volume_params = request_spec.get('volumes') - host = self._check_host_enforcement(availability_zone) + host = self._check_host_enforcement(context, availability_zone) try: self._assign_hosts_to_volumes(context, volume_params, host) for vol in volume_params: self._provision_volume(context, vol, vsa_id, availability_zone) - - LOG.debug(_("Service states AFTER %s"), - self.zone_manager.service_states) except: if vsa_id: db.vsa_update(context, vsa_id, @@ -309,8 +291,6 @@ class VsaScheduler(simple.SimpleScheduler): if 'capabilities' in vol: self._consume_resource(vol['capabilities'], vol['size'], 1) - LOG.debug(_("Service states AFTER %s"), - self.zone_manager.service_states) raise return None @@ -319,7 +299,8 @@ class VsaScheduler(simple.SimpleScheduler): """Picks the best host based on requested drive type capability.""" volume_ref = db.volume_get(context, volume_id) - host = self._check_host_enforcement(volume_ref['availability_zone']) + host = self._check_host_enforcement(context, + volume_ref['availability_zone']) if host: now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, @@ -333,9 +314,6 @@ class VsaScheduler(simple.SimpleScheduler): volume_id, *_args, **_kwargs) drive_type = dict(drive_type) - LOG.debug(_("Service states BEFORE %s"), - self.zone_manager.service_states) - LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ "%(drive_type)s"), locals()) @@ -358,9 +336,6 @@ class VsaScheduler(simple.SimpleScheduler): db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) self._consume_resource(qos_cap, volume_ref['size'], -1) - - LOG.debug(_("Service states AFTER %s"), - self.zone_manager.service_states) return host def _consume_full_drive(self, qos_values, direction): diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index d459c694f..2febe50e5 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -97,8 +97,9 @@ class ExtensionControllerTest(unittest.TestCase): data = json.loads(response.body) names = [x['name'] for x in data['extensions']] names.sort() - self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", - "Fox In Socks", "Hosts", "Multinic", "Volumes"]) + 
self.assertEqual(names, ["DriveTypes", "FlavorExtraSpecs", + "Floating_ips", "Fox In Socks", "Hosts", "Multinic", "VSAs", + "Volumes"]) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [ @@ -145,7 +146,7 @@ class ExtensionControllerTest(unittest.TestCase): # Make sure we have all the extensions. exts = root.findall('{0}extension'.format(NS)) - self.assertEqual(len(exts), 6) + self.assertEqual(len(exts), 8) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py new file mode 100644 index 000000000..697ad3842 --- /dev/null +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -0,0 +1,616 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import stubout + +import nova +from nova import exception +from nova import flags +from nova import db +from nova import context +from nova import test +from nova import utils +from nova import log as logging + +from nova.scheduler import vsa as vsa_sched +from nova.scheduler import driver + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.scheduler.vsa') + +scheduled_volumes = [] +scheduled_volume = {} +global_volume = {} + + +class FakeVsaLeastUsedScheduler( + vsa_sched.VsaSchedulerLeastUsedHost): + # No need to stub anything at the moment + pass + + +class FakeVsaMostAvailCapacityScheduler( + vsa_sched.VsaSchedulerMostAvailCapacity): + # No need to stub anything at the moment + pass + + +class VsaSchedulerTestCase(test.TestCase): + + def _get_vol_creation_request(self, num_vols, drive_ix, size=0): + volume_params = [] + for i in range(num_vols): + drive_type = {'id': i, + 'name': 'name_' + str(drive_ix), + 'type': 'type_' + str(drive_ix), + 'size_gb': 1 + 100 * (drive_ix)} + volume = {'size': size, + 'snapshot_id': None, + 'name': 'vol_' + str(i), + 'description': None, + 'drive_ref': drive_type} + volume_params.append(volume) + + return {'num_volumes': len(volume_params), + 'vsa_id': 123, + 'volumes': volume_params} + + def _generate_default_service_states(self): + service_states = {} + for i in range(self.host_num): + host = {} + hostname = 'host_' + str(i) + if hostname in self.exclude_host_list: + continue + + host['volume'] = {'timestamp': utils.utcnow(), + 'drive_qos_info': {}} + + for j in range(self.drive_type_start_ix, + self.drive_type_start_ix + self.drive_type_num): + dtype = {} + dtype['Name'] = 'name_' + str(j) + dtype['DriveType'] = 'type_' + str(j) + dtype['TotalDrives'] = 2 * (self.init_num_drives + i) + dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j) + dtype['TotalCapacity'] = dtype['TotalDrives'] * \ + dtype['DriveCapacity'] + dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \ + dtype['DriveCapacity'] + dtype['DriveRpm'] = 7200 + dtype['DifCapable'] = 0 + dtype['SedCapable'] = 0 + dtype['PartitionDrive'] = { + 'PartitionSize': 0, + 'NumOccupiedPartitions': 0, + 'NumFreePartitions': 0} + 
dtype['FullDrive'] = { + 'NumFreeDrives': dtype['TotalDrives'] - i, + 'NumOccupiedDrives': i} + host['volume']['drive_qos_info'][dtype['Name']] = dtype + + service_states[hostname] = host + + return service_states + + def _print_service_states(self): + for host, host_val in self.service_states.iteritems(): + LOG.info(_("Host %s"), host) + total_used = 0 + total_available = 0 + qos = host_val['volume']['drive_qos_info'] + + for k, d in qos.iteritems(): + LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\ + "size %3d, total %4d, used %4d, avail %d", + k, d['DriveType'], + d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'], + vsa_sched.BYTES_TO_GB(d['DriveCapacity']), + vsa_sched.BYTES_TO_GB(d['TotalCapacity']), + vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \ + d['AvailableCapacity']), + vsa_sched.BYTES_TO_GB(d['AvailableCapacity'])) + + total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \ + d['AvailableCapacity']) + total_available += vsa_sched.BYTES_TO_GB( + d['AvailableCapacity']) + LOG.info("Host %s: used %d, avail %d", + host, total_used, total_available) + + def _set_service_states(self, host_num, + drive_type_start_ix, drive_type_num, + init_num_drives=10, + exclude_host_list=[]): + self.host_num = host_num + self.drive_type_start_ix = drive_type_start_ix + self.drive_type_num = drive_type_num + self.exclude_host_list = exclude_host_list + self.init_num_drives = init_num_drives + self.service_states = self._generate_default_service_states() + + def _get_service_states(self): + return self.service_states + + def _fake_get_service_states(self): + return self._get_service_states() + + def _fake_provision_volume(self, context, vol, vsa_id, availability_zone): + global scheduled_volumes + scheduled_volumes.append(dict(vol=vol, + vsa_id=vsa_id, + az=availability_zone)) + name = vol['name'] + host = vol['host'] + LOG.debug(_("Test: provision vol %(name)s on host %(host)s"), + locals()) + LOG.debug(_("\t vol=%(vol)s"), locals()) + pass + + def _fake_vsa_update(self, context, vsa_id, values): + LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\ + "values=%(values)s"), locals()) + pass + + def _fake_volume_create(self, context, options): + LOG.debug(_("Test: Volume create: %s"), options) + options['id'] = 123 + global global_volume + global_volume = options + return options + + def _fake_volume_get(self, context, volume_id): + LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals()) + global global_volume + global_volume['id'] = volume_id + global_volume['availability_zone'] = None + return global_volume + + def _fake_volume_update(self, context, volume_id, values): + LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\ + "values=%(values)s"), locals()) + global scheduled_volume + scheduled_volume = {'id': volume_id, 'host': values['host']} + pass + + def _fake_service_get_by_args(self, context, host, binary): + return "service" + + def _fake_service_is_up_True(self, service): + return True + + def _fake_service_is_up_False(self, service): + return False + + def setUp(self, sched_class=None): + super(VsaSchedulerTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.context_non_admin = context.RequestContext(None, None) + self.context = context.get_admin_context() + + if sched_class is None: + self.sched = FakeVsaLeastUsedScheduler() + else: + self.sched = sched_class + + self.host_num = 10 + self.drive_type_num = 5 + + self.stubs.Set(self.sched, + '_get_service_states', self._fake_get_service_states) + self.stubs.Set(self.sched, + 
'_provision_volume', self._fake_provision_volume) + self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update) + + self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get) + self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaSchedulerTestCase, self).tearDown() + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_0', 'host_2', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_no_drive_type(self): + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6) + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + def test_vsa_sched_no_enough_drives(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=3, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=0) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + # check that everything was returned back + cur = self._get_service_states() + for k, v in prev.iteritems(): + self.assertEqual(prev[k]['volume']['drive_qos_info'], + cur[k]['volume']['drive_qos_info']) + + def test_vsa_sched_wrong_topic(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1) + states = self._get_service_states() + new_states = {} + new_states['host_0'] = {'compute': states['host_0']['volume']} + self.service_states = new_states + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + def test_vsa_sched_provision_volume(self): + global global_volume + global_volume = {} + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.stubs.UnsetAll() + self.stubs.Set(self.sched, + '_get_service_states', self._fake_get_service_states) + self.stubs.Set(nova.db, 'volume_create', 
self._fake_volume_create) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(request_spec['volumes'][0]['name'], + global_volume['display_name']) + + def test_vsa_sched_no_free_drives(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + cur = self._get_service_states() + cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0'] + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1) + + new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + self._print_service_states() + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + new_request, + availability_zone=None) + + def test_vsa_sched_forced_host(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10) + + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.assertRaises(exception.HostBinaryNotFound, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_False) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone="nova:host_5") + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5') + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + 
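For reference, the request_spec these tests hand to schedule_create_volumes() has the shape built by _get_vol_creation_request(); a sketch of an equivalent literal for one full-drive volume (values follow drive_ix=2; sched and ctxt are assumed to be a VsaScheduler instance and a RequestContext, not names from the patch):

    request_spec = {
        'num_volumes': 1,
        'vsa_id': 123,
        'volumes': [{'size': 0,              # 0 means "use a full drive"
                     'snapshot_id': None,
                     'name': 'vol_0',
                     'description': None,
                     'drive_ref': {'id': 0,
                                   'name': 'name_2',
                                   'type': 'type_2',
                                   'size_gb': 201}}]}

    # sched: VsaScheduler instance, ctxt: admin RequestContext (assumed)
    sched.schedule_create_volumes(ctxt, request_spec, availability_zone=None)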
self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) + + def test_vsa_sched_create_single_volume_az(self): + global scheduled_volume + scheduled_volume = {} + + def _fake_volume_get_az(context, volume_id): + LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals()) + return {'id': volume_id, 'availability_zone': 'nova:host_3'} + + self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az) + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_3') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_3') + + def test_vsa_sched_create_single_non_vsa_volume(self): + global scheduled_volume + scheduled_volume = {} + + global global_volume + global_volume = {} + global_volume['drive_type'] = None + + self.assertRaises(driver.NoValidHost, + self.sched.schedule_create_volume, + self.context, + 123, + availability_zone=None) + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + drive_type = {'id': drive_ix, + 'name': 'name_' + str(drive_ix), + 'type': 'type_' + str(drive_ix), + 'size_gb': 1 + 100 * (drive_ix)} + + global_volume['drive_type'] = drive_type + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_2') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_2') + + +class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase): + + def setUp(self): + super(VsaSchedulerTestCaseMostAvail, self).setUp( + FakeVsaMostAvailCapacityScheduler()) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaSchedulerTestCaseMostAvail, self).tearDown() + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + drive_type = {'id': drive_ix, + 'name': 'name_' + str(drive_ix), + 'type': 'type_' + str(drive_ix), + 'size_gb': 1 + 100 * (drive_ix)} + + global_volume['drive_type'] = drive_type + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_9') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_9') + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + 
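The two test case classes exercise different placement policies: consistent with the expected hosts in these tests, the least-used scheduler favors hosts with the fewest occupied drives, while the most-available-capacity scheduler favors hosts with the largest remaining capacity. A simplified sketch of that selection, inferred from the test expectations rather than taken from the scheduler code:

    def pick_host_least_used(candidates):
        # candidates: list of (hostname, qos_values) pairs that passed
        # host filtering; qos_values mirrors a drive_qos_info entry.
        return min(candidates,
                   key=lambda item: item[1]['FullDrive']['NumOccupiedDrives'])

    def pick_host_most_available(candidates):
        return max(candidates,
                   key=lambda item: item[1]['AvailableCapacity'])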
request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self._print_service_states() + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7') + + cur = self._get_service_states() + for host in ['host_9', 'host_8', 'host_7']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 8e4d58960..cff23a800 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -22,6 +22,7 @@ from xml.etree.ElementTree import Element, SubElement from nova import exception from nova import flags from nova import vsa +from nova import volume from nova import db from nova import context from nova import test @@ -50,6 +51,7 @@ class VsaTestCase(test.TestCase): super(VsaTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() self.vsa_api = vsa.API() + self.volume_api = volume.API() self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index e1d4cd756..d451a4377 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -61,7 +61,8 @@ class VsaVolumesTestCase(test.TestCase): self.vsa_id = vsa_ref['id'] def tearDown(self): - 
self.vsa_api.delete(self.context, self.vsa_id) + if self.vsa_id: + self.vsa_api.delete(self.context, self.vsa_id) self.stubs.UnsetAll() super(VsaVolumesTestCase, self).tearDown() @@ -106,3 +107,23 @@ class VsaVolumesTestCase(test.TestCase): self.volume_api.update(self.context, volume_ref['id'], {'status': 'error'}) self.volume_api.delete(self.context, volume_ref['id']) + + def test_vsa_volume_delete_vsa_with_volumes(self): + """ Check volume deletion in different states. """ + + vols1 = self.volume_api.get_all_by_vsa(self.context, + self.vsa_id, "from") + for i in range(3): + volume_param = _default_volume_param() + volume_param['from_vsa_id'] = self.vsa_id + volume_ref = self.volume_api.create(self.context, **volume_param) + + vols2 = self.volume_api.get_all_by_vsa(self.context, + self.vsa_id, "from") + self.assertEqual(len(vols1) + 3, len(vols2)) + + self.vsa_api.delete(self.context, self.vsa_id) + + vols3 = self.volume_api.get_all_by_vsa(self.context, + self.vsa_id, "from") + self.assertEqual(len(vols1), len(vols3)) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 2e3da57b2..98d115088 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -507,7 +507,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - self._run_iscsiadm(iscsi_properties, ('--op', 'new')) + self._run_iscsiadm(iscsi_properties, '--op=new') if iscsi_properties.get('auth_method'): self._iscsiadm_update(iscsi_properties, @@ -559,7 +559,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) + self._run_iscsiadm(iscsi_properties, '--op=delete') def check_for_export(self, context, volume_id): """Make sure volume is exported.""" diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 9b2750d82..39f7d1431 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -312,9 +312,8 @@ class API(base.Base): def _force_volume_delete(self, ctxt, volume): """Delete a volume, bypassing the check that it must be available.""" host = volume['host'] - - if not host: - # Volume not yet assigned to host + if not host or volume['from_vsa_id']: + # Volume not yet assigned to host OR FE volume # Deleting volume from database and skipping rpc. self.db.volume_destroy(ctxt, volume['id']) return @@ -324,41 +323,33 @@ class API(base.Base): {"method": "delete_volume", "args": {"volume_id": volume['id']}}) - def delete_be_volumes(self, context, vsa_id, force_delete=True): + def delete_vsa_volumes(self, context, vsa_id, direction, + force_delete=True): + if direction == "FE": + volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + else: + volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) - be_volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) - for volume in be_volumes: + for volume in volumes: try: vol_name = volume['name'] - LOG.info(_("VSA ID %(vsa_id)s: Deleting BE volume "\ - "%(vol_name)s"), locals()) + LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\ - "volume %(vol_name)s"), locals()) self.volume_api.delete(context, volume['id']) except exception.ApiError: LOG.info(_("Unable to delete volume %s"), volume['name']) if force_delete: - LOG.info(_("VSA ID %(vsa_id)s: Forced delete. BE volume "\ - "%(vol_name)s"), locals()) + LOG.info(_("VSA ID %(vsa_id)s: Forced delete. 
"\ + "%(direction)s volume %(vol_name)s"), locals()) self._force_volume_delete(context, volume) def delete(self, context, vsa_id): """Terminate a VSA instance.""" LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id) - # allow deletion of volumes in "abnormal" state - - # Delete all FE volumes - fe_volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) - for volume in fe_volumes: - try: - vol_name = volume['name'] - LOG.info(_("VSA ID %(vsa_id)s: Deleting FE volume "\ - "%(vol_name)s"), locals()) - self.volume_api.delete(context, volume['id']) - except exception.ApiError: - LOG.info(_("Unable to delete volume %s"), volume['name']) - - # Delete all BE volumes - self.delete_be_volumes(context, vsa_id, force_delete=True) + # Delete all FrontEnd and BackEnd volumes + self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True) + self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True) # Delete all VC instances instances = self.db.instance_get_all_by_vsa(context, vsa_id) diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 1390f8146..e963d26c5 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -145,7 +145,7 @@ class VsaManager(manager.SchedulerDependentManager): if has_failed_volumes: LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) - self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True) + self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True) self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED) return -- cgit From b4159d95c32382d124c3f3f0a49f8ad9f2d41036 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 28 Jul 2011 00:27:16 -0700 Subject: some minor cosmetic work. addressed some dead code section --- nova/api/openstack/contrib/drive_types.py | 4 ---- nova/db/sqlalchemy/api.py | 2 -- .../sqlalchemy/migrate_repo/versions/036_add_vsa_data.py | 12 ------------ nova/flags.py | 2 -- nova/scheduler/vsa.py | 14 +++++--------- 5 files changed, 5 insertions(+), 29 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index 6454fd81f..f2cbd3715 100644 --- a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -18,18 +18,14 @@ """ The Drive Types extension for Virtual Storage Arrays""" - from webob import exc from nova.vsa import drive_types from nova import exception -from nova import db -from nova import quota from nova import log as logging from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults -from nova.api.openstack import wsgi LOG = logging.getLogger("nova.api.drive_types") diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e17859f69..d71d8787b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3526,8 +3526,6 @@ def vsa_destroy(context, vsa_id): """ session = get_session() with session.begin(): - #vsa_ref = vsa_get(context, vsa_id, session=session) - #vsa_ref.delete(session=session) session.query(models.VirtualStorageArray).\ filter_by(id=vsa_id).\ update({'deleted': True, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py index 5d2e56a7e..3b39ff493 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py @@ -96,18 +96,6 @@ drive_types = Table('drive_types', meta, Column('visible', 
Boolean(create_constraint=True, name=None)), ) -#vsa_disk_association = Table('vsa_disk_association', meta, -# Column('created_at', DateTime(timezone=False)), -# Column('updated_at', DateTime(timezone=False)), -# Column('deleted_at', DateTime(timezone=False)), -# Column('deleted', Boolean(create_constraint=True, name=None)), -# Column('id', Integer(), primary_key=True, nullable=False), -# Column('drive_type_id', Integer(), ForeignKey('drive_types.id')), -# Column('vsa_id', Integer(), ForeignKey('virtual_storage_arrays.id')), -# Column('disk_num', Integer(), nullable=False), -# ) - -#new_tables = (virtual_storage_arrays, drive_types, vsa_disk_association) new_tables = (virtual_storage_arrays, drive_types) # diff --git a/nova/flags.py b/nova/flags.py index c192b5281..7e9be5d84 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -365,8 +365,6 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') -DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', - 'Manager for vsa') DEFINE_string('vc_image_name', 'vc_image', 'the VC image ID (for a VC image that exists in DB Glance)') # VSA constants and enums diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index f66ce989c..ed5039f4d 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -34,8 +34,6 @@ from nova import log as logging LOG = logging.getLogger('nova.scheduler.vsa') FLAGS = flags.FLAGS -flags.DEFINE_integer('gb_to_bytes_shift', 30, - 'Conversion shift between GB and bytes') flags.DEFINE_integer('drive_type_approx_capacity_percent', 10, 'The percentage range for capacity comparison') flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, @@ -45,11 +43,11 @@ flags.DEFINE_boolean('vsa_select_unique_drives', True, def BYTES_TO_GB(bytes): - return bytes >> FLAGS.gb_to_bytes_shift + return bytes >> 30 def GB_TO_BYTES(gb): - return gb << FLAGS.gb_to_bytes_shift + return gb << 30 class VsaScheduler(simple.SimpleScheduler): @@ -68,8 +66,7 @@ class VsaScheduler(simple.SimpleScheduler): def _qosgrp_match(self, drive_type, qos_values): def _compare_names(str1, str2): - result = str1.lower() == str2.lower() - return result + return str1.lower() == str2.lower() def _compare_sizes_approxim(cap_capacity, size_gb): cap_capacity = BYTES_TO_GB(int(cap_capacity)) @@ -77,9 +74,8 @@ class VsaScheduler(simple.SimpleScheduler): size_perc = size_gb * \ FLAGS.drive_type_approx_capacity_percent / 100 - result = cap_capacity >= size_gb - size_perc and \ - cap_capacity <= size_gb + size_perc - return result + return cap_capacity >= size_gb - size_perc and \ + cap_capacity <= size_gb + size_perc # Add more entries for additional comparisons compare_list = [{'cap1': 'DriveType', -- cgit From 16cbba0838e9a2ac712b91b103dc794b0edebd00 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 28 Jul 2011 00:45:16 -0700 Subject: more commented code removed --- nova/api/openstack/contrib/virtual_storage_arrays.py | 2 -- nova/scheduler/vsa.py | 2 -- nova/volume/manager.py | 3 --- 3 files changed, 7 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 68a00fd7d..842573f8a 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -180,8 +180,6 @@ class VsaVolumeDriveController(volumes.VolumeController): ]}}} def __init__(self): - # 
self.compute_api = compute.API() - # self.vsa_api = vsa.API() self.volume_api = volume.API() super(VsaVolumeDriveController, self).__init__() diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index ed5039f4d..10c9b5a02 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -324,8 +324,6 @@ class VsaScheduler(simple.SimpleScheduler): db.vsa_update(context, volume_ref['to_vsa_id'], dict(status=VsaState.FAILED)) raise - #return super(VsaScheduler, self).schedule_create_volume(context, - # volume_id, *_args, **_kwargs) if host: now = utils.utcnow() diff --git a/nova/volume/manager.py b/nova/volume/manager.py index e46f8536d..fd1d5acfa 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -265,9 +265,6 @@ class VolumeManager(manager.SchedulerDependentManager): return error_list def _volume_stats_changed(self, stat1, stat2): - #LOG.info(_("stat1=%s"), stat1) - #LOG.info(_("stat2=%s"), stat2) - if len(stat1) != len(stat2): return True for (k, v) in stat1.iteritems(): -- cgit From f4359a7789ae96a36aaab8f53aa3234d13b1725a Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 28 Jul 2011 15:54:02 -0700 Subject: returned vsa_manager, nova-manage arg and print changes --- nova/api/ec2/cloud.py | 4 ++++ nova/flags.py | 2 ++ nova/vsa/api.py | 2 -- 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ca1fef51f..0a0644351 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -965,6 +965,10 @@ class CloudController(object): vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None) else: vsa['vcType'] = None + + vols = self.volume_api.get_all_by_vsa(context, p_vsa['id'], "to") + vsa['volCount'] = 0 if vols is None else len(vols) + return vsa def create_vsa(self, context, **kwargs): diff --git a/nova/flags.py b/nova/flags.py index 7e9be5d84..c192b5281 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -365,6 +365,8 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') +DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', + 'Manager for vsa') DEFINE_string('vc_image_name', 'vc_image', 'the VC image ID (for a VC image that exists in DB Glance)') # VSA constants and enums diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 39f7d1431..0baba6180 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -205,12 +205,10 @@ class API(base.Base): # create volumes if FLAGS.vsa_multi_vol_creation: if len(volume_params) > 0: - #filter_class = 'nova.scheduler.vsa.InstanceTypeFilter' request_spec = { 'num_volumes': len(volume_params), 'vsa_id': vsa_id, 'volumes': volume_params, - #'filter': filter_class, } rpc.cast(context, -- cgit From bd39829cc1908cb5ead899c9659a5c516b073a4f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 9 Aug 2011 16:55:51 -0700 Subject: merge with nova-1411. 
fixed --- nova/api/ec2/cloud.py | 2 +- nova/api/openstack/contrib/floating_ips.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9094f6b56..ac0ff713b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1149,7 +1149,7 @@ class CloudController(object): return {'driveTypeSet': [dict(drive) for drive in drives]} @staticmethod - def _convert_to_set(self, lst, label): + def _convert_to_set(lst, label): if lst is None or lst == []: return None if not isinstance(lst, list): diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 52c9c6cf9..2aba1068a 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -102,7 +102,7 @@ class FloatingIPController(object): def delete(self, req, id): context = req.environ['nova.context'] ip = self.network_api.get_floating_ip(context, id) - + if 'fixed_ip' in ip: try: self.disassociate(req, id, '') -- cgit From 820d28dcf09088b5878d4cd5dcb5f4765e0b4992 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 9 Aug 2011 18:14:41 -0700 Subject: Dropped vsa_id from instances --- nova/compute/api.py | 8 +++--- nova/db/api.py | 12 --------- nova/db/sqlalchemy/api.py | 30 +--------------------- .../migrate_repo/versions/037_add_vsa_data.py | 7 ----- nova/db/sqlalchemy/models.py | 3 --- nova/vsa/manager.py | 3 ++- 6 files changed, 6 insertions(+), 57 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 42e627712..4ac0ffef2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -151,7 +151,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, vsa_id=None): + reservation_id=None): """Verify all the input parameters regardless of the provisioning strategy being performed.""" @@ -247,7 +247,6 @@ class API(base.Base): 'os_type': os_type, 'architecture': architecture, 'vm_mode': vm_mode, - 'vsa_id': vsa_id, 'root_device_name': root_device_name} return (num_instances, base_options, image) @@ -469,8 +468,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None, - vsa_id=None): + reservation_id=None, block_device_mapping=None): """ Provision the instances by sending off a series of single instance requests to the Schedulers. 
This is fine for trival @@ -491,7 +489,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id, vsa_id) + reservation_id) block_device_mapping = block_device_mapping or [] instances = [] diff --git a/nova/db/api.py b/nova/db/api.py index 59baf94dd..0b6995f90 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -512,23 +512,11 @@ def instance_get_all_by_project(context, project_id): return IMPL.instance_get_all_by_project(context, project_id) -def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): - """Get all instance spawned by a given VSA belonging to a project.""" - return IMPL.instance_get_all_by_project_and_vsa(context, - project_id, - vsa_id) - - def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) -def instance_get_all_by_vsa(context, vsa_id): - """Get all instance belonging to a VSA.""" - return IMPL.instance_get_all_by_vsa(context, vsa_id) - - def instance_get_all_by_reservation(context, reservation_id): """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ff6d756a1..bc1a3046c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1321,35 +1321,6 @@ def instance_get_all_by_project(context, project_id): all() -@require_context -def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): - authorize_project_context(context, project_id) - - session = get_session() - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter_by(project_id=project_id).\ - filter_by(vsa_id=vsa_id).\ - filter_by(deleted=can_read_deleted(context)).\ - all() - - -@require_admin_context -def instance_get_all_by_vsa(context, vsa_id): - session = get_session() - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter_by(vsa_id=vsa_id).\ - filter_by(deleted=can_read_deleted(context)).\ - all() - - @require_context def instance_get_all_by_reservation(context, reservation_id): session = get_session() @@ -3748,6 +3719,7 @@ def vsa_get_vc_ips_list(context, vsa_id): """ result = [] session = get_session() + """ VP-TODO: CHANGE THIS!!! Need to perform a search based on meta-data """ vc_instances = session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload('security_groups')).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py index 3b39ff493..5a80f4e7a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py @@ -27,15 +27,10 @@ meta = MetaData() # actual definitions of tables . 
# -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - volumes = Table('volumes', meta, Column('id', Integer(), primary_key=True, nullable=False), ) -vsa_id = Column('vsa_id', Integer(), nullable=True) to_vsa_id = Column('to_vsa_id', Integer(), nullable=True) from_vsa_id = Column('from_vsa_id', Integer(), nullable=True) drive_type_id = Column('drive_type_id', Integer(), nullable=True) @@ -123,7 +118,6 @@ def upgrade(migrate_engine): logging.exception('Exception while creating table') raise - instances.create_column(vsa_id) volumes.create_column(to_vsa_id) volumes.create_column(from_vsa_id) volumes.create_column(drive_type_id) @@ -132,7 +126,6 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): meta.bind = migrate_engine - instances.drop_column(vsa_id) volumes.drop_column(to_vsa_id) volumes.drop_column(from_vsa_id) volumes.drop_column(drive_type_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f80029e97..236f148e4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -243,9 +243,6 @@ class Instance(BASE, NovaBase): # assert(state in ['nostate', 'running', 'blocked', 'paused', # 'shutdown', 'shutoff', 'crashed']) - vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'), - nullable=True) - class VirtualStorageArray(BASE, NovaBase): """ diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 0da6fe460..1d17340f2 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -173,6 +173,7 @@ class VsaManager(manager.SchedulerDependentManager): display_description='VC for VSA ' + vsa['display_name'], availability_zone=vsa['availability_zone'], user_data=storage_data, - vsa_id=vsa_id) + vsa_id=vsa_id, + metadata=dict(vsa_id=str(vsa_id))) self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) -- cgit From 57b8f976f18b1f45de16ef8e87a6e215c009d228 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 12:04:03 -0700 Subject: moved vsa_id to metadata. 
Added search my meta --- nova/db/sqlalchemy/api.py | 33 ++++++++++++++++-------- nova/tests/test_compute.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++ nova/vsa/api.py | 3 ++- nova/vsa/manager.py | 1 - 4 files changed, 87 insertions(+), 13 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bc1a3046c..b77f11abb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1175,6 +1175,19 @@ def instance_get_all_by_filters(context, filters): return True return False + def _regexp_filter_by_metadata(instance, meta): + inst_metadata = [{node['key']: node['value']} \ + for node in instance['metadata']] + if isinstance(meta, list): + for node in meta: + if node not in inst_metadata: + return False + elif isinstance(meta, dict): + for k, v in meta.iteritems(): + if {k: v} not in inst_metadata: + return False + return True + def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) @@ -1232,7 +1245,9 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.all() + instances = query_prefix.\ + filter_by(deleted=can_read_deleted(context)).\ + all() if not instances: return [] @@ -1248,6 +1263,9 @@ def instance_get_all_by_filters(context, filters): filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) + elif filter_name == 'metadata': + filter_l = lambda instance: _regexp_filter_by_metadata(instance, + filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) @@ -3718,16 +3736,9 @@ def vsa_get_vc_ips_list(context, vsa_id): Retrieves IPs of instances associated with Virtual Storage Array. """ result = [] - session = get_session() - """ VP-TODO: CHANGE THIS!!! 
Need to perform a search based on meta-data """ - vc_instances = session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter_by(vsa_id=vsa_id).\ - filter_by(deleted=False).\ - all() + + vc_instances = instance_get_all_by_filters(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) for vc_instance in vc_instances: if vc_instance['fixed_ips']: for fixed in vc_instance['fixed_ips']: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 80f7ff489..661acc980 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1320,6 +1320,69 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) + def test_get_all_by_metadata(self): + """Test searching instances by metadata""" + + c = context.get_admin_context() + instance_id0 = self._create_instance() + instance_id1 = self._create_instance({ + 'metadata': {'key1': 'value1'}}) + instance_id2 = self._create_instance({ + 'metadata': {'key2': 'value2'}}) + instance_id3 = self._create_instance({ + 'metadata': {'key3': 'value3'}}) + instance_id4 = self._create_instance({ + 'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + + # get all instances + instances = self.compute_api.get_all(c, + search_opts={'metadata': {}}) + self.assertEqual(len(instances), 5) + + # wrong key/value combination + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key1': 'value3'}}) + self.assertEqual(len(instances), 0) + + # non-existing keys + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key5': 'value1'}}) + self.assertEqual(len(instances), 0) + + # find existing instance + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key2': 'value2'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3'}}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id3 in instance_ids) + self.assertTrue(instance_id4 in instance_ids) + + # multiple criterias as a dict + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + # multiple criterias as a list + instances = self.compute_api.get_all(c, + search_opts={'metadata': [{'key4': 'value4'}, + {'key3': 'value3'}]}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + db.instance_destroy(c, instance_id0) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + db.instance_destroy(c, instance_id4) + @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 00ab96162..3588e58cc 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -355,7 +355,8 @@ class API(base.Base): self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True) # Delete all VC instances - instances = self.db.instance_get_all_by_vsa(context, vsa_id) + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) for instance in instances: name = 
instance['name'] LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"), diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 1d17340f2..d98d0fcb2 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -173,7 +173,6 @@ class VsaManager(manager.SchedulerDependentManager): display_description='VC for VSA ' + vsa['display_name'], availability_zone=vsa['availability_zone'], user_data=storage_data, - vsa_id=vsa_id, metadata=dict(vsa_id=str(vsa_id))) self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) -- cgit From fe8b1023bc9b800f628c0e35b29c165863b17206 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 13:45:55 -0700 Subject: capabilities fix, run_as_root fix --- nova/scheduler/manager.py | 4 ++-- nova/scheduler/zone_manager.py | 2 -- nova/volume/driver.py | 15 ++++++++++----- 3 files changed, 12 insertions(+), 9 deletions(-) (limited to 'nova') diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index c8b16b622..294de62e4 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -71,8 +71,8 @@ class SchedulerManager(manager.Manager): def update_service_capabilities(self, context=None, service_name=None, host=None, capabilities=None): """Process a capability update from a service node.""" - if not capability: - capability = {} + if not capabilities: + capabilities = {} self.zone_manager.update_service_capabilities(service_name, host, capabilities) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 71889e99f..9d05ea42e 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -197,8 +197,6 @@ class ZoneManager(object): def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" - # logging.debug(_("Received %(service_name)s service update from " - # "%(host)s: %(capabilities)s") % locals()) logging.debug(_("Received %(service_name)s service update from " "%(host)s.") % locals()) service_caps = self.service_states.get(host, {}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index f54f3b5aa..a1d7f700e 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -843,11 +843,12 @@ class ZadaraBEDriver(ISCSIDriver): qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'create_qospart', '--qos', qosstr, '--pname', volume['name'], '--psize', sizestr, + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("VSA BE create_volume for %s failed"), volume['name']) @@ -861,9 +862,10 @@ class ZadaraBEDriver(ISCSIDriver): return super(ZadaraBEDriver, self).delete_volume(volume) try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'delete_partition', '--pname', volume['name'], + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) @@ -925,10 +927,11 @@ class ZadaraBEDriver(ISCSIDriver): return try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'remove_export', '--pname', volume['name'], '--tid', iscsi_target, + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) @@ -954,11 +957,12 @@ class 
ZadaraBEDriver(ISCSIDriver): Common logic that asks zadara_sncfg to setup iSCSI target/lun for this volume """ - (out, err) = self._sync_exec('sudo', + (out, err) = self._sync_exec( '/var/lib/zadara/bin/zadara_sncfg', 'create_export', '--pname', volume['name'], '--tid', iscsi_target, + run_as_root=True, check_exit_code=0) result_xml = ElementTree.fromstring(out) @@ -980,9 +984,10 @@ class ZadaraBEDriver(ISCSIDriver): def _get_qosgroup_summary(self): """gets the list of qosgroups from Zadara BE""" try: - (out, err) = self._sync_exec('sudo', + (out, err) = self._sync_exec( '/var/lib/zadara/bin/zadara_sncfg', 'get_qosgroups_xml', + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("Failed to retrieve QoS info")) -- cgit From f22cfa05f7c796fbda3d832e4bfadc325f8af6f5 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Thu, 11 Aug 2011 17:40:13 -0700 Subject: Updates to libvirt, write metadata, net, and key to the config drive --- nova/network/manager.py | 3 ++- nova/virt/disk.py | 33 +++++++++++++++++++++----- nova/virt/libvirt.xml.template | 14 +++++------ nova/virt/libvirt/connection.py | 51 ++++++++++++++++++++++++++--------------- 4 files changed, 68 insertions(+), 33 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 75c3f668d..44bf662ce 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -893,7 +893,6 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): def _setup_network(self, context, network_ref): """Sets up network on this host.""" - network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) if not network_ref['vpn_public_address']: net = {} address = FLAGS.vpn_ip @@ -901,6 +900,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): network_ref = db.network_update(context, network_ref['id'], net) else: address = network_ref['vpn_public_address'] + + network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], network_ref['bridge_interface'], diff --git a/nova/virt/disk.py b/nova/virt/disk.py index f8aea1f34..fda3f5f29 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -2,6 +2,9 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# +# Copyright 2011, Piston Cloud Computing, Inc. +# # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -22,6 +25,7 @@ Includes injection of SSH PGP keys into authorized_keys file. """ +import json import os import tempfile import time @@ -60,7 +64,8 @@ def extend(image, size): utils.execute('resize2fs', image, check_exit_code=False) -def inject_data(image, key=None, net=None, partition=None, nbd=False): +def inject_data(image, key=None, net=None, metadata=None, + partition=None, nbd=False, tune2fs=True): """Injects a ssh key and optionally net data into a disk image. 
it will mount the image as a fully partitioned disk and attempt to inject @@ -89,9 +94,10 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): ' only inject raw disk images): %s' % mapped_device) - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo', 'tune2fs', - '-c', 0, '-i', 0, mapped_device) + if tune2fs: + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) tmpdir = tempfile.mkdtemp() try: @@ -103,7 +109,8 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): % err) try: - inject_data_into_fs(tmpdir, key, net, utils.execute) + inject_data_into_fs(tmpdir, key, net, metadata, + utils.execute) finally: # unmount device utils.execute('sudo', 'umount', mapped_device) @@ -155,6 +162,7 @@ def destroy_container(target, instance, nbd=False): def _link_device(image, nbd): """Link image to device using loopback or nbd""" + if nbd: device = _allocate_device() utils.execute('sudo', 'qemu-nbd', '-c', device, image) @@ -189,6 +197,7 @@ def _allocate_device(): # NOTE(vish): This assumes no other processes are allocating nbd devices. # It may race cause a race condition if multiple # workers are running on a given machine. + while True: if not _DEVICES: raise exception.Error(_('No free nbd devices')) @@ -202,7 +211,7 @@ def _free_device(device): _DEVICES.append(device) -def inject_data_into_fs(fs, key, net, execute): +def inject_data_into_fs(fs, key, net, metadata, execute): """Injects data into a filesystem already mounted by the caller. Virt connections can call this directly if they mount their fs in a different way to inject_data @@ -211,7 +220,19 @@ def inject_data_into_fs(fs, key, net, execute): _inject_key_into_fs(key, fs, execute=execute) if net: _inject_net_into_fs(net, fs, execute=execute) + if metadata: + _inject_metadata_into_fs(metadata, fs, execute=execute) + +def _inject_file_into_fs(injected_files, fs, execute=None): + for path, data in injected_files: + + +def _inject_metadata_into_fs(metadata, fs, execute=None): + metadata_path = os.path.join(fs, "meta.js") + metadata = dict([(m.key, m.value) for m in metadata]) + utils.execute('sudo', 'tee', '-a', metadata_path, + process_input=json.dumps(metadata)) def _inject_key_into_fs(key, fs, execute=None): """Add the given public ssh key to root's authorized_keys. 
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 4422f349f..1fa536ad1 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -55,13 +55,6 @@ #else - #if $getVar('config', False) - - - - - - #end if #if $getVar('rescue', False) @@ -96,6 +89,13 @@ #end for #end if + #if $getVar('config_drive', False) + + + + + + #end if #end if #for $nic in $nics diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index ad97dc796..35ce0dcde 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -781,6 +781,7 @@ class LibvirtConnection(driver.ComputeDriver): network_info=None, block_device_mapping=None): block_device_mapping = block_device_mapping or [] + if not suffix: suffix = '' @@ -857,7 +858,7 @@ class LibvirtConnection(driver.ComputeDriver): target=basepath('disk.local'), fname="local_%s" % inst_type['local_gb'], cow=FLAGS.use_cow_images, - local_gb=inst_type['local_gb']) + local_size=inst_type['local_gb']) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -866,20 +867,23 @@ class LibvirtConnection(driver.ComputeDriver): if not inst['kernel_id']: target_partition = "1" - if FLAGS.libvirt_type == 'lxc': + config_drive_id = inst.get('config_drive_id') + config_drive = inst.get('config_drive') + + if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)): target_partition = None - else: - if inst['config_drive_id']: - fname = '%08x' % int(inst['config_drive_id']) - self._cache_image(fn=self._fetch_image, - target=basepath('config'), - fname=fname, - image_id=inst['config_drive_id'], - user=user, - project=project) - elif inst['config_drive']: - self._create_local(basepath('config'), 64, prefix="M", - fs_format='msdos') # 64MB + + if config_drive_id: + fname = '%08x' % int(config_drive_id) + self._cache_image(fn=self._fetch_image, + target=basepath('disk.config'), + fname=fname, + image_id=config_drive_id, + user=user, + project=project) + elif config_drive: + self._create_local(basepath('disk.config'), 64, prefix="M", + fs_format='msdos') # 64MB if inst['key_data']: key = str(inst['key_data']) @@ -924,15 +928,18 @@ class LibvirtConnection(driver.ComputeDriver): searchList=[{'interfaces': nets, 'use_ipv6': FLAGS.use_ipv6}])) - if any(key, net, inst['metadata']): + metadata = inst.get('metadata') + if any((key, net, metadata)): inst_name = inst['name'] - if inst['config_drive']: # Should be True or None by now. - injection_path = basepath('config') + if config_drive: # Should be True or None by now. 
+ injection_path = basepath('disk.config') img_id = 'config-drive' + tune2fs = False else: injection_path = basepath('disk') img_id = inst.image_ref + tune2fs = True for injection in ('metadata', 'key', 'net'): if locals()[injection]: @@ -940,9 +947,10 @@ class LibvirtConnection(driver.ComputeDriver): '%(injection)s into image %(img_id)s' % locals())) try: - disk.inject_data(injection_path, key, net, inst['metadata'], + disk.inject_data(injection_path, key, net, metadata, partition=target_partition, - nbd=FLAGS.use_cow_images) + nbd=FLAGS.use_cow_images, + tune2fs=tune2fs) if FLAGS.libvirt_type == 'lxc': disk.setup_container(basepath('disk'), @@ -1043,6 +1051,11 @@ class LibvirtConnection(driver.ComputeDriver): 'ebs_root': ebs_root, 'volumes': block_device_mapping} + + config_drive = False + if instance.get('config_drive') or instance.get('config_drive_id'): + xml_info['config_drive'] = xml_info['basepath'] + "/disk.config" + if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'): xml_info['vncserver_host'] = FLAGS.vncserver_host xml_info['vnc_keymap'] = FLAGS.vnc_keymap -- cgit From b776f19c21d1a56ac851435182c0c267166d49dd Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Thu, 11 Aug 2011 17:59:41 -0700 Subject: Accidentally added inject_files to merge --- nova/virt/disk.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/virt/disk.py b/nova/virt/disk.py index fda3f5f29..63e09d014 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -223,9 +223,6 @@ def inject_data_into_fs(fs, key, net, metadata, execute): if metadata: _inject_metadata_into_fs(metadata, fs, execute=execute) -def _inject_file_into_fs(injected_files, fs, execute=None): - for path, data in injected_files: - def _inject_metadata_into_fs(metadata, fs, execute=None): metadata_path = os.path.join(fs, "meta.js") @@ -234,6 +231,7 @@ def _inject_metadata_into_fs(metadata, fs, execute=None): utils.execute('sudo', 'tee', '-a', metadata_path, process_input=json.dumps(metadata)) + def _inject_key_into_fs(key, fs, execute=None): """Add the given public ssh key to root's authorized_keys. -- cgit From b66ea57ae10bac1656e11663e273837dfae67814 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 12 Aug 2011 12:51:54 -0700 Subject: removed VSA/drive_type code from EC2 cloud. 
changed nova-manage not to use cloud APIs --- nova/api/ec2/__init__.py | 4 -- nova/api/ec2/cloud.py | 153 ----------------------------------------------- nova/vsa/api.py | 2 +- nova/vsa/drive_types.py | 19 +++++- 4 files changed, 18 insertions(+), 160 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 35c809547..8b6e47cfb 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -268,10 +268,6 @@ class Authorizer(wsgi.Middleware): 'StartInstances': ['projectmanager', 'sysadmin'], 'StopInstances': ['projectmanager', 'sysadmin'], 'DeleteVolume': ['projectmanager', 'sysadmin'], - 'CreateVsa': ['projectmanager', 'sysadmin'], - 'DeleteVsa': ['projectmanager', 'sysadmin'], - 'DescribeVsas': ['projectmanager', 'sysadmin'], - 'DescribeDriveTypes': ['projectmanager', 'sysadmin'], 'DescribeImages': ['all'], 'DeregisterImage': ['projectmanager', 'sysadmin'], 'RegisterImage': ['projectmanager', 'sysadmin'], diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ac0ff713b..87bba58c3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -45,8 +45,6 @@ from nova import network from nova import rpc from nova import utils from nova import volume -from nova import vsa -from nova.vsa import drive_types from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.image import s3 @@ -184,7 +182,6 @@ class CloudController(object): self.compute_api = compute.API( network_api=self.network_api, volume_api=self.volume_api) - self.vsa_api = vsa.API(compute_api=self.compute_api) self.setup() def __str__(self): @@ -998,156 +995,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} - def _format_vsa(self, context, p_vsa): - vsa = {} - vsa['vsaId'] = p_vsa['id'] - vsa['status'] = p_vsa['status'] - vsa['availabilityZone'] = p_vsa['availability_zone'] - vsa['createTime'] = p_vsa['created_at'] - vsa['name'] = p_vsa['name'] - vsa['displayName'] = p_vsa['display_name'] - vsa['displayDescription'] = p_vsa['display_description'] - vsa['vcCount'] = p_vsa['vc_count'] - if p_vsa['vsa_instance_type']: - vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None) - else: - vsa['vcType'] = None - - vols = self.volume_api.get_all_by_vsa(context, p_vsa['id'], "to") - vsa['volCount'] = 0 if vols is None else len(vols) - - return vsa - - def create_vsa(self, context, **kwargs): - display_name = kwargs.get('display_name') - display_description = kwargs.get('display_description') - vc_count = int(kwargs.get('vc_count', 1)) - instance_type = instance_types.get_instance_type_by_name( - kwargs.get('vc_type', FLAGS.default_vsa_instance_type)) - image_name = kwargs.get('image_name') - availability_zone = kwargs.get('placement', {}).get( - 'AvailabilityZone') - storage = kwargs.get('storage', []) - shared = kwargs.get('shared', False) - - vc_type = instance_type['name'] - _storage = str(storage) - LOG.audit(_("Create VSA %(display_name)s vc_count:%(vc_count)d "\ - "vc_type:%(vc_type)s storage:%(_storage)s"), locals()) - - vsa = self.vsa_api.create(context, display_name, display_description, - vc_count, instance_type, image_name, - availability_zone, storage, shared) - return {'vsaSet': [self._format_vsa(context, vsa)]} - - def update_vsa(self, context, vsa_id, **kwargs): - LOG.audit(_("Update VSA %s"), vsa_id) - updatable_fields = ['display_name', 'display_description', 'vc_count'] - changes = {} - for field in updatable_fields: - if field in kwargs: - changes[field] 
= kwargs[field] - if changes: - vsa_id = ec2utils.ec2_id_to_id(vsa_id) - self.vsa_api.update(context, vsa_id=vsa_id, **changes) - return True - - def delete_vsa(self, context, vsa_id, **kwargs): - LOG.audit(_("Delete VSA %s"), vsa_id) - vsa_id = ec2utils.ec2_id_to_id(vsa_id) - - self.vsa_api.delete(context, vsa_id) - - return True - - def describe_vsas(self, context, vsa_id=None, status=None, - availability_zone=None, **kwargs): - LOG.audit(_("Describe VSAs")) - result = [] - vsas = [] - if vsa_id is not None: - for ec2_id in vsa_id: - internal_id = ec2utils.ec2_id_to_id(ec2_id) - vsa = self.vsa_api.get(context, internal_id) - vsas.append(vsa) - else: - vsas = self.vsa_api.get_all(context) - - if status: - result = [] - for vsa in vsas: - if vsa['status'] in status: - result.append(vsa) - vsas = result - - if availability_zone: - result = [] - for vsa in vsas: - if vsa['availability_zone'] in availability_zone: - result.append(vsa) - vsas = result - - return {'vsaSet': [self._format_vsa(context, vsa) for vsa in vsas]} - - def create_drive_type(self, context, **kwargs): - name = kwargs.get('name') - type = kwargs.get('type') - size_gb = int(kwargs.get('size_gb')) - rpm = kwargs.get('rpm') - capabilities = kwargs.get('capabilities') - visible = kwargs.get('visible', True) - - LOG.audit(_("Create Drive Type %(name)s: %(type)s %(size_gb)d "\ - "%(rpm)s %(capabilities)s %(visible)s"), - locals()) - - rv = drive_types.create(context, type, size_gb, rpm, - capabilities, visible, name) - return {'driveTypeSet': [dict(rv)]} - - def update_drive_type(self, context, name, **kwargs): - LOG.audit(_("Update Drive Type %s"), name) - - dtype = drive_types.get_by_name(context, name) - - updatable_fields = ['type', - 'size_gb', - 'rpm', - 'capabilities', - 'visible'] - changes = {} - for field in updatable_fields: - if field in kwargs and \ - kwargs[field] is not None and \ - kwargs[field] != '': - changes[field] = kwargs[field] - - if changes: - drive_types.update(context, dtype['id'], **changes) - return True - - def rename_drive_type(self, context, name, new_name): - drive_types.rename(context, name, new_name) - return True - - def delete_drive_type(self, context, name): - dtype = drive_types.get_by_name(context, name) - drive_types.delete(context, dtype['id']) - return True - - def describe_drive_types(self, context, names=None, visible=True): - - drives = [] - if names is not None: - for name in names: - drive = drive_types.get_by_name(context, name) - if drive['visible'] == visible: - drives.append(drive) - else: - drives = drive_types.get_all(context, visible) - # (VP-TMP): Change to EC2 compliant output later - return {'driveTypeSet': [dict(drive) for drive in drives]} - @staticmethod def _convert_to_set(lst, label): if lst is None or lst == []: diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 3588e58cc..19185b907 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -159,7 +159,7 @@ class API(base.Base): shared = True # check if image is ready before starting any work - if image_name is None or image_name == '': + if image_name is None: image_name = FLAGS.vc_image_name try: image_service = self.compute_api.image_service diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 86ff76b96..3c67fdbb9 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -64,8 +64,23 @@ def create(context, type, size_gb, rpm, capabilities='', def update(context, id, **kwargs): - LOG.debug(_("Updating drive type with id %(id)s"), locals()) - return db.drive_type_update(context, id, kwargs) 
+ + LOG.debug(_("Updating drive type with id %(id)s: %(kwargs)s"), locals()) + + updatable_fields = ['type', + 'size_gb', + 'rpm', + 'capabilities', + 'visible'] + changes = {} + for field in updatable_fields: + if field in kwargs and \ + kwargs[field] is not None and \ + kwargs[field] != '': + changes[field] = kwargs[field] + + # call update regadless if changes is empty or not + return db.drive_type_update(context, id, changes) def rename(context, name, new_name=None): -- cgit From 91eaa647506a2e343e8c689289529eafea0bc9d3 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Fri, 12 Aug 2011 14:33:27 -0700 Subject: Fix ugly little violations before someone says anything --- nova/api/openstack/create_instance_helper.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 0ec455167..d776ae92d 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -178,8 +178,6 @@ class CreateInstanceHelper(object): def _handle_quota_error(self, error): """ Reraise quota errors as api-specific http exceptions - - """ if error.code == "OnsetFileLimitExceeded": expl = _("Personality file limit exceeded") -- cgit From f11581221a3739c25e5f7b77b96f3dc8d332af5c Mon Sep 17 00:00:00 2001 From: Tim Simpson Date: Tue, 16 Aug 2011 08:55:17 -0500 Subject: Added list_notifier, a driver for the notifer api which calls a list of other drivers. --- nova/notifier/list_notifier.py | 50 ++++++++++++++++++ nova/tests/notifier/__init__.py | 16 ++++++ nova/tests/notifier/test_list_notifier.py | 85 +++++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+) create mode 100644 nova/notifier/list_notifier.py create mode 100644 nova/tests/notifier/__init__.py create mode 100644 nova/tests/notifier/test_list_notifier.py (limited to 'nova') diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py new file mode 100644 index 000000000..d45a31ecb --- /dev/null +++ b/nova/notifier/list_notifier.py @@ -0,0 +1,50 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import log as logging +from nova import utils + +flags.DEFINE_multistring('list_notifier_drivers', + ['nova.notifier.no_op_notifier'], + 'List of drivers to send notifications') + +FLAGS = flags.FLAGS + +LOG = logging.getLogger('nova.notifier.list_notifier') + +drivers = None + +def __get_drivers(): + """Instantiates and returns drivers based on the flag values.""" + global drivers + if not drivers: + drivers = [utils.import_object(notification_driver) + for notification_driver in FLAGS.list_notifier_drivers] + return drivers + +def notify(message): + """Passes notification to mulitple notifiers in a list.""" + for driver in __get_drivers(): + try: + driver.notify(message) + except Exception as e: + LOG.exception(_("Problem '%(e)s' attempting to send to " + "notification driver %(driver)s." 
% locals())) + +def _reset_drivers(): + """Used by unit tests to reset the drivers.""" + global drivers + drivers = None diff --git a/nova/tests/notifier/__init__.py b/nova/tests/notifier/__init__.py new file mode 100644 index 000000000..bd862c46a --- /dev/null +++ b/nova/tests/notifier/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.tests import * diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py new file mode 100644 index 000000000..65c2ecd90 --- /dev/null +++ b/nova/tests/notifier/test_list_notifier.py @@ -0,0 +1,85 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import stubout + +import nova +from nova import context +from nova import flags +from nova import log as logging +from nova import rpc +import nova.notifier.api +from nova.notifier.api import notify +from nova.notifier import log_notifier +from nova.notifier import no_op_notifier +from nova.notifier import list_notifier +from nova.notifier import rabbit_notifier +from nova import test + + +class NotifierListTestCase(test.TestCase): + """Test case for notifications""" + def setUp(self): + super(NotifierListTestCase, self).setUp() + list_notifier._reset_drivers() + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + list_notifier._reset_drivers() + super(NotifierListTestCase, self).tearDown() + + def test_send_notifications_successfully(self): + self.flags(notification_driver='nova.notifier.list_notifier', + list_notifier_drivers=['nova.notifier.no_op_notifier', + 'nova.notifier.no_op_notifier']) + self.notify_count = 0 + + def mock_notify(cls, *args): + self.notify_count += 1 + + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', + mock_notify) + + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.notify_count, 2) + + def test_send_notifications_with_errors(self): + self.exception_count = 0 + def mock_exception(cls, *args): + self.exception_count += 1 + + self.notify_count = 0 + def mock_notify(cls, *args): + self.notify_count += 1 + + def mock_notify2(cls, *args): + raise RuntimeError("Bad notifier.") + + self.flags(notification_driver='nova.notifier.list_notifier', + list_notifier_drivers=['nova.notifier.no_op_notifier', + 'nova.notifier.log_notifier']) + + list_notifier_log = logging.getLogger('nova.notifier.list_notifier') + list_notifier_log.exception + self.stubs.Set(list_notifier_log, "exception", mock_exception) + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify) + self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) + + notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) + + self.assertEqual(self.notify_count, 1) + self.assertEqual(self.exception_count, 1) -- cgit From 9ad17dec99608b3738d662d91c49964b3f207d02 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Wed, 17 Aug 2011 09:57:15 -0700 Subject: Add NetworkCommandsTestCase into unit test of nova-manage --- nova/tests/test_nova_manage.py | 73 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 9c6563f14..69e73367f 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import gettext import os import sys @@ -24,16 +25,20 @@ TOPDIR = os.path.normpath(os.path.join( os.pardir)) NOVA_MANAGE_PATH = os.path.join(TOPDIR, 'bin', 'nova-manage') +gettext.install('nova', unicode=1) + sys.dont_write_bytecode = True import imp nova_manage = imp.load_source('nova_manage.py', NOVA_MANAGE_PATH) sys.dont_write_bytecode = False import netaddr +import StringIO from nova import context from nova import db from nova import flags from nova import test +from nova import exception FLAGS = flags.FLAGS @@ -80,3 +85,71 @@ class FixedIpCommandsTestCase(test.TestCase): address = db.fixed_ip_get_by_address(context.get_admin_context(), '10.0.0.100') self.assertEqual(address['reserved'], False) + +class NetworkCommandsTestCase(test.TestCase): + def setUp(self): +# print 'piyo' + super(NetworkCommandsTestCase, self).setUp() + self.commands = nova_manage.NetworkCommands() + self.context = context.get_admin_context() + nets = db.network_get_all(self.context) + for net in nets: + db.network_delete_safe(self.context, net['id']) + + def tearDown(self): + super(NetworkCommandsTestCase, self).tearDown() + + def test_create(self): + self.commands.create( + label = 'Test', + fixed_range_v4 = '10.2.0.0/24', + fixed_range_v6 = 'fd00:2::/64', + num_networks = 1, + network_size = 256, + vlan_start = 200, + bridge_interface = 'eth0', + ) + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + self.assertEqual(net['label'], 'Test') + self.assertEqual(net['cidr'], '10.2.0.0/24') + self.assertEqual(net['netmask'], '255.255.255.0') + self.assertEqual(net['cidr_v6'], 'fd00:2::/64') + self.assertEqual(net['bridge_interface'], 'eth0') + self.assertEqual(net['vlan'], 200) + + def test_list(self): + format = "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" + head = format % ( + _('IPv4'), + _('IPv6'), + _('start address'), + _('DNS1'), + _('DNS2'), + _('VlanID'), + _('project')) + body = format % ( + '10.2.0.0/24', + 'fd00:2::/64', + '10.2.0.3', + 'None', + 'None', + '200', + 'None',) + self.test_create() + output = StringIO.StringIO() + sys.stdout = output + self.commands.list() + sys.stdout = sys.__stdout__ + result = output.getvalue() + answer = '%s\n%s\n' % (head, body) + self.assertEqual(result, answer) + + def test_delete(self): + self.test_create() + self.commands.delete(fixed_range = '10.2.0.0/24') + net_exist = True + try: + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + except exception.NetworkNotFoundForCidr, e: + net_exist = False + self.assertEqual(net_exist, False) -- cgit From cabf9cc8f29ad8c99971c434516e1b911f07f32f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 17 Aug 2011 16:27:12 -0700 Subject: nova-manage VSA print & forced update_cap changes; fixed bug with report capabilities; added IP address to VSA APIs; added instances to APIs --- .../openstack/contrib/virtual_storage_arrays.py | 107 +++++++++++++++++++-- nova/tests/api/openstack/contrib/test_vsa.py | 2 + nova/volume/manager.py | 12 ++- 3 files changed, 113 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 842573f8a..d6c4a5ef4 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -23,6 +23,7 @@ from webob import exc from nova import vsa from nova import volume +from nova import compute from nova import db from nova import quota from nova import exception @@ -31,6 +32,7 @@ from nova.api.openstack import common 
from nova.api.openstack import extensions from nova.api.openstack import faults from nova.api.openstack import wsgi +from nova.api.openstack import servers from nova.api.openstack.contrib import volumes from nova.compute import instance_types @@ -40,7 +42,7 @@ FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api.vsa") -def _vsa_view(context, vsa, details=False): +def _vsa_view(context, vsa, details=False, instances=None): """Map keys for vsa summary/detailed view.""" d = {} @@ -55,11 +57,27 @@ def _vsa_view(context, vsa, details=False): if 'vsa_instance_type' in vsa: d['vcType'] = vsa['vsa_instance_type'].get('name', None) else: - d['vcType'] = None + d['vcType'] = vsa['instance_type_id'] d['vcCount'] = vsa.get('vc_count') d['driveCount'] = vsa.get('vol_count') + d['ipAddress'] = None + for instance in instances: + fixed_addr = None + floating_addr = None + if instance['fixed_ips']: + fixed = instance['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: + floating_addr = fixed['floating_ips'][0]['address'] + + if floating_addr: + d['ipAddress'] = floating_addr + break + else: + d['ipAddress'] = d['ipAddress'] or fixed_addr + return d @@ -79,10 +97,12 @@ class VsaController(object): "vcType", "vcCount", "driveCount", + "ipAddress", ]}}} def __init__(self): self.vsa_api = vsa.API() + self.compute_api = compute.API() super(VsaController, self).__init__() def _items(self, req, details): @@ -90,8 +110,13 @@ class VsaController(object): context = req.environ['nova.context'] vsas = self.vsa_api.get_all(context) limited_list = common.limited(vsas, req) - res = [_vsa_view(context, vsa, details) for vsa in limited_list] - return {'vsaSet': res} + + vsa_list = [] + for vsa in limited_list: + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) + vsa_list.append(_vsa_view(context, vsa, details, instances)) + return {'vsaSet': vsa_list} def index(self, req): """Return a short list of VSAs.""" @@ -110,7 +135,10 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'vsa': _vsa_view(context, vsa, details=True)} + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) + + return {'vsa': _vsa_view(context, vsa, True, instances)} def create(self, req, body): """Create a new VSA.""" @@ -140,9 +168,12 @@ class VsaController(object): availability_zone=vsa.get('placement', {}).\ get('AvailabilityZone')) - result = self.vsa_api.create(context, **args) + vsa = self.vsa_api.create(context, **args) + + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) - return {'vsa': _vsa_view(context, result, details=True)} + return {'vsa': _vsa_view(context, vsa, True, instances)} def delete(self, req, id): """Delete a VSA.""" @@ -405,6 +436,61 @@ class VsaVPoolController(object): return faults.Fault(exc.HTTPBadRequest()) +class VsaVCController(servers.ControllerV11): + """The VSA Virtual Controller API controller for the OpenStack API.""" + + def __init__(self): + self.vsa_api = vsa.API() + self.compute_api = compute.API() + self.vsa_id = None # VP-TODO: temporary ugly hack + super(VsaVCController, self).__init__() + + def _get_servers(self, req, is_detail): + """Returns a list of servers, taking into account any search + options specified. 
+ """ + + if self.vsa_id is None: + super(VsaVCController, self)._get_servers(req, is_detail) + + context = req.environ['nova.context'] + + search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))} + instance_list = self.compute_api.get_all( + context, search_opts=search_opts) + + limited_list = self._limit_items(instance_list, req) + servers = [self._build_view(req, inst, is_detail)['server'] + for inst in limited_list] + return dict(servers=servers) + + def index(self, req, vsa_id): + """Return list of instances for particular VSA.""" + + LOG.audit(_("Index instances for VSA %s"), vsa_id) + + self.vsa_id = vsa_id # VP-TODO: temporary ugly hack + result = super(VsaVCController, self).detail(req) + self.vsa_id = None + return result + + def create(self, req, vsa_id, body): + """Create a new instance for VSA.""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update VSA instance.""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete VSA instance.""" + return faults.Fault(exc.HTTPBadRequest()) + + def show(self, req, vsa_id, id): + """Return data about the given instance.""" + return super(VsaVCController, self).show(req, id) + + class Virtual_storage_arrays(extensions.ExtensionDescriptor): def get_name(self): @@ -455,4 +541,11 @@ class Virtual_storage_arrays(extensions.ExtensionDescriptor): collection_name='zadr-vsa')) resources.append(res) + res = extensions.ResourceExtension('instances', + VsaVCController(), + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + return resources diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index 3c9136e14..a9b76b0ff 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -46,6 +46,7 @@ def _get_default_vsa_param(): 'display_description': 'Test_VSA_description', 'vc_count': 1, 'instance_type': 'm1.small', + 'instance_type_id': 5, 'image_name': None, 'availability_zone': None, 'storage': [], @@ -58,6 +59,7 @@ def stub_vsa_create(self, context, **param): LOG.debug(_("_create: param=%s"), param) param['id'] = 123 param['name'] = 'Test name' + param['instance_type_id'] = 5 last_param = param return param diff --git a/nova/volume/manager.py b/nova/volume/manager.py index fd1d5acfa..b23bff1fc 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -61,6 +61,8 @@ flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') +flags.DEFINE_boolean('volume_force_update_capabilities', False, + 'if True will force update capabilities on each check') class VolumeManager(manager.SchedulerDependentManager): @@ -138,6 +140,7 @@ class VolumeManager(manager.SchedulerDependentManager): 'launched_at': now}) LOG.debug(_("volume %s: created successfully"), volume_ref['name']) self._notify_vsa(context, volume_ref, 'available') + self._reset_stats() return volume_id def _notify_vsa(self, context, volume_ref, status): @@ -158,6 +161,7 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) + self._reset_stats() try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) @@ -265,6 +269,8 @@ class 
VolumeManager(manager.SchedulerDependentManager): return error_list def _volume_stats_changed(self, stat1, stat2): + if FLAGS.volume_force_update_capabilities: + return True if len(stat1) != len(stat2): return True for (k, v) in stat1.iteritems(): @@ -289,6 +295,10 @@ class VolumeManager(manager.SchedulerDependentManager): # avoid repeating fanouts self.update_service_capabilities(None) + def _reset_stats(self): + LOG.info(_("Clear capabilities")) + self._last_volume_stats = [] + def notification(self, context, event): LOG.info(_("Notification {%s} received"), event) - self._last_volume_stats = [] + self._reset_stats() -- cgit From 90650e5becb541790a8949edebaf0bff0ceb8f5b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Aug 2011 19:31:01 -0700 Subject: make admin context the default, clean up pipelib --- nova/api/auth.py | 18 ++++++++++++++++++ nova/api/ec2/admin.py | 4 +++- nova/auth/manager.py | 3 +++ nova/cloudpipe/pipelib.py | 7 +++---- 4 files changed, 27 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/api/auth.py b/nova/api/auth.py index cd3e3e8a0..050216fd7 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -45,6 +45,24 @@ class InjectContext(wsgi.Middleware): return self.application +class AdminContext(wsgi.Middleware): + """Return an admin context no matter what""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + # Build a context, including the auth_token... + remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext('admin', + 'admin', + is_admin=True, + remote_address=remote_address) + + req.environ['nova.context'] = ctx + return self.application + + class KeystoneContext(wsgi.Middleware): """Make a request context from keystone headers""" diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index df7876b9d..dfbbc0a2b 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -283,8 +283,10 @@ class AdminController(object): # NOTE(vish) import delayed because of __init__.py from nova.cloudpipe import pipelib pipe = pipelib.CloudPipe() + proj = manager.AuthManager().get_project(project) + user_id = proj.project_manager_id try: - pipe.launch_vpn_instance(project) + pipe.launch_vpn_instance(project, user_id) except db.NoMoreNetworks: raise exception.ApiError("Unable to claim IP for VPN instance" ", ensure it isn't running, and try " diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 6205cfb56..c9178c0dd 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -17,6 +17,9 @@ # under the License. """ +WARNING: This code is deprecated and will be removed. +Keystone is the recommended solution for auth management.
+ Nova authentication management """ diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 2c4673f9e..1b742384c 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -93,11 +93,10 @@ class CloudPipe(object): zippy.close() return encoded - def launch_vpn_instance(self, project_id): + def launch_vpn_instance(self, project_id, user_id): LOG.debug(_("Launching VPN for %s") % (project_id)) - project = self.manager.get_project(project_id) - ctxt = context.RequestContext(user=project.project_manager_id, - project=project.id) + ctxt = context.RequestContext(user_id=user_id, + project_id=project_id) key_name = self.setup_key_pair(ctxt) group_name = self.setup_security_group(ctxt) -- cgit From 41819d8d048b889f2e7f5e4ee0ff2873bfdef904 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Aug 2011 20:22:30 -0700 Subject: fix integration tests --- nova/api/openstack/auth.py | 17 +++++ nova/tests/integrated/integrated_helpers.py | 109 ++++------------------------ nova/tests/integrated/test_login.py | 33 --------- nova/tests/integrated/test_servers.py | 2 +- 4 files changed, 31 insertions(+), 130 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index d42abe1f8..f4a50fc46 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -34,6 +34,23 @@ LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS +class NoAuthMiddleware(wsgi.Middleware): + """Return a fake token if one isn't specified.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if 'X-Auth-Token' in req.headers: + return self.application + logging.debug("Got no auth token, returning fake info.") + res = webob.Response() + res.headers['X-Auth-Token'] = 'fake' + res.headers['X-Server-Management-Url'] = req.url + res.headers['X-Storage-Url'] = '' + res.headers['X-CDN-Management-Url'] = '' + res.content_type = 'text/plain' + res.status = '204' + return res + class AuthMiddleware(wsgi.Middleware): """Authorize the openstack API request or return an HTTP Forbidden.""" diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index fb2f88502..343190427 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -22,10 +22,8 @@ Provides common functionality for integrated unit tests import random import string -from nova import exception from nova import service from nova import test # For the flags -from nova.auth import manager import nova.image.glance from nova.log import logging from nova.tests.integrated.api import client @@ -58,90 +56,6 @@ def generate_new_element(items, prefix, numeric=False): LOG.debug("Random collision on %s" % candidate) -class TestUser(object): - def __init__(self, name, secret, auth_url): - self.name = name - self.secret = secret - self.auth_url = auth_url - - if not auth_url: - raise exception.Error("auth_url is required") - self.openstack_api = client.TestOpenStackClient(self.name, - self.secret, - self.auth_url) - - def get_unused_server_name(self): - servers = self.openstack_api.get_servers() - server_names = [server['name'] for server in servers] - return generate_new_element(server_names, 'server') - - def get_invalid_image(self): - images = self.openstack_api.get_images() - image_ids = [image['id'] for image in images] - return generate_new_element(image_ids, '', numeric=True) - - def get_valid_image(self, create=False): - images = self.openstack_api.get_images() - if 
create and not images: - # TODO(justinsb): No way currently to create an image through API - #created_image = self.openstack_api.post_image(image) - #images.append(created_image) - raise exception.Error("No way to create an image through API") - - if images: - return images[0] - return None - - -class IntegratedUnitTestContext(object): - def __init__(self, auth_url): - self.auth_manager = manager.AuthManager() - - self.auth_url = auth_url - self.project_name = None - - self.test_user = None - - self.setup() - - def setup(self): - self._create_test_user() - - def _create_test_user(self): - self.test_user = self._create_unittest_user() - - # No way to currently pass this through the OpenStack API - self.project_name = 'openstack' - self._configure_project(self.project_name, self.test_user) - - def cleanup(self): - self.test_user = None - - def _create_unittest_user(self): - users = self.auth_manager.get_users() - user_names = [user.name for user in users] - auth_name = generate_new_element(user_names, 'unittest_user_') - auth_key = generate_random_alphanumeric(16) - - # Right now there's a bug where auth_name and auth_key are reversed - # bug732907 - auth_key = auth_name - - self.auth_manager.create_user(auth_name, auth_name, auth_key, False) - return TestUser(auth_name, auth_key, self.auth_url) - - def _configure_project(self, project_name, user): - projects = self.auth_manager.get_projects() - project_names = [project.name for project in projects] - if not project_name in project_names: - project = self.auth_manager.create_project(project_name, - user.name, - description=None, - member_users=None) - else: - self.auth_manager.add_to_project(user.name, project_name) - - class _IntegratedTestBase(test.TestCase): def setUp(self): super(_IntegratedTestBase, self).setUp() @@ -163,10 +77,7 @@ class _IntegratedTestBase(test.TestCase): self._start_api_service() - self.context = IntegratedUnitTestContext(self.auth_url) - - self.user = self.context.test_user - self.api = self.user.openstack_api + self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url) def _start_api_service(self): osapi = service.WSGIService("osapi") @@ -174,10 +85,6 @@ class _IntegratedTestBase(test.TestCase): self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port) LOG.warn(self.auth_url) - def tearDown(self): - self.context.cleanup() - super(_IntegratedTestBase, self).tearDown() - def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} @@ -190,10 +97,20 @@ class _IntegratedTestBase(test.TestCase): f['fake_network'] = True return f + def get_unused_server_name(self): + servers = self.api.get_servers() + server_names = [server['name'] for server in servers] + return generate_new_element(server_names, 'server') + + def get_invalid_image(self): + images = self.api.get_images() + image_ids = [image['id'] for image in images] + return generate_new_element(image_ids, '', numeric=True) + def _build_minimal_create_server_request(self): server = {} - image = self.user.get_valid_image(create=True) + image = self.api.get_images()[0] LOG.debug("Image: %s" % image) if 'imageRef' in image: @@ -211,7 +128,7 @@ class _IntegratedTestBase(test.TestCase): server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] # Set a valid server name - server_name = self.user.get_unused_server_name() + server_name = self.get_unused_server_name() server['name'] = server_name return server diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py index 06359a52f..3a863d0f9 
100644 --- a/nova/tests/integrated/test_login.py +++ b/nova/tests/integrated/test_login.py @@ -15,11 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest from nova.log import logging from nova.tests.integrated import integrated_helpers -from nova.tests.integrated.api import client LOG = logging.getLogger('nova.tests.integrated') @@ -31,34 +29,3 @@ class LoginTest(integrated_helpers._IntegratedTestBase): flavors = self.api.get_flavors() for flavor in flavors: LOG.debug(_("flavor: %s") % flavor) - - def test_bad_login_password(self): - """Test that I get a 401 with a bad username.""" - bad_credentials_api = client.TestOpenStackClient(self.user.name, - "notso_password", - self.user.auth_url) - - self.assertRaises(client.OpenStackApiAuthenticationException, - bad_credentials_api.get_flavors) - - def test_bad_login_username(self): - """Test that I get a 401 with a bad password.""" - bad_credentials_api = client.TestOpenStackClient("notso_username", - self.user.secret, - self.user.auth_url) - - self.assertRaises(client.OpenStackApiAuthenticationException, - bad_credentials_api.get_flavors) - - def test_bad_login_both_bad(self): - """Test that I get a 401 with both bad username and bad password.""" - bad_credentials_api = client.TestOpenStackClient("notso_username", - "notso_password", - self.user.auth_url) - - self.assertRaises(client.OpenStackApiAuthenticationException, - bad_credentials_api.get_flavors) - - -if __name__ == "__main__": - unittest.main() diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 725f6d529..c2f800689 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -51,7 +51,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.api.post_server, post) # With an invalid imageRef, this throws 500. 
- server['imageRef'] = self.user.get_invalid_image() + server['imageRef'] = self.get_invalid_image() # TODO(justinsb): Check whatever the spec says should be thrown here self.assertRaises(client.OpenStackApiException, self.api.post_server, post) -- cgit From a1ceed43d6ab871d3dea721b855bd7eabec48433 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Aug 2011 20:24:52 -0700 Subject: clean up fake auth from server actions test --- nova/tests/api/openstack/test_server_actions.py | 8 -------- 1 file changed, 8 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 687a19390..3b419dec5 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -1,17 +1,13 @@ import base64 import json -import unittest -from xml.dom import minidom import stubout import webob from nova import context -from nova import db from nova import utils from nova import flags from nova.api.openstack import create_instance_helper -from nova.compute import instance_types from nova.compute import power_state import nova.db.api from nova import test @@ -103,8 +99,6 @@ class ServerActionsTest(test.TestCase): super(ServerActionsTest, self).setUp() self.flags(verbose=True) self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) self.stubs.Set(nova.db.api, 'instance_update', instance_update) @@ -468,8 +462,6 @@ class ServerActionsTestV11(test.TestCase): self.maxDiff = None super(ServerActionsTestV11, self).setUp() self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) self.stubs.Set(nova.db.api, 'instance_update', instance_update) -- cgit From 6c256a9a5c013f8674776adb2005b4f541f705b5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Aug 2011 20:26:33 -0700 Subject: remove extra reference in pipelib --- nova/cloudpipe/pipelib.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova') diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 1b742384c..3eb372844 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -34,7 +34,6 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils -from nova.auth import manager # TODO(eday): Eventually changes these to something not ec2-specific from nova.api.ec2 import cloud @@ -57,7 +56,6 @@ LOG = logging.getLogger('nova.cloudpipe') class CloudPipe(object): def __init__(self): self.controller = cloud.CloudController() - self.manager = manager.AuthManager() def get_encoded_zip(self, project_id): # Make a payload.zip -- cgit From 6d87608cf835e1c27f3b6b6b31e6b41b0aa90b90 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Aug 2011 20:35:54 -0700 Subject: pep8 --- nova/api/openstack/auth.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index f4a50fc46..b37f9aade 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -51,6 +51,7 @@ class NoAuthMiddleware(wsgi.Middleware): res.status = '204' return res + class AuthMiddleware(wsgi.Middleware): """Authorize the openstack API request or return an HTTP Forbidden.""" -- 
cgit From 50b7db2ab71c40732a979b1f424bd60627a74768 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 18 Aug 2011 13:24:56 -0700 Subject: first cut on types & extra-data (only DB work, no tests) --- nova/db/api.py | 60 +++++++ nova/db/sqlalchemy/api.py | 180 ++++++++++++++++++++- .../versions/037_add_volume_types_and_extradata.py | 103 ++++++++++++ nova/db/sqlalchemy/models.py | 30 ++++ nova/exception.py | 18 +++ 5 files changed, 390 insertions(+), 1 deletion(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index b9ea8757c..47e73226a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1424,3 +1424,63 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) + + +################## + + +def volume_type_create(context, values): + """Create a new volume type.""" + return IMPL.volume_type_create(context, values) + + +def volume_type_get_all(context, inactive=False): + """Get all volume types.""" + return IMPL.volume_type_get_all(context, inactive) + + +def volume_type_get(context, id): + """Get volume type by id.""" + return IMPL.volume_type_get(context, id) + + +def volume_type_get_by_name(context, name): + """Get volume type by name.""" + return IMPL.volume_type_get_by_name(context, name) + + +def volume_type_destroy(context, name): + """Delete a volume type.""" + return IMPL.volume_type_destroy(context, name) + + +def volume_type_purge(context, name): + """Purges (removes) a volume type from DB. + + Use volume_type_destroy for most cases + + """ + return IMPL.volume_type_purge(context, name) + + +#################### + + +def volume_type_extra_specs_get(context, volume_type_id): + """Get all extra specs for a volume type.""" + return IMPL.volume_type_extra_specs_get(context, volume_type_id) + + +def volume_type_extra_specs_delete(context, volume_type_id, key): + """Delete the given extra specs item.""" + IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) + + +def volume_type_extra_specs_update_or_create(context, volume_type_id, + extra_specs): + """Create or update volume type extra specs. This adds or modifies the + key/value pairs specified in the extra specs dict argument""" + IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, + extra_specs) + + diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 95ec3f715..ce1066e42 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3080,7 +3080,7 @@ def instance_type_create(_context, values): def _dict_with_extra_specs(inst_type_query): - """Takes an instance type query returned by sqlalchemy + """Takes an instance OR volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: @@ -3462,3 +3462,181 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, "deleted": 0}) spec_ref.save(session=session) return specs + + +################## + + +@require_admin_context +def volume_type_create(_context, values): + """Create a new volume type.
In order to pass in extra specs, + the values dict should contain a 'extra_specs' key/value pair: + + {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} + + """ + try: + specs = values.get('extra_specs') + specs_refs = [] + if specs: + for k, v in specs.iteritems(): + specs_ref = models.VolumeTypeExtraSpecs() + specs_ref['key'] = k + specs_ref['value'] = v + specs_refs.append(specs_ref) + values['extra_specs'] = specs_refs + volume_type_ref = models.VolumeTypes() + volume_type_ref.update(values) + volume_type_ref.save() + except Exception, e: + raise exception.DBError(e) + return volume_type_ref + + +@require_context +def volume_type_get_all(context, inactive=False): + """ + Returns a dict describing all volume_types with name as key. + """ + session = get_session() + if inactive: + inst_types = session.query(models.VolumeTypes).\ + options(joinedload('extra_specs')).\ + order_by("name").\ + all() + else: + inst_types = session.query(models.VolumeTypes).\ + options(joinedload('extra_specs')).\ + filter_by(deleted=False).\ + order_by("name").\ + all() + inst_dict = {} + if inst_types: + for i in inst_types: + inst_dict[i['name']] = _dict_with_extra_specs(i) + return inst_dict + + +@require_context +def volume_type_get(context, id): + """Returns a dict describing specific volume_type""" + session = get_session() + inst_type = session.query(models.VolumeTypes).\ + options(joinedload('extra_specs')).\ + filter_by(id=id).\ + first() + + if not inst_type: + raise exception.VolumeTypeNotFound(volume_type=id) + else: + return _dict_with_extra_specs(inst_type) + + +@require_context +def volume_type_get_by_name(context, name): + """Returns a dict describing specific volume_type""" + session = get_session() + inst_type = session.query(models.VolumeTypes).\ + options(joinedload('extra_specs')).\ + filter_by(name=name).\ + first() + if not inst_type: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return _dict_with_extra_specs(inst_type) + + +@require_admin_context +def volume_type_destroy(context, name): + """ Marks specific volume_type as deleted""" + session = get_session() + volume_type_ref = session.query(models.VolumeTypes).\ + filter_by(name=name) + records = volume_type_ref.update(dict(deleted=True)) + if records == 0: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return volume_type_ref + + +@require_admin_context +def volume_type_purge(context, name): + """ Removes specific volume_type from DB + Usually volume_type_destroy should be used + """ + session = get_session() + volume_type_ref = session.query(models.VolumeTypes).\ + filter_by(name=name) + records = volume_type_ref.delete() + if records == 0: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return volume_type_ref + + +#################### + + +@require_context +def volume_type_extra_specs_get(context, volume_type_id): + session = get_session() + + spec_results = session.query(models.VolumeTypeExtraSpecs).\ + filter_by(volume_type_id=volume_type_id).\ + filter_by(deleted=False).\ + all() + + spec_dict = {} + for i in spec_results: + spec_dict[i['key']] = i['value'] + return spec_dict + + +@require_context +def volume_type_extra_specs_delete(context, volume_type_id, key): + session = get_session() + session.query(models.VolumeTypeExtraSpecs).\ + filter_by(volume_type_id=volume_type_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + 
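For reference, a minimal sketch of how the volume-type DB helpers introduced above are expected to be used together. This is an illustration only, not part of the patch; it assumes an admin request context and uses only the calls added in this commit:

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()

    # Create a type with extra specs, then read it back by name.
    db.api.volume_type_create(ctxt, dict(name='gold',
                                         extra_specs=dict(qos='high')))
    vol_type = db.api.volume_type_get_by_name(ctxt, 'gold')

    # _dict_with_extra_specs() flattens the extra_specs rows into a dict.
    assert vol_type['extra_specs'] == {'qos': 'high'}

    # Individual spec items can be dropped without deleting the type itself.
    db.api.volume_type_extra_specs_delete(ctxt, vol_type['id'], 'qos')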
+@require_context +def volume_type_extra_specs_get_item(context, volume_type_id, key, + session=None): + + if not session: + session = get_session() + + spec_result = session.query(models.VolumeTypeExtraSpecs).\ + filter_by(volume_type_id=volume_type_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + first() + + if not spec_result: + raise exception.\ + VolumeTypeExtraSpecsNotFound(extra_specs_key=key, + volume_type_id=volume_type_id) + return spec_result + + +@require_context +def volume_type_extra_specs_update_or_create(context, volume_type_id, + specs): + session = get_session() + spec_ref = None + for key, value in specs.iteritems(): + try: + spec_ref = volume_type_extra_specs_get_item( + context, volume_type_id, key, session) + except exception.VolumeTypeExtraSpecsNotFound, e: + spec_ref = models.VolumeTypeExtraSpecs() + spec_ref.update({"key": key, "value": value, + "volume_type_id": volume_type_id, + "deleted": 0}) + spec_ref.save(session=session) + return specs diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py new file mode 100644 index 000000000..1bfa26845 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Text, Boolean, ForeignKey + +from nova import log as logging + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of tables . 
+# + +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +volume_type_id = Column('volume_type_id', Integer(), nullable=True) + + +# New Tables +# + +volume_types = Table('volume_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + +volume_type_extra_specs_table = Table('volume_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_type_id', + Integer(), + ForeignKey('volume_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +new_tables = (volume_types, volume_type_extra_specs_table) + +# +# Tables to alter +# + + +def upgrade(migrate_engine): + + from nova import context + from nova import db + from nova import flags + + FLAGS = flags.FLAGS + + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in new_tables: + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + volumes.create_column(volume_type_id) + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + volumes.drop_column(volume_type_id) + + for table in new_tables: + table.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f2a4680b0..70834ddb5 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -312,6 +312,36 @@ class Volume(BASE, NovaBase): provider_location = Column(String(255)) provider_auth = Column(String(255)) + volume_type_id = Column(Integer) + + +class VolumeTypes(BASE, NovaBase): + """Represent possible volume_types of volumes offered""" + __tablename__ = "volume_types" + id = Column(Integer, primary_key=True) + name = Column(String(255), unique=True) + + volumes = relationship(Volume, + backref=backref('volume_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(Volume.volume_type_id == ' + 'VolumeTypes.id)') + + +class VolumeTypeExtraSpecs(BASE, NovaBase): + """Represents additional specs as key/value pairs for a volume_type""" + __tablename__ = 'volume_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_type_id = Column(Integer, ForeignKey('volume_types.id'), + nullable=False) + volume_type = relationship(VolumeTypes, backref="extra_specs", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'VolumeTypeExtraSpecs.instance_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.deleted == False)') + class Quota(BASE, NovaBase): """Represents a single quota override for a project. 
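A minimal sketch of how the new VolumeTypes/VolumeTypeExtraSpecs models above are meant to be queried (illustration only, not part of the patch; it mirrors the joinedload('extra_specs') pattern used by volume_type_get_all):

    from sqlalchemy.orm import joinedload

    from nova.db.sqlalchemy import models
    from nova.db.sqlalchemy.session import get_session

    session = get_session()
    vol_types = session.query(models.VolumeTypes).\
                        options(joinedload('extra_specs')).\
                        filter_by(deleted=False).\
                        all()
    for vol_type in vol_types:
        # Each row carries its extra_specs rows via the backref declared above.
        specs = dict((spec['key'], spec['value'])
                     for spec in vol_type.extra_specs)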
diff --git a/nova/exception.py b/nova/exception.py index b09d50797..ff4b7c80e 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -338,6 +338,24 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class NoVolumeTypesFound(NotFound): + message = _("Zero volume types found.") + + +class VolumeTypeNotFound(NotFound): + message = _("Volume type %(volume_type_id)s could not be found.") + + +class VolumeTypeNotFoundByName(VolumeTypeNotFound): + message = _("Volume type with name %(volume_type_name)s " + "could not be found.") + + +class VolumeTypeExtraSpecsNotFound(NotFound): + message = _("Volume Type %(volume_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") -- cgit From 7399805b96cefd9d0f88cec202edd9fdb2c91ec0 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 18 Aug 2011 13:38:11 -0700 Subject: typo --- nova/db/sqlalchemy/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 70834ddb5..08ce34647 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -339,7 +339,7 @@ class VolumeTypeExtraSpecs(BASE, NovaBase): volume_type = relationship(VolumeTypes, backref="extra_specs", foreign_keys=volume_type_id, primaryjoin='and_(' - 'VolumeTypeExtraSpecs.instance_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' 'VolumeTypeExtraSpecs.deleted == False)') -- cgit From b703b33cdd48c2409205504ef09cc91d287862bf Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 18 Aug 2011 14:40:05 -0700 Subject: added unittests for volume_extra_data --- nova/db/sqlalchemy/api.py | 26 ++-- .../versions/037_add_volume_types_and_extradata.py | 9 -- nova/tests/test_volume_types_extra_specs.py | 131 +++++++++++++++++++++ 3 files changed, 144 insertions(+), 22 deletions(-) create mode 100644 nova/tests/test_volume_types_extra_specs.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ce1066e42..a57133b72 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3500,50 +3500,50 @@ def volume_type_get_all(context, inactive=False): """ session = get_session() if inactive: - inst_types = session.query(models.VolumeTypes).\ + vol_types = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ order_by("name").\ all() else: - inst_types = session.query(models.VolumeTypes).\ + vol_types = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(deleted=False).\ order_by("name").\ all() - inst_dict = {} - if inst_types: - for i in inst_types: - inst_dict[i['name']] = _dict_with_extra_specs(i) - return inst_dict + vol_dict = {} + if vol_types: + for i in vol_types: + vol_dict[i['name']] = _dict_with_extra_specs(i) + return vol_dict @require_context def volume_type_get(context, id): """Returns a dict describing specific volume_type""" session = get_session() - inst_type = session.query(models.VolumeTypes).\ + vol_type = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() - if not inst_type: + if not vol_type: raise exception.VolumeTypeNotFound(volume_type=id) else: - return _dict_with_extra_specs(inst_type) + return _dict_with_extra_specs(vol_type) @require_context def volume_type_get_by_name(context, name): """Returns a dict describing 
specific volume_type""" session = get_session() - inst_type = session.query(models.VolumeTypes).\ + vol_type = session.query(models.VolumeTypes).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() - if not inst_type: + if not vol_type: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: - return _dict_with_extra_specs(inst_type) + return _dict_with_extra_specs(vol_type) @require_admin_context diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py index 1bfa26845..ed8eeb172 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py @@ -73,15 +73,6 @@ new_tables = (volume_types, volume_type_extra_specs_table) def upgrade(migrate_engine): - - from nova import context - from nova import db - from nova import flags - - FLAGS = flags.FLAGS - - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata meta.bind = migrate_engine for table in new_tables: diff --git a/nova/tests/test_volume_types_extra_specs.py b/nova/tests/test_volume_types_extra_specs.py new file mode 100644 index 000000000..c48ed789e --- /dev/null +++ b/nova/tests/test_volume_types_extra_specs.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for volume types extra specs code +""" + +from nova import context +from nova import db +from nova import test +from nova.db.sqlalchemy.session import get_session +from nova.db.sqlalchemy import models + + +class VolumeTypeExtraSpecsTestCase(test.TestCase): + + def setUp(self): + super(VolumeTypeExtraSpecsTestCase, self).setUp() + self.context = context.get_admin_context() + self.vol_type1 = dict(name="TEST: Regular volume test") + self.vol_type1_specs = dict(vol_extra1="value1", + vol_extra2="value2", + vol_extra3=3) + self.vol_type1['extra_specs'] = self.vol_type1_specs + ref = db.api.volume_type_create(self.context, self.vol_type1) + self.volume_type1_id = ref.id + + self.vol_type2_noextra = dict(name="TEST: Volume type without extra") + ref = db.api.volume_type_create(self.context, self.vol_type2_noextra) + self.vol_type2_id = ref.id + + + def tearDown(self): + # Remove the instance type from the database + db.api.volume_type_purge(context.get_admin_context(), + self.vol_type1['name']) + db.api.volume_type_purge(context.get_admin_context(), + self.vol_type2_noextra['name']) + super(VolumeTypeExtraSpecsTestCase, self).tearDown() + + def test_volume_type_specs_get(self): + expected_specs = self.vol_type1_specs.copy() + actual_specs = db.api.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_delete(self): + expected_specs = self.vol_type1_specs.copy() + del expected_specs['vol_extra2'] + db.api.volume_type_extra_specs_delete(context.get_admin_context(), + self.volume_type1_id, + 'vol_extra2') + actual_specs = db.api.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_update(self): + expected_specs = self.vol_type1_specs.copy() + expected_specs['vol_extra3'] = 4 + db.api.volume_type_extra_specs_update_or_create( + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra3=4)) + actual_specs = db.api.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_create(self): + expected_specs = self.vol_type1_specs.copy() + expected_specs['vol_extra4'] = 'value4' + expected_specs['vol_extra5'] = 'value5' + db.api.volume_type_extra_specs_update_or_create( + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra4="value4", + vol_extra5=5)) + actual_specs = db.api.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_get_with_extra_specs(self): + volume_type = db.api.volume_type_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(volume_type['extra_specs'], + self.vol_type1_specs) + + volume_type = db.api.volume_type_get( + context.get_admin_context(), + self.vol_type2_id) + self.assertEquals(volume_type['extra_specs'], {}) + + def test_volume_type_get_by_name_with_extra_specs(self): + volume_type = db.api.volume_type_get_by_name( + context.get_admin_context(), + self.vol_type1['name']) + self.assertEquals(volume_type['extra_specs'], + self.vol_type1_specs) + + volume_type = db.api.volume_type_get_by_name( + context.get_admin_context(), + self.vol_type2_noextra['name']) + self.assertEquals(volume_type['extra_specs'], {}) + + def test_volume_type_get_all(self): + expected_specs = 
self.vol_type1_specs.copy() + + types = db.api.volume_type_get_all(context.get_admin_context()) + + self.assertEquals( + types[self.vol_type1['name']]['extra_specs'], expected_specs) + + self.assertEquals( + types[self.vol_type2_noextra['name']]['extra_specs'], {}) -- cgit From ef3f02fb37d49ccf6099e012bc27b87d7859a306 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 18 Aug 2011 15:42:30 -0700 Subject: added volume metadata. Fixed test_volume_types_extra_specs --- nova/db/api.py | 18 +++ nova/db/sqlalchemy/api.py | 140 +++++++++++++++++++++ .../versions/037_add_volume_types_and_extradata.py | 20 ++- nova/db/sqlalchemy/models.py | 15 +++ nova/exception.py | 5 + nova/tests/test_volume_types_extra_specs.py | 6 +- 6 files changed, 201 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 47e73226a..494d27708 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1429,6 +1429,24 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, ################## +def volume_metadata_get(context, volume_id): + """Get all metadata for a volume.""" + return IMPL.volume_metadata_get(context, volume_id) + + +def volume_metadata_delete(context, volume_id, key): + """Delete the given metadata item.""" + IMPL.volume_metadata_delete(context, volume_id, key) + + +def volume_metadata_update(context, volume_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.volume_metadata_update(context, volume_id, metadata, delete) + + +################## + + diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a57133b72..143162fc6 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -132,6 +132,20 @@ def require_instance_exists(f): return wrapper +def require_volume_exists(f): + """Decorator to require the specified volume to exist. + + Requires the wrapped function to use context and volume_id as + their first two arguments.
+ """ + + def wrapper(context, volume_id, *args, **kwargs): + db.api.volume_get(context, volume_id) + return f(context, volume_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + ################### @@ -2083,6 +2097,8 @@ def volume_attached(context, volume_id, instance_id, mountpoint): @require_context def volume_create(context, values): + values['metadata'] = _metadata_refs(values.get('metadata')) + volume_ref = models.Volume() volume_ref.update(values) @@ -2119,6 +2135,11 @@ def volume_destroy(context, volume_id): session.query(models.IscsiTarget).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) + session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_admin_context @@ -2142,12 +2163,16 @@ def volume_get(context, volume_id, session=None): if is_admin_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ filter_by(deleted=False).\ @@ -2163,6 +2188,8 @@ def volume_get_all(context): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2172,6 +2199,8 @@ def volume_get_all_by_host(context, host): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2181,6 +2210,8 @@ def volume_get_all_by_host(context, host): def volume_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.Volume).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() @@ -2196,6 +2227,8 @@ def volume_get_all_by_project(context, project_id): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2208,6 +2241,8 @@ def volume_get_instance(context, volume_id): filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload('instance')).\ + options(joinedload('metadata')).\ + options(joinedload('volume_type')).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) @@ -2242,12 +2277,117 @@ def volume_get_iscsi_target_num(context, volume_id): @require_context def volume_update(context, volume_id, values): session = get_session() + metadata = values.get('metadata') + if metadata is not None: + volume_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True) with session.begin(): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) + +#################### + + +@require_context 
+@require_volume_exists +def volume_metadata_get(context, volume_id): + session = get_session() + + meta_results = session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).\ + all() + + meta_dict = {} + for i in meta_results: + meta_dict[i['key']] = i['value'] + return meta_dict + + +@require_context +@require_volume_exists +def volume_metadata_delete(context, volume_id, key): + session = get_session() + session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_volume_exists +def volume_metadata_delete_all(context, volume_id): + session = get_session() + session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_volume_exists +def volume_metadata_get_item(context, volume_id, key, session=None): + if not session: + session = get_session() + + meta_result = session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + first() + + if not meta_result: + raise exception.VolumeMetadataNotFound(metadata_key=key, + volume_id=volume_id) + return meta_result + + +@require_context +@require_volume_exists +def volume_metadata_update(context, volume_id, metadata, delete): + session = get_session() + + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = volume_metadata_get(context, volume_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta objects + for meta_key, meta_value in metadata.iteritems(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + except exception.VolumeMetadataNotFound, e: + meta_ref = models.VolumeMetadata() + item.update({"key": meta_key, "volume_id": volume_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + + ################### diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py index ed8eeb172..fc365d2b2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py @@ -65,7 +65,25 @@ volume_type_extra_specs_table = Table('volume_type_extra_specs', meta, unicode_error=None, _warn_on_bytestring=False))) -new_tables = (volume_types, volume_type_extra_specs_table) +volume_metadata_table = Table('volume_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=False), + Column('key', + 
String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +new_tables = (volume_types, volume_type_extra_specs_table, volume_metadata_table) # # Tables to alter diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 08ce34647..99e6f412e 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -315,6 +315,20 @@ class Volume(BASE, NovaBase): volume_type_id = Column(Integer) +class VolumeMetadata(BASE, NovaBase): + """Represents a metadata key/value pair for a volume""" + __tablename__ = 'volume_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeMetadata.volume_id == Volume.id,' + 'VolumeMetadata.deleted == False)') + + class VolumeTypes(BASE, NovaBase): """Represent possible volume_types of volumes offered""" __tablename__ = "volume_types" @@ -824,6 +838,7 @@ def register_models(): Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project, Certificate, ConsolePool, Console, Zone, + VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs, AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: diff --git a/nova/exception.py b/nova/exception.py index ff4b7c80e..1b118f6f9 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -338,6 +338,11 @@ class VolumeNotFoundForInstance(VolumeNotFound): message = _("Volume not found for instance %(instance_id)s.") +class VolumeMetadataNotFound(NotFound): + message = _("Volume %(volume_id)s has no metadata with " + "key %(metadata_key)s.") + + class NoVolumeTypesFound(NotFound): message = _("Zero volume types found.") diff --git a/nova/tests/test_volume_types_extra_specs.py b/nova/tests/test_volume_types_extra_specs.py index c48ed789e..8d2aa2df3 100644 --- a/nova/tests/test_volume_types_extra_specs.py +++ b/nova/tests/test_volume_types_extra_specs.py @@ -37,6 +37,8 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase): self.vol_type1['extra_specs'] = self.vol_type1_specs ref = db.api.volume_type_create(self.context, self.vol_type1) self.volume_type1_id = ref.id + for k, v in self.vol_type1_specs.iteritems(): + self.vol_type1_specs[k] = str(v) self.vol_type2_noextra = dict(name="TEST: Volume type without extra") ref = db.api.volume_type_create(self.context, self.vol_type2_noextra) @@ -71,7 +73,7 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase): def test_volume_type_extra_specs_update(self): expected_specs = self.vol_type1_specs.copy() - expected_specs['vol_extra3'] = 4 + expected_specs['vol_extra3'] = "4" db.api.volume_type_extra_specs_update_or_create( context.get_admin_context(), self.volume_type1_id, @@ -89,7 +91,7 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase): context.get_admin_context(), self.volume_type1_id, dict(vol_extra4="value4", - vol_extra5=5)) + vol_extra5="value5")) actual_specs = db.api.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) -- cgit From 83d4c5b9b1f7ed9b75ae04464423b7ca4b5d627d Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Fri, 19 Aug 2011 08:08:23 -0700 Subject: Fix config_drive 
migration, per Matt Dietz. --- .../versions/037_add_config_drive_to_instances.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py index 65ea012dd..36a6af16f 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py @@ -23,20 +23,15 @@ meta = MetaData() instances = Table("instances", meta, Column("id", Integer(), primary_key=True, nullable=False)) -config_drive_column = Column("config_drive", String(255)) # matches image_ref + +# matches the size of an image_ref +config_drive_column = Column("config_drive", String(255), nullable=True) def upgrade(migrate_engine): meta.bind = migrate_engine instances.create_column(config_drive_column) - rows = migrate_engine.execute(instances.select()) - for row in rows: - instance_config_drive = None # pre-existing instances don't have one. - migrate_engine.execute(instances.update()\ - .where(instances.c.id == row[0])\ - .values(config_drive=instance_config_drive)) - def downgrade(migrate_engine): meta.bind = migrate_engine -- cgit From 276403dcb6a8c7802c456b88f8dad249b7513e64 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Fri, 19 Aug 2011 08:16:17 -0700 Subject: Define FLAGS.default_local_format. By default it's None, to match current expected _create_local --- nova/virt/libvirt/connection.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova') diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 1b48fd795..a715da66e 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -126,6 +126,10 @@ flags.DEFINE_string('libvirt_vif_type', 'bridge', flags.DEFINE_string('libvirt_vif_driver', 'nova.virt.libvirt.vif.LibvirtBridgeDriver', 'The libvirt VIF driver to configure the VIFs.') +flags.DEFINE_string('default_local_format', + None, + 'The default format a local_volume will be formatted with ' + 'on creation.') def get_connection(read_only): -- cgit From c4fc9f0737ec9f8d5c950b850fed9930a68164f4 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Fri, 19 Aug 2011 08:44:14 -0700 Subject: Add copyright notices --- nova/api/openstack/create_instance_helper.py | 1 + nova/api/openstack/views/servers.py | 1 + nova/compute/api.py | 1 + .../migrate_repo/versions/037_add_config_drive_to_instances.py | 4 ++-- nova/db/sqlalchemy/models.py | 1 + nova/scheduler/simple.py | 1 - nova/tests/api/openstack/test_servers.py | 1 + nova/tests/test_compute.py | 1 + nova/virt/libvirt/connection.py | 1 + nova/virt/xenapi/vm_utils.py | 1 + 10 files changed, 10 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index d776ae92d..563ef1c42 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -1,4 +1,5 @@ # Copyright 2011 OpenStack LLC. +# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index c7bc03bcb..19acb0899 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack LLC. +# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/compute/api.py b/nova/compute/api.py index e42a5bbad..ccf29bd1a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py index 36a6af16f..d3058f00d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_config_drive_to_instances.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC. +# +# Copyright 2011 Piston Cloud Computing, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 8a6e2f673..c454cfcc3 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 61c76b35d..fc1b3142a 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -41,7 +41,6 @@ class SimpleScheduler(chance.ChanceScheduler): def _schedule_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" - instance_ref = db.instance_get(context, instance_id) if (instance_ref['availability_zone'] and ':' in instance_ref['availability_zone'] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 7d8b222cc..0a46c3fd1 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack LLC. +# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 227b42fd7..8d1b95f74 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index a715da66e..ea47fa99d 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -4,6 +4,7 @@ # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright (c) 2011 Piston Cloud Computing, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 206f522c9..3861f6bd8 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2011 Piston Cloud Computing, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain -- cgit From b3d3f735a7402c49b8db0e19c9b1f8521187378b Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Fri, 19 Aug 2011 09:32:39 -0700 Subject: Fix unit test for the change of 'nova-manage network list' format --- nova/tests/test_nova_manage.py | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 69e73367f..e4914e0b1 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -103,7 +103,7 @@ class NetworkCommandsTestCase(test.TestCase): self.commands.create( label = 'Test', fixed_range_v4 = '10.2.0.0/24', - fixed_range_v6 = 'fd00:2::/64', + fixed_range_v6 = 'fd00:2::/120', num_networks = 1, network_size = 256, vlan_start = 200, @@ -113,34 +113,38 @@ class NetworkCommandsTestCase(test.TestCase): self.assertEqual(net['label'], 'Test') self.assertEqual(net['cidr'], '10.2.0.0/24') self.assertEqual(net['netmask'], '255.255.255.0') - self.assertEqual(net['cidr_v6'], 'fd00:2::/64') + self.assertEqual(net['cidr_v6'], 'fd00:2::/120') self.assertEqual(net['bridge_interface'], 'eth0') self.assertEqual(net['vlan'], 200) def test_list(self): - format = "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" - head = format % ( - _('IPv4'), - _('IPv6'), - _('start address'), - _('DNS1'), - _('DNS2'), - _('VlanID'), - _('project')) - body = format % ( - '10.2.0.0/24', - 'fd00:2::/64', - '10.2.0.3', - 'None', - 'None', - '200', - 'None',) self.test_create() + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') output = StringIO.StringIO() sys.stdout = output self.commands.list() sys.stdout = sys.__stdout__ result = output.getvalue() + _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" + head = _fmt % (_('id'), + _('IPv4'), + _('IPv6'), + _('start address'), + _('DNS1'), + _('DNS2'), + _('VlanID'), + _('project'), + _("uuid")) + body = _fmt % ( + net['id'], + '10.2.0.0/24', + 'fd00:2::/120', + '10.2.0.3', + 'None', + 'None', + '200', + 'None', + net['uuid'],) answer = '%s\n%s\n' % (head, body) self.assertEqual(result, answer) -- cgit From c06bbe99734f2fea35cfb4bdd854814c9119b617 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Fri, 19 Aug 2011 12:30:55 -0700 Subject: Added monkey patching notification code function w --- nova/flags.py | 8 ++++++++ nova/notifier/api.py | 18 ++++++++++++++++++ nova/utils.py | 24 +++++++++++++++++++++++- 3 files 
changed, 49 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 48d5e8168..09c30c7c0 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -402,3 +402,11 @@ DEFINE_bool('resume_guests_state_on_host_boot', False, DEFINE_string('root_helper', 'sudo', 'Command prefix to use for running commands as root') + +DEFINE_bool('monkey_patch', False, + 'Whether to monkey patch') + +DEFINE_list('monkey_patch_modules', + ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator', + 'nova.compute.api:nova.notifier.api.notify_decorator'], + 'Module list representng monkey patched module and decorator') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index e18f3e280..ca0ff60a4 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -39,6 +39,24 @@ class BadPriorityException(Exception): pass +def notify_decorator(name, fn): + def wrapped_func(*args, **kwarg): + body = {} + body['args'] = [] + body['kwarg'] = {} + for arg in args: + body['args'].append(arg) + for key in kwarg: + body['kwarg'][key] = kwarg[key] + LOG.debug("Notify Decorator: %s %r" % (name, body)) + notify(FLAGS.host, + name, + DEBUG, + body) + fn(*args, **kwarg) + return wrapped_func + + def publisher_id(service, host=None): if not host: host = FLAGS.host diff --git a/nova/utils.py b/nova/utils.py index 7276b6bd5..8024a3517 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -35,6 +35,7 @@ import sys import time import types import uuid +import pyclbr from xml.sax import saxutils from eventlet import event @@ -634,7 +635,7 @@ def synchronized(name, external=False): Different methods can share the same lock: @synchronized('mylock') - def foo(self, *args): + Gdef foo(self, *args): ... @synchronized('mylock') @@ -873,3 +874,24 @@ class Bootstrapper(object): for key in FLAGS: value = FLAGS.get(key, None) logging.audit(_("%(key)s : %(value)s" % locals())) + + +def monkey_patch(): + if not FLAGS.monkey_patch: + return + for module_and_decorator in FLAGS.monkey_patch_modules: + module, decorator_name = module_and_decorator.split(':') + decorator = import_class(decorator_name) + __import__(module) + module_data = pyclbr.readmodule_ex(module) + for key in module_data.keys(): + if isinstance(module_data[key], pyclbr.Class): + clz = import_class("%s.%s" % (module, key)) + for method, func in inspect.getmembers(clz, inspect.ismethod): + setattr(clz, method,\ + decorator("%s.%s" % (module, key), func)) + if isinstance(module_data[key], pyclbr.Function): + func = import_class("%s.%s" % (module, key)) + setattr(sys.modules[module], key,\ + setattr(sys.modules[module], key, \ + decorator("%s.%s" % (module, key), func))) -- cgit From 75eb485c3a0e53380b9247d45e2a66159928dcd2 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Fri, 19 Aug 2011 14:04:58 -0700 Subject: Fixed NoneType returned bugw --- nova/notifier/api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index ca0ff60a4..99b8b0102 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -48,12 +48,11 @@ def notify_decorator(name, fn): body['args'].append(arg) for key in kwarg: body['kwarg'][key] = kwarg[key] - LOG.debug("Notify Decorator: %s %r" % (name, body)) notify(FLAGS.host, name, DEBUG, body) - fn(*args, **kwarg) + return fn(*args, **kwarg) return wrapped_func -- cgit From 27723b95f7b3a64226ebe431e17cbf681b40303b Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Fri, 19 Aug 2011 14:13:39 -0700 Subject: Fixed typo --- nova/flags.py | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 09c30c7c0..4e6e3ad40 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -409,4 +409,4 @@ DEFINE_bool('monkey_patch', False, DEFINE_list('monkey_patch_modules', ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator', 'nova.compute.api:nova.notifier.api.notify_decorator'], - 'Module list representng monkey patched module and decorator') + 'Module list representing monkey patched module and decorator') -- cgit From e10aa40bd6c2f96b2f5bba8b38b9605f019328e9 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Fri, 19 Aug 2011 14:22:53 -0700 Subject: Fixed typo --- nova/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/utils.py b/nova/utils.py index 171035068..d7e14b1b0 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -636,7 +636,7 @@ def synchronized(name, external=False): Different methods can share the same lock: @synchronized('mylock') - Gdef foo(self, *args): + def foo(self, *args): ... @synchronized('mylock') -- cgit From 37508da788c5b2c2eadb36ef61d58836d93a3365 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 20 Aug 2011 12:41:38 -0700 Subject: improve test coverage for instance types / flavors --- nova/tests/test_instance_types.py | 66 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index ef271518c..989c6f32f 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -47,6 +47,29 @@ class InstanceTypeTestCase(test.TestCase): self.id = max_id["id"] + 1 self.name = str(int(time.time())) + def _nonexistant_flavor_name(self): + """return an instance type name not in the DB""" + nonexistant_flavor = "sdfsfsdf" + flavors = instance_types.get_all_types() + while flavors.has_key(nonexistant_flavor): + nonexistant_flavor = nonexistant_flavor.join("z") + else: + return nonexistant_flavor + + def _nonexistant_flavor_id(self): + """return an instance type ID not in the DB""" + nonexistant_flavor = 2700 + flavor_ids = [ value["id"] for key, value in\ + instance_types.get_all_types().iteritems() ] + while nonexistant_flavor in flavor_ids: + nonexistant_flavor += 1 + else: + return nonexistant_flavor + + def _existing_flavor(self): + """return first instance type name""" + return instance_types.get_all_types().keys()[0] + def test_instance_type_create_then_delete(self): """Ensure instance types can be created""" starting_inst_list = instance_types.get_all_types() @@ -87,7 +110,8 @@ class InstanceTypeTestCase(test.TestCase): def test_non_existant_inst_type_shouldnt_delete(self): """Ensures that instance type creation fails with invalid args""" self.assertRaises(exception.ApiError, - instance_types.destroy, "sfsfsdfdfs") + instance_types.destroy, + self._nonexistant_flavor_name()) def test_repeated_inst_types_should_raise_api_error(self): """Ensures that instance duplicates raises ApiError""" @@ -97,3 +121,43 @@ class InstanceTypeTestCase(test.TestCase): self.assertRaises( exception.ApiError, instance_types.create, new_name, 256, 1, 120, self.flavorid) + + def test_will_not_destroy_with_no_name(self): + """Ensure destroy sad path of no name raises error""" + self.assertRaises(exception.ApiError, + instance_types.destroy, + self._nonexistant_flavor_name()) + + def test_will_not_purge_without_name(self): + """Ensure purge without a name raises error""" + 
self.assertRaises(exception.InvalidInstanceType, + instance_types.purge, None) + + def test_will_not_purge_with_wrong_name(self): + """Ensure purge without correct name raises error""" + self.assertRaises(exception.ApiError, + instance_types.purge, + self._nonexistant_flavor_name()) + + def test_will_not_get_bad_default_instance_type(self): + """ensures error raised on bad default instance type""" + FLAGS.default_instance_type = self._nonexistant_flavor_name() + self.assertRaises(exception.InstanceTypeNotFoundByName, + instance_types.get_default_instance_type) + + def test_will_not_get_instance_type_by_name_with_no_name(self): + """Ensure get by name returns default flavor with no name""" + self.assertEqual(instance_types.get_default_instance_type(), + instance_types.get_instance_type_by_name(None)) + + def test_will_not_get_instance_type_with_bad_name(self): + """Ensure get by name returns default flavor with bad name""" + self.assertRaises(exception.InstanceTypeNotFound, + instance_types.get_instance_type, + self._nonexistant_flavor_name()) + + def test_will_not_get_flavor_by_bad_flavor_id(self): + """Ensure get by flavor raises error with wrong flavorid""" + self.assertRaises(exception.InstanceTypeNotFound, + instance_types.get_instance_type_by_name, + self._nonexistant_flavor_id()) -- cgit From 65b30ad73338fa481d1ab9155153b8265fbe8f90 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 20 Aug 2011 12:43:50 -0700 Subject: pep8 --- nova/tests/test_instance_types.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index 989c6f32f..556ba91a9 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -51,20 +51,20 @@ class InstanceTypeTestCase(test.TestCase): """return an instance type name not in the DB""" nonexistant_flavor = "sdfsfsdf" flavors = instance_types.get_all_types() - while flavors.has_key(nonexistant_flavor): - nonexistant_flavor = nonexistant_flavor.join("z") + while nonexistant_flavor in flavors: + nonexistant_flavor = nonexistant_flavor.join("z") else: - return nonexistant_flavor + return nonexistant_flavor def _nonexistant_flavor_id(self): """return an instance type ID not in the DB""" nonexistant_flavor = 2700 - flavor_ids = [ value["id"] for key, value in\ - instance_types.get_all_types().iteritems() ] + flavor_ids = [value["id"] for key, value in\ + instance_types.get_all_types().iteritems()] while nonexistant_flavor in flavor_ids: - nonexistant_flavor += 1 + nonexistant_flavor += 1 else: - return nonexistant_flavor + return nonexistant_flavor def _existing_flavor(self): """return first instance type name""" @@ -127,12 +127,12 @@ class InstanceTypeTestCase(test.TestCase): self.assertRaises(exception.ApiError, instance_types.destroy, self._nonexistant_flavor_name()) - + def test_will_not_purge_without_name(self): """Ensure purge without a name raises error""" self.assertRaises(exception.InvalidInstanceType, instance_types.purge, None) - + def test_will_not_purge_with_wrong_name(self): """Ensure purge without correct name raises error""" self.assertRaises(exception.ApiError, @@ -149,7 +149,7 @@ class InstanceTypeTestCase(test.TestCase): """Ensure get by name returns default flavor with no name""" self.assertEqual(instance_types.get_default_instance_type(), instance_types.get_instance_type_by_name(None)) - + def test_will_not_get_instance_type_with_bad_name(self): """Ensure get by name returns default flavor with 
bad name""" self.assertRaises(exception.InstanceTypeNotFound, -- cgit From f4cd3a72fa2a3630ccab2c0224777c3eff05380e Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 20 Aug 2011 14:55:41 -0700 Subject: added rainy day test for ipv6 tests. fixed ipv6.to_global to trap correct exception. --- nova/ipv6/rfc2462.py | 2 +- nova/tests/test_ipv6.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py index 0074efe98..351df742e 100644 --- a/nova/ipv6/rfc2462.py +++ b/nova/ipv6/rfc2462.py @@ -30,7 +30,7 @@ def to_global(prefix, mac, project_id): maskIP = netaddr.IPNetwork(prefix).ip return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\ format() - except TypeError: + except netaddr.AddrFormatError: raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index d123df6f1..12d64f776 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -23,6 +23,7 @@ from nova import test LOG = logging.getLogger('nova.tests.test_ipv6') import sys +import netaddr class IPv6RFC2462TestCase(test.TestCase): @@ -40,6 +41,11 @@ class IPv6RFC2462TestCase(test.TestCase): mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455') self.assertEquals(mac, '00:16:3e:33:44:55') + def test_to_global_with_bad_mac(self): + bad_mac = '02:16:3e:33:44:5Z' + self.assertRaises(TypeError, ipv6.to_global, + '2001:db8::', bad_mac, 'test') + class IPv6AccountIdentiferTestCase(test.TestCase): """Unit tests for IPv6 account_identifier backend operations.""" -- cgit From bad921b5efa7b11a91d1df32b3d17fdb95852589 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 20 Aug 2011 15:07:37 -0700 Subject: removed leftover netaddr import --- nova/tests/test_ipv6.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 12d64f776..891d52358 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -23,7 +23,6 @@ from nova import test LOG = logging.getLogger('nova.tests.test_ipv6') import sys -import netaddr class IPv6RFC2462TestCase(test.TestCase): -- cgit From 43e2ca531f0fdea5173b7f237627fc3543caf13b Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 20 Aug 2011 15:30:59 -0700 Subject: lp:828610 --- nova/ipv6/account_identifier.py | 2 +- nova/tests/test_ipv6.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 258678f0a..02b653925 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -34,7 +34,7 @@ def to_global(prefix, mac, project_id): mac_addr = netaddr.IPAddress(int_addr) maskIP = netaddr.IPNetwork(prefix).ip return (project_hash ^ static_num ^ mac_addr | maskIP).format() - except TypeError: + except netaddr.AddrFormatError: raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index d123df6f1..6afc7e3f9 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -55,3 +55,8 @@ class IPv6AccountIdentiferTestCase(test.TestCase): def test_to_mac(self): mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455') self.assertEquals(mac, '02:16:3e:33:44:55') + + def test_to_global_with_bad_mac(self): + bad_mac = '02:16:3e:33:44:5X' + self.assertRaises(TypeError, ipv6.to_global, + '2001:db8::', bad_mac, 'test') -- cgit From 3bbecbbb8c857079f75bea6fc6610bce9942de34 Mon Sep 17 00:00:00 2001 From: Ken 
Pepple Date: Sat, 20 Aug 2011 18:20:55 -0700 Subject: added unit tests for versions.py --- nova/tests/test_versions.py | 58 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 nova/tests/test_versions.py (limited to 'nova') diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py new file mode 100644 index 000000000..9578eda6d --- /dev/null +++ b/nova/tests/test_versions.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Ken Pepple +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from nova import exception +from nova import test +from nova import utils +from nova import version + + +class VersionTestCase(test.TestCase): + def setUp(self): + """setup test with unchanging values""" + super(VersionTestCase, self).setUp() + self.version = version + self.version.FINAL = False + self.version.NOVA_VERSION = ['2012', '10'] + self.version.YEAR, self.version.COUNT = self.version.NOVA_VERSION + self.version.version_info = {'branch_nick': u'LOCALBRANCH', + 'revision_id': 'LOCALREVISION', + 'revno': 0} + + def test_version_string_is_good(self): + """Ensure version string works""" + self.assertEqual("2012.10-dev", self.version.version_string()) + + def test_canonical_version_string_is_good(self): + """Ensure canonical version works""" + self.assertEqual("2012.10", self.version.canonical_version_string()) + + def test_final_version_strings_are_identical(self): + """Ensure final version strings match""" + self.version.FINAL = True + self.assertEqual(self.version.canonical_version_string(), + self.version.version_string()) + + def test_vcs_version_string_is_good(self): + """""" + self.assertEqual("LOCALBRANCH:LOCALREVISION", + self.version.vcs_version_string()) + + def test_version_string_with_vcs_is_good(self): + """""" + self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION", + self.version.version_string_with_vcs()) -- cgit From f2981d8463779fa1fca52c840d91b47845719340 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 20 Aug 2011 18:28:30 -0700 Subject: comment strings --- nova/tests/test_versions.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py index 9578eda6d..4621b042b 100644 --- a/nova/tests/test_versions.py +++ b/nova/tests/test_versions.py @@ -22,6 +22,7 @@ from nova import version class VersionTestCase(test.TestCase): + """Test cases for Versions code""" def setUp(self): """setup test with unchanging values""" super(VersionTestCase, self).setUp() @@ -42,17 +43,19 @@ class VersionTestCase(test.TestCase): self.assertEqual("2012.10", self.version.canonical_version_string()) def test_final_version_strings_are_identical(self): - """Ensure final version strings match""" + """Ensure final version strings match only at release""" + self.assertNotEqual(self.version.canonical_version_string(), + self.version.version_string()) self.version.FINAL = True self.assertEqual(self.version.canonical_version_string(), 
self.version.version_string()) def test_vcs_version_string_is_good(self): - """""" + """Ensure uninstalled code generates local """ self.assertEqual("LOCALBRANCH:LOCALREVISION", self.version.vcs_version_string()) def test_version_string_with_vcs_is_good(self): - """""" + """Ensure uninstalled code get version string""" self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION", self.version.version_string_with_vcs()) -- cgit From 0fdbea56baaef08575b98e8a553ceac9876e4962 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sun, 21 Aug 2011 17:52:14 -0700 Subject: added exception catch and test for bad prefix --- nova/ipv6/rfc2462.py | 2 ++ nova/tests/test_ipv6.py | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'nova') diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py index 351df742e..acf42d201 100644 --- a/nova/ipv6/rfc2462.py +++ b/nova/ipv6/rfc2462.py @@ -32,6 +32,8 @@ def to_global(prefix, mac, project_id): format() except netaddr.AddrFormatError: raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) + except TypeError: + raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 891d52358..6e72b7330 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -45,6 +45,13 @@ class IPv6RFC2462TestCase(test.TestCase): self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', bad_mac, 'test') + def test_to_global_with_bad_prefix(self): + bad_prefix = '82' + self.assertRaises(TypeError, ipv6.to_global, + bad_prefix, + '2001:db8::216:3eff:fe33:4455', + 'test') + class IPv6AccountIdentiferTestCase(test.TestCase): """Unit tests for IPv6 account_identifier backend operations.""" -- cgit From 326cfda8cc50f5db083e9df381d3109e0302605d Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sun, 21 Aug 2011 17:55:54 -0700 Subject: added exception catch for bad prefix and matching test --- nova/ipv6/account_identifier.py | 2 ++ nova/tests/test_ipv6.py | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'nova') diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 02b653925..4ca7b5983 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -36,6 +36,8 @@ def to_global(prefix, mac, project_id): return (project_hash ^ static_num ^ mac_addr | maskIP).format() except netaddr.AddrFormatError: raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) + except TypeError: + raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 6afc7e3f9..f9c2517a7 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -60,3 +60,10 @@ class IPv6AccountIdentiferTestCase(test.TestCase): bad_mac = '02:16:3e:33:44:5X' self.assertRaises(TypeError, ipv6.to_global, '2001:db8::', bad_mac, 'test') + + def test_to_global_with_bad_prefix(self): + bad_prefix = '78' + self.assertRaises(TypeError, ipv6.to_global, + bad_prefix, + '2001:db8::a94a:8fe5:ff33:4455', + 'test') -- cgit From b5bf5fbb77e95b44f3254a111374ddba73016c4d Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sun, 21 Aug 2011 18:01:34 -0700 Subject: added exception catch and test for bad project_id --- nova/ipv6/account_identifier.py | 2 ++ nova/tests/test_ipv6.py | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'nova') diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 4ca7b5983..27bb01988 100644 --- a/nova/ipv6/account_identifier.py +++ 
b/nova/ipv6/account_identifier.py @@ -38,6 +38,8 @@ def to_global(prefix, mac, project_id): raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) except TypeError: raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) + except NameError: + raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index f9c2517a7..5c333b17e 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -67,3 +67,10 @@ class IPv6AccountIdentiferTestCase(test.TestCase): bad_prefix, '2001:db8::a94a:8fe5:ff33:4455', 'test') + + def test_to_global_with_bad_project(self): + bad_project = 'non-existent-project-name' + self.assertRaises(TypeError, ipv6.to_global, + '2001:db8::', + '2001:db8::a94a:8fe5:ff33:4455', + bad_project) -- cgit From d4b09b85ad20bd0b83bc48d7bd1e0c6754b2649b Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sun, 21 Aug 2011 18:07:07 -0700 Subject: added test for bad project_id ... although it may not be used --- nova/tests/test_ipv6.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 6e72b7330..f151b9840 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -52,6 +52,13 @@ class IPv6RFC2462TestCase(test.TestCase): '2001:db8::216:3eff:fe33:4455', 'test') + def test_to_global_with_bad_project(self): + bad_project = 'non-existent-project-name' + self.assertRaises(TypeError, ipv6.to_global, + '2001:db8::', + '2001:db8::a94a:8fe5:ff33:4455', + bad_project) + class IPv6AccountIdentiferTestCase(test.TestCase): """Unit tests for IPv6 account_identifier backend operations.""" -- cgit From 34e310eff24b96bcc27df176bfecbd02ac863e7c Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Sun, 21 Aug 2011 22:59:46 -0400 Subject: fixing bug lp:830817 --- nova/api/openstack/views/addresses.py | 26 +++++++++++++++++++++++--- nova/api/openstack/wsgi.py | 6 +++--- nova/tests/api/openstack/test_servers.py | 21 +++++++++++++++++++++ 3 files changed, 47 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index ddbf7a144..05028db41 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -15,11 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. 
+import traceback + from nova import flags from nova import utils +from nova import log as logging from nova.api.openstack import common FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.api.openstack.views.addresses') class ViewBuilder(object): @@ -48,7 +52,11 @@ class ViewBuilderV11(ViewBuilder): def build(self, interfaces): networks = {} for interface in interfaces: - network_label = interface['network']['label'] + try: + network_label = self._extract_network_label(interface) + except TypeError: + LOG.error(traceback.format_exc()) + continue if network_label not in networks: networks[network_label] = [] @@ -64,9 +72,14 @@ class ViewBuilderV11(ViewBuilder): return networks - def build_network(self, interfaces, network_label): + def build_network(self, interfaces, requested_network): for interface in interfaces: - if interface['network']['label'] == network_label: + try: + network_label = self._extract_network_label(interface) + except TypeError: + continue + + if network_label == requested_network: ips = list(self._extract_ipv4_addresses(interface)) ipv6 = self._extract_ipv6_address(interface) if ipv6 is not None: @@ -74,6 +87,13 @@ class ViewBuilderV11(ViewBuilder): return {network_label: ips} return None + def _extract_network_label(self, interface): + try: + return interface['network']['label'] + except (TypeError, KeyError): + LOG.error(traceback.format_exc()) + raise TypeError + def _extract_ipv4_addresses(self, interface): for fixed_ip in interface['fixed_ips']: yield self._build_ip_entity(fixed_ip['address'], 4) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 0eb47044e..c2185ebfd 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -516,6 +516,6 @@ class Resource(wsgi.Application): controller_method = getattr(self.controller, action) try: return controller_method(req=request, **action_args) - except TypeError, exc: - LOG.debug(str(exc)) - return webob.exc.HTTPBadRequest() + except TypeError: + LOG.debug(traceback.format_exc()) + return faults.Fault(webob.exc.HTTPBadRequest()) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 437620854..8167c4ace 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -752,6 +752,27 @@ class ServersTest(test.TestCase): (ip,) = private_node.getElementsByTagName('ip') self.assertEquals(ip.getAttribute('addr'), private) + # NOTE(bcwaldon): lp830817 + def test_get_server_by_id_malformed_networks_v1_1(self): + ifaces = [ + { + 'network': None, + 'fixed_ips': [ + {'address': '192.168.0.3'}, + {'address': '192.168.0.4'}, + ], + }, + ] + new_return_server = return_server_with_attributes(interfaces=ifaces) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = webob.Request.blank('/v1.1/servers/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server1') + def test_get_server_by_id_with_addresses_v1_1(self): self.flags(use_ipv6=True) interfaces = [ -- cgit From 9fc23f1055be435e8a21b999f748a8461552bd13 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Sun, 21 Aug 2011 23:16:26 -0400 Subject: adding import --- nova/api/openstack/wsgi.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index c2185ebfd..572aba993 100644 --- a/nova/api/openstack/wsgi.py 
+++ b/nova/api/openstack/wsgi.py @@ -1,5 +1,6 @@ import json +import traceback import webob from xml.dom import minidom from xml.parsers import expat -- cgit From 9508bb599c15035f7afbdc80fe70d539e8598edf Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Mon, 22 Aug 2011 08:59:35 -0700 Subject: Add 'nova-manage network modify' command. --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 32 +++++++++++++++++++++++++++ nova/tests/test_nova_manage.py | 50 ++++++++++++++++++++++++++++++++---------- 3 files changed, 76 insertions(+), 11 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 6833e6312..ec701ebdc 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -623,6 +623,11 @@ def network_associate(context, project_id, force=False): return IMPL.network_associate(context, project_id, force) +def network_associate_by_id(context, network_id, project_id, force=False): + """Associate a project with a network specified by id.""" + return IMPL.network_associate_by_id(context, network_id, project_id, force=False) + + def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2f9cab1ab..37dac1444 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1669,6 +1669,38 @@ def network_associate(context, project_id, force=False): return network_ref +@require_admin_context +def network_associate_by_id(context, network_id, project_id, force=False): + """Associate a project with a network specified by id. + + only associate if the network isn't already associated + with a project or if force is True + """ + session = get_session() + with session.begin(): + + def network_query(network_filter): + if force: + return session.query(models.Network).\ + filter_by(deleted=False).\ + filter_by(id=network_filter).\ + with_lockmode('update').\ + first() + else: + return session.query(models.Network).\ + filter_by(deleted=False).\ + filter_by(project_id=None).\ + filter_by(id=network_filter).\ + with_lockmode('update').\ + first() + network_ref = network_query(network_id) + if network_ref: + network_ref['project_id'] = project_id + session.add(network_ref) + LOG.debug("piyo: network_ref['project_id']=%s" % network_ref['project_id']) + return network_ref + + @require_admin_context def network_count(context): session = get_session() diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index e4914e0b1..4c828da16 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -39,8 +39,10 @@ from nova import db from nova import flags from nova import test from nova import exception +from nova import log as logging FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.nova_manage') class FixedIpCommandsTestCase(test.TestCase): @@ -88,7 +90,6 @@ class FixedIpCommandsTestCase(test.TestCase): class NetworkCommandsTestCase(test.TestCase): def setUp(self): -# print 'piyo' super(NetworkCommandsTestCase, self).setUp() self.commands = nova_manage.NetworkCommands() self.context = context.get_admin_context() @@ -100,22 +101,29 @@ class NetworkCommandsTestCase(test.TestCase): super(NetworkCommandsTestCase, self).tearDown() def test_create(self): + FLAGS.network_manager='nova.network.manager.VlanManager' self.commands.create( - label = 'Test', - fixed_range_v4 = '10.2.0.0/24', - fixed_range_v6 = 'fd00:2::/120', - num_networks = 1, - network_size = 256, - vlan_start = 200, - bridge_interface = 'eth0', - ) + label='Test', + 
fixed_range_v4='10.2.0.0/24', + num_networks=1, + network_size=256, + multi_host='F', + vlan_start=200, + vpn_start=2000, + fixed_range_v6='fd00:2::/120', + gateway_v6='fd00:2::22', + bridge_interface='eth0') net = db.network_get_by_cidr(self.context, '10.2.0.0/24') self.assertEqual(net['label'], 'Test') self.assertEqual(net['cidr'], '10.2.0.0/24') self.assertEqual(net['netmask'], '255.255.255.0') + self.assertEqual(net['multi_host'], False) + self.assertEqual(net['vlan'], 200) + self.assertEqual(net['bridge'], 'br200') + self.assertEqual(net['vpn_public_port'], 2000) self.assertEqual(net['cidr_v6'], 'fd00:2::/120') + self.assertEqual(net['gateway_v6'], 'fd00:2::22') self.assertEqual(net['bridge_interface'], 'eth0') - self.assertEqual(net['vlan'], 200) def test_list(self): self.test_create() @@ -150,10 +158,30 @@ class NetworkCommandsTestCase(test.TestCase): def test_delete(self): self.test_create() - self.commands.delete(fixed_range = '10.2.0.0/24') + self.commands.delete(fixed_range='10.2.0.0/24') net_exist = True try: net = db.network_get_by_cidr(self.context, '10.2.0.0/24') except exception.NetworkNotFoundForCidr, e: net_exist = False self.assertEqual(net_exist, False) + + def test_modify(self): + self.test_create() + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + db.network_disassociate(self.context, net['id']) + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + self.assertEqual(net['project_id'], None) + self.assertEqual(net['host'], None) + self.commands.modify('10.2.0.0/24', project='test_project', host='test_host') + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + self.assertEqual(net['project_id'], 'test_project') + self.assertEqual(net['host'], 'test_host') + self.commands.modify('10.2.0.0/24') + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + self.assertEqual(net['project_id'], 'test_project') + self.assertEqual(net['host'], 'test_host') + self.commands.modify('10.2.0.0/24', project='None', host='None') + net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + self.assertEqual(net['project_id'], None) + self.assertEqual(net['host'], None) -- cgit From 9bcd9fc5b339af97d161a65f3da84ed1bd99da2f Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Mon, 22 Aug 2011 10:27:54 -0700 Subject: delete debug code. 
--- nova/db/sqlalchemy/api.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 37dac1444..f87d32b63 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1697,7 +1697,6 @@ def network_associate_by_id(context, network_id, project_id, force=False): if network_ref: network_ref['project_id'] = project_id session.add(network_ref) - LOG.debug("piyo: network_ref['project_id']=%s" % network_ref['project_id']) return network_ref -- cgit From f05da72e28fac1bfc7f208ce57d4462a53f290f2 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Mon, 22 Aug 2011 11:50:44 -0700 Subject: Fix pep8 --- nova/db/api.py | 3 ++- nova/tests/test_nova_manage.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index ec701ebdc..928c695fd 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -625,7 +625,8 @@ def network_associate(context, project_id, force=False): def network_associate_by_id(context, network_id, project_id, force=False): """Associate a project with a network specified by id.""" - return IMPL.network_associate_by_id(context, network_id, project_id, force=False) + return IMPL.network_associate_by_id(context, network_id, project_id, + force=False) def network_count(context): diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 4c828da16..d6edc8ba9 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -88,6 +88,7 @@ class FixedIpCommandsTestCase(test.TestCase): '10.0.0.100') self.assertEqual(address['reserved'], False) + class NetworkCommandsTestCase(test.TestCase): def setUp(self): super(NetworkCommandsTestCase, self).setUp() @@ -101,7 +102,7 @@ class NetworkCommandsTestCase(test.TestCase): super(NetworkCommandsTestCase, self).tearDown() def test_create(self): - FLAGS.network_manager='nova.network.manager.VlanManager' + FLAGS.network_manager = 'nova.network.manager.VlanManager' self.commands.create( label='Test', fixed_range_v4='10.2.0.0/24', @@ -173,7 +174,8 @@ class NetworkCommandsTestCase(test.TestCase): net = db.network_get_by_cidr(self.context, '10.2.0.0/24') self.assertEqual(net['project_id'], None) self.assertEqual(net['host'], None) - self.commands.modify('10.2.0.0/24', project='test_project', host='test_host') + self.commands.modify('10.2.0.0/24', project='test_project', + host='test_host') net = db.network_get_by_cidr(self.context, '10.2.0.0/24') self.assertEqual(net['project_id'], 'test_project') self.assertEqual(net['host'], 'test_host') -- cgit From e36ebf31699546e48d27754ac1e26b3704399ab0 Mon Sep 17 00:00:00 2001 From: Tim Simpson Date: Mon, 22 Aug 2011 14:06:59 -0500 Subject: Added ability to detect import errors in list_notifier if one or more drivers could not be loaded. 
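
A minimal standalone sketch of the fallback pattern this change introduces (illustrative only, not the patched module itself, and assuming a ClassNotFound-style import error is what the loader raises):

    class ImportFailureNotifier(object):
        """Remembers an import error and re-raises it on every notify()."""

        def __init__(self, exception):
            self.exception = exception

        def notify(self, message):
            # Re-raising here lets the caller's existing per-driver error
            # handling log the original import failure instead of silently
            # dropping the misconfigured driver from the list.
            raise self.exception

The driver list keeps its length and ordering, and the import error surfaces in the logs whenever a notification is sent.
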
--- nova/notifier/list_notifier.py | 23 +++++++++++--- nova/tests/notifier/test_list_notifier.py | 52 +++++++++++++++---------------- 2 files changed, 45 insertions(+), 30 deletions(-) (limited to 'nova') diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py index d45a31ecb..78d51628c 100644 --- a/nova/notifier/list_notifier.py +++ b/nova/notifier/list_notifier.py @@ -16,6 +16,7 @@ from nova import flags from nova import log as logging from nova import utils +from nova.exception import ClassNotFound flags.DEFINE_multistring('list_notifier_drivers', ['nova.notifier.no_op_notifier'], @@ -27,17 +28,31 @@ LOG = logging.getLogger('nova.notifier.list_notifier') drivers = None -def __get_drivers(): + +class ImportFailureNotifier(object): + + def __init__(self, exception): + self.exception = exception + + def notify(message): + raise self.exception + + +def _get_drivers(): """Instantiates and returns drivers based on the flag values.""" global drivers if not drivers: - drivers = [utils.import_object(notification_driver) - for notification_driver in FLAGS.list_notifier_drivers] + drivers = [] + for notification_driver in FLAGS.list_notifier_drivers: + try: + drivers.append(utils.import_object(notification_driver)) + except ClassNotFound as e: + drivers.append(ImportFailureNotifier(e)) return drivers def notify(message): """Passes notification to mulitple notifiers in a list.""" - for driver in __get_drivers(): + for driver in _get_drivers(): try: driver.notify(message) except Exception as e: diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py index 65c2ecd90..ca8b3e0a7 100644 --- a/nova/tests/notifier/test_list_notifier.py +++ b/nova/tests/notifier/test_list_notifier.py @@ -31,10 +31,26 @@ from nova import test class NotifierListTestCase(test.TestCase): """Test case for notifications""" + def setUp(self): super(NotifierListTestCase, self).setUp() list_notifier._reset_drivers() self.stubs = stubout.StubOutForTesting() + # Mock log to add one to exception_count when log.exception is called + def mock_exception(cls, *args): + self.exception_count += 1 + self.exception_count = 0 + list_notifier_log = logging.getLogger('nova.notifier.list_notifier') + self.stubs.Set(list_notifier_log, "exception", mock_exception) + # Mock no_op notifier to add one to notify_count when called. + def mock_notify(cls, *args): + self.notify_count += 1 + self.notify_count = 0 + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify) + # Mock log_notifier to raise RuntimeError when called. 
+ def mock_notify2(cls, *args): + raise RuntimeError("Bad notifier.") + self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) def tearDown(self): self.stubs.UnsetAll() @@ -45,41 +61,25 @@ class NotifierListTestCase(test.TestCase): self.flags(notification_driver='nova.notifier.list_notifier', list_notifier_drivers=['nova.notifier.no_op_notifier', 'nova.notifier.no_op_notifier']) - self.notify_count = 0 - - def mock_notify(cls, *args): - self.notify_count += 1 - - self.stubs.Set(nova.notifier.no_op_notifier, 'notify', - mock_notify) - notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 2) + self.assertEqual(self.exception_count, 0) def test_send_notifications_with_errors(self): - self.exception_count = 0 - def mock_exception(cls, *args): - self.exception_count += 1 - - self.notify_count = 0 - def mock_notify(cls, *args): - self.notify_count += 1 - - def mock_notify2(cls, *args): - raise RuntimeError("Bad notifier.") self.flags(notification_driver='nova.notifier.list_notifier', list_notifier_drivers=['nova.notifier.no_op_notifier', 'nova.notifier.log_notifier']) - - list_notifier_log = logging.getLogger('nova.notifier.list_notifier') - list_notifier_log.exception - self.stubs.Set(list_notifier_log, "exception", mock_exception) - self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify) - self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) - notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) - self.assertEqual(self.notify_count, 1) self.assertEqual(self.exception_count, 1) + + def test_when_driver_fails_to_import(self): + self.flags(notification_driver='nova.notifier.list_notifier', + list_notifier_drivers=['nova.notifier.no_op_notifier', + 'nova.notifier.logo_notifier', + 'fdsjgsdfhjkhgsfkj']) + notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.exception_count, 2) + self.assertEqual(self.notify_count, 1) -- cgit From 25ee794d803fa522d31177dc16d8c535d9b8daab Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 22 Aug 2011 21:52:49 +0200 Subject: Add a generic set of tests for hypervisor drivers. --- nova/tests/test_virt_drivers.py | 483 ++++++++++++++++++++++++++++++++++++++++ nova/virt/fake.py | 8 + 2 files changed, 491 insertions(+) create mode 100644 nova/tests/test_virt_drivers.py (limited to 'nova') diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py new file mode 100644 index 000000000..2c91c0664 --- /dev/null +++ b/nova/tests/test_virt_drivers.py @@ -0,0 +1,483 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +import netaddr +import sys +import traceback + +from nova import exception +from nova import flags +from nova import image +from nova import log as logging +from nova import test +from nova.tests import utils as test_utils + +libvirt = None +FLAGS = flags.FLAGS + +LOG = logging.getLogger('nova.tests.test_virt_drivers') + +def catch_notimplementederror(f): + """Decorator to simplify catching drivers raising NotImplementedError + + If a particular call makes a driver raise NotImplementedError, we + log it so that we can extract this information afterwards to + automatically generate a hypervisor/feature support matrix.""" + def wrapped_func(self, *args, **kwargs): + try: + return f(self, *args, **kwargs) + except NotImplementedError: + frame = traceback.extract_tb(sys.exc_info()[2])[-1] + LOG.error('%(driver)s does not implement %(method)s' % { + 'driver': type(self.connection), + 'method': frame[2]}) + + wrapped_func.__name__ = f.__name__ + wrapped_func.__doc__ = f.__doc__ + return wrapped_func + +class _VirtDriverTestCase(test.TestCase): + def setUp(self): + super(_VirtDriverTestCase, self).setUp() + self.connection = self.driver_module.get_connection('') + self.ctxt = test_utils.get_test_admin_context() + self.image_service = image.get_default_image_service() + + @catch_notimplementederror + def test_init_host(self): + self.connection.init_host('myhostname') + + @catch_notimplementederror + def test_list_instances(self): + self.connection.list_instances() + + @catch_notimplementederror + def test_list_instances_detail(self): + self.connection.list_instances_detail() + + @catch_notimplementederror + def test_spawn(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + + domains = self.connection.list_instances() + self.assertIn(instance_ref['name'], domains) + + domains_details = self.connection.list_instances_detail() + self.assertIn(instance_ref['name'], [i.name for i in domains_details]) + + @catch_notimplementederror + def test_snapshot_not_running(self): + instance_ref = test_utils.get_test_instance() + img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) + self.assertRaises(exception.InstanceNotRunning, + self.connection.snapshot, + self.ctxt, instance_ref, img_ref['id']) + + @catch_notimplementederror + def test_snapshot_running(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'}) + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.snapshot(self.ctxt, instance_ref, img_ref['id']) + + @catch_notimplementederror + def test_reboot(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.reboot(instance_ref, network_info) + + @catch_notimplementederror + def test_get_host_ip_addr(self): + host_ip = self.connection.get_host_ip_addr() + + # Will raise an exception if it's not a valid IP at all + ip = netaddr.IPAddress(host_ip) + + # For now, assume IPv4. 
+ self.assertEquals(ip.version, 4) + + @catch_notimplementederror + def test_resize_running(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.resize(instance_ref, 7) + + @catch_notimplementederror + def test_set_admin_password(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.set_admin_password(instance_ref, 'p4ssw0rd') + + @catch_notimplementederror + def test_inject_file(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.inject_file(instance_ref, + base64.b64encode('/testfile'), + base64.b64encode('testcontents')) + + @catch_notimplementederror + def test_agent_update(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.agent_update(instance_ref, 'http://www.openstack.org/', + 'd41d8cd98f00b204e9800998ecf8427e') + + @catch_notimplementederror + def test_rescue(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.rescue(self.ctxt, instance_ref, + lambda x:None, network_info) + + @catch_notimplementederror + def test_unrescue_unrescued_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.unrescue(instance_ref, lambda x:None, network_info) + + @catch_notimplementederror + def test_unrescue_rescued_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.rescue(self.ctxt, instance_ref, + lambda x:None, network_info) + self.connection.unrescue(instance_ref, lambda x:None, network_info) + + @catch_notimplementederror + def test_poll_rescued_instances(self): + self.connection.poll_rescued_instances(10) + + @catch_notimplementederror + def test_migrate_disk_and_power_off(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.migrate_disk_and_power_off(instance_ref, 'dest_host') + + @catch_notimplementederror + def test_pause(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.pause(instance_ref, None) + + @catch_notimplementederror + def test_unpause_unpaused_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.unpause(instance_ref, None) + + @catch_notimplementederror + def test_unpause_paused_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.pause(instance_ref, None) + 
self.connection.unpause(instance_ref, None) + + @catch_notimplementederror + def test_suspend(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.suspend(instance_ref, None) + + @catch_notimplementederror + def test_resume_unsuspended_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.resume(instance_ref, None) + + @catch_notimplementederror + def test_resume_suspended_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.suspend(instance_ref, None) + self.connection.resume(instance_ref, None) + + @catch_notimplementederror + def test_destroy_instance_nonexistant(self): + fake_instance = { 'id': 42, 'name': 'I just made this up!' } + network_info = test_utils.get_test_network_info() + self.connection.destroy(fake_instance, network_info) + + @catch_notimplementederror + def test_destroy_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.assertIn(instance_ref['name'], + self.connection.list_instances()) + self.connection.destroy(instance_ref, network_info) + self.assertNotIn(instance_ref['name'], + self.connection.list_instances()) + + @catch_notimplementederror + def test_attach_detach_volume(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.attach_volume(instance_ref['name'], + '/dev/null', '/mnt/nova/something') + self.connection.detach_volume(instance_ref['name'], + '/mnt/nova/something') + + @catch_notimplementederror + def test_get_info(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + info = self.connection.get_info(instance_ref['name']) + self.assertIn('state', info) + self.assertIn('max_mem', info) + self.assertIn('mem', info) + self.assertIn('num_cpu', info) + self.assertIn('cpu_time', info) + + @catch_notimplementederror + def test_get_info_for_unknown_instance(self): + self.assertRaises(exception.NotFound, + self.connection.get_info, 'I just made this name up') + + @catch_notimplementederror + def test_get_diagnostics(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.get_diagnostics(instance_ref['name']) + + @catch_notimplementederror + def test_list_disks(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.list_disks(instance_ref['name']) + + @catch_notimplementederror + def test_list_interfaces(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.list_interfaces(instance_ref['name']) + + @catch_notimplementederror + def test_block_stats(self): + network_info = 
test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + stats = self.connection.block_stats(instance_ref['name'], 'someid') + self.assertEquals(len(stats), 5) + + @catch_notimplementederror + def test_interface_stats(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + stats = self.connection.interface_stats(instance_ref['name'], 'someid') + self.assertEquals(len(stats), 8) + + @catch_notimplementederror + def test_get_console_output(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + console_output = self.connection.get_console_output(instance_ref) + self.assertTrue(isinstance(console_output, basestring)) + + @catch_notimplementederror + def test_get_ajax_console(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + ajax_console = self.connection.get_ajax_console(instance_ref) + self.assertIn('token', ajax_console) + self.assertIn('host', ajax_console) + self.assertIn('port', ajax_console) + + @catch_notimplementederror + def test_get_vnc_console(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + vnc_console = self.connection.get_vnc_console(instance_ref) + self.assertIn('token', vnc_console) + self.assertIn('host', vnc_console) + self.assertIn('port', vnc_console) + + @catch_notimplementederror + def test_get_console_pool_info(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + console_pool = self.connection.get_console_pool_info(instance_ref) + self.assertIn('address', console_pool) + self.assertIn('username', console_pool) + self.assertIn('password', console_pool) + + @catch_notimplementederror + def test_refresh_security_group_rules(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + # FIXME: Create security group and add the instance to it + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.refresh_security_group_rules(1) + + @catch_notimplementederror + def test_refresh_security_group_members(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + # FIXME: Create security group and add the instance to it + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.refresh_security_group_members(1) + + @catch_notimplementederror + def test_refresh_provider_fw_rules(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.refresh_provider_fw_rules() + + @catch_notimplementederror + def test_update_available_resource(self): + self.compute = self.start_service('compute', host='dummy') + self.connection.update_available_resource(self.ctxt, 'dummy') + + @catch_notimplementederror + def test_compare_cpu(self): + cpu_info = '''{ "topology": { + "sockets": 1, + "cores": 2, + "threads": 1 }, + "features": [ + "xtpr", + "tm2", + 
"est", + "vmx", + "ds_cpl", + "monitor", + "pbe", + "tm", + "ht", + "ss", + "acpi", + "ds", + "vme"], + "arch": "x86_64", + "model": "Penryn", + "vendor": "Intel" }''' + + self.connection.compare_cpu(cpu_info) + + @catch_notimplementederror + def test_ensure_filtering_for_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.ensure_filtering_rules_for_instance(instance_ref, + network_info) + + @catch_notimplementederror + def test_unfilter_instance(self): + instance_ref = test_utils.get_test_instance() + network_info = test_utils.get_test_network_info() + self.connection.unfilter_instance(instance_ref, network_info) + + + @catch_notimplementederror + def test_live_migration(self): + network_info = test_utils.get_test_network_info() + instance_ref = test_utils.get_test_instance() + self.connection.spawn(self.ctxt, instance_ref, network_info) + self.connection.live_migration(self.ctxt, instance_ref, 'otherhost', + None, None) + + @catch_notimplementederror + def _check_host_status_fields(self, host_status): + self.assertIn('host_name-description', host_status) + self.assertIn('host_hostname', host_status) + self.assertIn('host_memory_total', host_status) + self.assertIn('host_memory_overhead', host_status) + self.assertIn('host_memory_free', host_status) + self.assertIn('host_memory_free_computed', host_status) + self.assertIn('host_other_config', host_status) + self.assertIn('host_ip_address', host_status) + self.assertIn('host_cpu_info', host_status) + self.assertIn('disk_available', host_status) + self.assertIn('disk_total', host_status) + self.assertIn('disk_used', host_status) + self.assertIn('host_uuid', host_status) + self.assertIn('host_name_label', host_status) + + @catch_notimplementederror + def test_update_host_status(self): + host_status = self.connection.update_host_status() + self._check_host_status_fields(host_status) + + @catch_notimplementederror + def test_get_host_stats(self): + host_status = self.connection.get_host_stats() + self._check_host_status_fields(host_status) + + @catch_notimplementederror + def test_set_host_enabled(self): + self.connection.set_host_enabled('Am I a useless argument?', True) + + @catch_notimplementederror + def test_host_power_action_reboot(self): + self.connection.host_power_action('Am I a useless argument?', 'reboot') + + @catch_notimplementederror + def test_host_power_action_shutdown(self): + self.connection.host_power_action('Am I a useless argument?', 'shutdown') + + @catch_notimplementederror + def test_host_power_action_startup(self): + self.connection.host_power_action('Am I a useless argument?', 'startup') + +class AbstractDriverTestCase(_VirtDriverTestCase): + def setUp(self): + import nova.virt.driver + self.driver_module = nova.virt.driver + def get_driver_connection(_): + return nova.virt.driver.ComputeDriver() + self.driver_module.get_connection = get_driver_connection + super(AbstractDriverTestCase, self).setUp() + +class FakeConnectionTestCase(_VirtDriverTestCase): + def setUp(self): + import nova.virt.fake + self.driver_module = nova.virt.fake + super(FakeConnectionTestCase, self).setUp() + +# Before long, we'll add the real hypervisor drivers here as well +# with whatever instrumentation they need to work independently of +# their hypervisor. This way, we can verify that they all act the +# same. 
diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 13b7aeab5..e1e909c42 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -67,6 +67,7 @@ class FakeConnection(driver.ComputeDriver): 'disk_used': 100000000000, 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', 'host_name_label': 'fake-mini'} + self._mounts = {} @classmethod def instance(cls): @@ -152,9 +153,16 @@ class FakeConnection(driver.ComputeDriver): (key, self.instances)) def attach_volume(self, instance_name, device_path, mountpoint): + if not instance_name in self._mounts: + self._mounts[instance_name] = {} + self._mounts[instance_name][mountpoint] = device_path return True def detach_volume(self, instance_name, mountpoint): + try: + del self._mounts[instance_name][mountpoint] + except KeyError: + pass return True def get_info(self, instance_name): -- cgit From 51344d7be195f9342d24d461f4c07fa1c9141da4 Mon Sep 17 00:00:00 2001 From: Tim Simpson Date: Mon, 22 Aug 2011 15:21:31 -0500 Subject: Changed list_notifier to call sys.exit if a notification driver could not be found. --- nova/notifier/list_notifier.py | 15 ++++----------- nova/tests/notifier/test_list_notifier.py | 15 ++++++++++----- 2 files changed, 14 insertions(+), 16 deletions(-) (limited to 'nova') diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py index 78d51628c..21067df55 100644 --- a/nova/notifier/list_notifier.py +++ b/nova/notifier/list_notifier.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +import sys + from nova import flags from nova import log as logging from nova import utils @@ -29,15 +31,6 @@ LOG = logging.getLogger('nova.notifier.list_notifier') drivers = None -class ImportFailureNotifier(object): - - def __init__(self, exception): - self.exception = exception - - def notify(message): - raise self.exception - - def _get_drivers(): """Instantiates and returns drivers based on the flag values.""" global drivers @@ -46,8 +39,8 @@ def _get_drivers(): for notification_driver in FLAGS.list_notifier_drivers: try: drivers.append(utils.import_object(notification_driver)) - except ClassNotFound as e: - drivers.append(ImportFailureNotifier(e)) + except ClassNotFound: + sys.exit(1) return drivers def notify(message): diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py index ca8b3e0a7..bab1a0ab8 100644 --- a/nova/tests/notifier/test_list_notifier.py +++ b/nova/tests/notifier/test_list_notifier.py @@ -14,18 +14,15 @@ # under the License. import stubout +import sys import nova -from nova import context -from nova import flags from nova import log as logging -from nova import rpc import nova.notifier.api from nova.notifier.api import notify from nova.notifier import log_notifier from nova.notifier import no_op_notifier from nova.notifier import list_notifier -from nova.notifier import rabbit_notifier from nova import test @@ -51,6 +48,11 @@ class NotifierListTestCase(test.TestCase): def mock_notify2(cls, *args): raise RuntimeError("Bad notifier.") self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) + # mock sys.exit so we don't actually kill the program during our tests. 
+ self.sys_exit_code = 0 + def mock_sys_exit(code): + self.sys_exit_code += code + self.stubs.Set(sys, 'exit', mock_sys_exit) def tearDown(self): self.stubs.UnsetAll() @@ -65,6 +67,7 @@ class NotifierListTestCase(test.TestCase): nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 2) self.assertEqual(self.exception_count, 0) + self.assertEqual(self.sys_exit_code, 0) def test_send_notifications_with_errors(self): @@ -74,6 +77,7 @@ class NotifierListTestCase(test.TestCase): notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 1) self.assertEqual(self.exception_count, 1) + self.assertEqual(self.sys_exit_code, 0) def test_when_driver_fails_to_import(self): self.flags(notification_driver='nova.notifier.list_notifier', @@ -81,5 +85,6 @@ class NotifierListTestCase(test.TestCase): 'nova.notifier.logo_notifier', 'fdsjgsdfhjkhgsfkj']) notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) - self.assertEqual(self.exception_count, 2) + self.assertEqual(self.exception_count, 0) self.assertEqual(self.notify_count, 1) + self.assertEqual(self.sys_exit_code, 2) -- cgit From 269b4e00e82b8f99d2fc24f935ff165d62f19891 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 22 Aug 2011 16:23:48 -0400 Subject: logging as exception rather than error --- nova/api/openstack/views/addresses.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index 05028db41..d54013d61 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -55,7 +55,7 @@ class ViewBuilderV11(ViewBuilder): try: network_label = self._extract_network_label(interface) except TypeError: - LOG.error(traceback.format_exc()) + LOG.exception(traceback.format_exc()) continue if network_label not in networks: @@ -91,7 +91,7 @@ class ViewBuilderV11(ViewBuilder): try: return interface['network']['label'] except (TypeError, KeyError): - LOG.error(traceback.format_exc()) + LOG.exception(traceback.format_exc()) raise TypeError def _extract_ipv4_addresses(self, interface): -- cgit From 5a288485215a13f3892ae17a46b9644ed84fc089 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 22 Aug 2011 14:24:37 -0700 Subject: Added Test Code, doc string, and fixed pip-requiresw --- nova/flags.py | 5 +++-- nova/notifier/api.py | 8 ++++++++ nova/tests/test_notifier.py | 22 ++++++++++++++++++++++ nova/tests/test_utils.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ nova/utils.py | 26 +++++++++++++++++++++++--- 5 files changed, 100 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 4e6e3ad40..e9bf6e334 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -404,9 +404,10 @@ DEFINE_string('root_helper', 'sudo', 'Command prefix to use for running commands as root') DEFINE_bool('monkey_patch', False, - 'Whether to monkey patch') + 'Whether to log monkey patching') DEFINE_list('monkey_patch_modules', ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator', 'nova.compute.api:nova.notifier.api.notify_decorator'], - 'Module list representing monkey patched module and decorator') + 'Module list representing monkey\ + patched module and decorator') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 99b8b0102..a98f17dbe 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -40,6 +40,14 @@ class BadPriorityException(Exception): def notify_decorator(name, fn): + """ decorator for 
notify which is used from utils.monkey_patch() + Parameters: + + name - name of the function + function - object of the function + + + """ def wrapped_func(*args, **kwarg): body = {} body['args'] = [] diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 64b799a2c..ab5dfb692 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -134,3 +134,25 @@ class NotifierTestCase(test.TestCase): self.assertEqual(msg['event_type'], 'error_notification') self.assertEqual(msg['priority'], 'ERROR') self.assertEqual(msg['payload']['error'], 'foo') + + def test_send_notification_by_decorator(self): + self.notify_called = False + + def example_api(arg1, arg2): + return arg1 + arg2 + + example_api =\ + nova.notifier.api.notify_decorator( + 'example_api', + example_api) + + def mock_notify(cls, *args): + self.notify_called = True + + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', + mock_notify) + + class Mock(object): + pass + self.assertEqual(3, example_api(1, 2)) + self.assertEqual(self.notify_called, True) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index ec5098a37..317ec46d0 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -18,6 +18,7 @@ import datetime import os import tempfile +import nova from nova import exception from nova import test from nova import utils @@ -384,3 +385,46 @@ class ToPrimitiveTestCase(test.TestCase): def test_typeerror(self): x = bytearray # Class, not instance self.assertEquals(utils.to_primitive(x), u"") + + +class MonkeyPatchTestCase(test.TestCase): + """Unit test for utils.monkey_patch().""" + def setUp(self): + super(MonkeyPatchTestCase, self).setUp() + self.flags( + monkey_patch=True, + monkey_patch_modules=['nova.tests.example.example_a' + ':' + + 'nova.tests.example.example_decorator']) + + def test_monkey_patch(self): + utils.monkey_patch() + nova.tests.example.CALLED_FUNCTION = [] + from nova.tests.example import example_a, example_b + + self.assertEqual('Example function', example_a.example_function_a()) + exampleA = example_a.ExampleClassA() + exampleA.example_method() + ret_a = exampleA.example_method_add(3, 5) + self.assertEqual(ret_a, 8) + + self.assertEqual('Example function', example_b.example_function_b()) + exampleB = example_b.ExampleClassB() + exampleB.example_method() + ret_b = exampleB.example_method_add(3, 5) + + self.assertEqual(ret_b, 8) + package_a = 'nova.tests.example.example_a.' + self.assertTrue(package_a + 'example_function_a' + in nova.tests.example.CALLED_FUNCTION) + + self.assertTrue(package_a + 'ExampleClassA.example_method' + in nova.tests.example.CALLED_FUNCTION) + self.assertTrue(package_a + 'ExampleClassA.example_method_add' + in nova.tests.example.CALLED_FUNCTION) + package_b = 'nova.tests.example.example_b.' + self.assertFalse(package_b + 'example_function_b' + in nova.tests.example.CALLED_FUNCTION) + self.assertFalse(package_b + 'ExampleClassB.example_method' + in nova.tests.example.CALLED_FUNCTION) + self.assertFalse(package_b + 'ExampleClassB.example_method_add' + in nova.tests.example.CALLED_FUNCTION) diff --git a/nova/utils.py b/nova/utils.py index d7e14b1b0..be2ba68f9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -842,21 +842,41 @@ def bool_from_str(val): def monkey_patch(): + """ If the Flags.monkey_patch set as True, + this functuion patches a decorator + for all functions in specified modules. + You can set decorators for each modules + using FLAGS.monkey_patch_modules. + The format is "Module path:Decorator function". 
+ Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator' + + Parameters of the decorator is as follows. + (See nova.notifier.api.notify_decorator) + + name - name of the function + function - object of the function + """ + + # If FLAGS.monkey_patch is not True, this function do nothing. if not FLAGS.monkey_patch: return + # Get list of moudles and decorators for module_and_decorator in FLAGS.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') + # import decorator function decorator = import_class(decorator_name) __import__(module) + # Retrive module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): + # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = import_class("%s.%s" % (module, key)) for method, func in inspect.getmembers(clz, inspect.ismethod): setattr(clz, method,\ - decorator("%s.%s" % (module, key), func)) + decorator("%s.%s.%s" % (module, key, method), func)) + # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key,\ - setattr(sys.modules[module], key, \ - decorator("%s.%s" % (module, key), func))) + decorator("%s.%s" % (module, key), func)) -- cgit From c3ed01d7d53dbade412122743078d60131adbf9f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 22 Aug 2011 14:24:59 -0700 Subject: change NoAuth to actually use a tenant and user --- nova/api/auth.py | 19 +---------------- nova/api/ec2/__init__.py | 21 +++++++++++++++++++ nova/api/openstack/auth.py | 52 ++++++++++++++++++++++++++++++++++------------ 3 files changed, 61 insertions(+), 31 deletions(-) (limited to 'nova') diff --git a/nova/api/auth.py b/nova/api/auth.py index 050216fd7..cd0d38b3f 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -45,24 +45,6 @@ class InjectContext(wsgi.Middleware): return self.application -class AdminContext(wsgi.Middleware): - """Return an admin context no matter what""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - # Build a context, including the auth_token... - remote_address = req.remote_addr - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext('admin', - 'admin', - is_admin=True, - remote_address=remote_address) - - req.environ['nova.context'] = ctx - return self.application - - class KeystoneContext(wsgi.Middleware): """Make a request context from keystone headers""" @@ -80,6 +62,7 @@ class KeystoneContext(wsgi.Middleware): req.headers.get('X_STORAGE_TOKEN')) # Build a context, including the auth_token... 
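
Returning to the monkey-patching commit above: each entry in FLAGS.monkey_patch_modules has the form 'module.path:decorator.path', and utils.monkey_patch() wraps every class method and module-level function it finds in the named module with the named decorator, passing the dotted name plus the original callable. A toy decorator with the required (name, fn) signature, purely illustrative and not part of the patch:

    def logging_decorator(name, fn):
        """Example decorator with the (name, fn) contract monkey_patch expects."""
        def wrapped_func(*args, **kwargs):
            # 'name' is the dotted name computed by monkey_patch();
            # 'fn' is the original function or method being wrapped.
            print('calling %s' % name)
            return fn(*args, **kwargs)
        return wrapped_func

    # Enabled via flags, for example:
    #   monkey_patch=True
    #   monkey_patch_modules=['mypackage.mymodule:mypackage.logging_decorator']
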
+ remote_address = getattr(req, 'remote_address', '127.0.0.1') remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 17969099d..5430f443d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -183,6 +183,27 @@ class ToToken(wsgi.Middleware): return self.application +class NoAuth(wsgi.Middleware): + """Add user:project as 'nova.context' to WSGI environ.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if 'AWSAccessKeyId' not in req.params: + raise webob.exc.HTTPBadRequest() + user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['nova.context'] = ctx + return self.application + + class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'nova.context' to WSGI environ.""" diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 0d9c7562a..f2dc89094 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -33,6 +33,7 @@ from nova.api.openstack import faults LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS +flags.DECLARE('use_forwarded_for', 'nova.api.auth') class NoAuthMiddleware(wsgi.Middleware): @@ -40,17 +41,36 @@ class NoAuthMiddleware(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - if 'X-Auth-Token' in req.headers: + if 'X-Auth-Token' not in req.headers: + os_url = req.url + version = common.get_version_from_href(os_url) + user_id = req.headers.get('X-Auth-User', 'admin') + project_id = req.headers.get('X-Auth-Project-Id', 'admin') + if version == '1.1': + os_url += '/' + project_id + res = webob.Response() + res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) + res.headers['X-Server-Management-Url'] = os_url + res.headers['X-Storage-Url'] = '' + res.headers['X-CDN-Management-Url'] = '' + res.content_type = 'text/plain' + res.status = '204' + return res + else: + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', + remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['nova.context'] = ctx return self.application - logging.debug("Got no auth token, returning fake info.") - res = webob.Response() - res.headers['X-Auth-Token'] = 'fake' - res.headers['X-Server-Management-Url'] = req.url - res.headers['X-Storage-Url'] = '' - res.headers['X-CDN-Management-Url'] = '' - res.content_type = 'text/plain' - res.status = '204' - return res class AuthMiddleware(wsgi.Middleware): @@ -103,9 +123,15 @@ class AuthMiddleware(wsgi.Middleware): project_id = projects[0].id is_admin = self.auth.is_admin(user_id) - req.environ['nova.context'] = context.RequestContext(user_id, - project_id, - is_admin) + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = 
context.RequestContext(user_id, + project_id, + is_admin=is_admin, + remote_address=remote_address) + req.environ['nova.context'] = ctx + if not is_admin and not self.auth.is_project_member(user_id, project_id): msg = _("%(user_id)s must be an admin or a " -- cgit From b24a05dbc19eaf67661eac98aa778d789ffa7b4e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 22 Aug 2011 23:25:08 +0200 Subject: Make snapshot raise InstanceNotRunning when the instance isn't running. --- nova/virt/fake.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index e1e909c42..d5e2bf31b 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -100,7 +100,8 @@ class FakeConnection(driver.ComputeDriver): self.instances[name] = fake_instance def snapshot(self, context, instance, name): - pass + if not instance['name'] in self.instances: + raise exception.InstanceNotRunning() def reboot(self, instance, network_info): pass @@ -145,7 +146,7 @@ class FakeConnection(driver.ComputeDriver): pass def destroy(self, instance, network_info, cleanup=True): - key = instance.name + key = instance['name'] if key in self.instances: del self.instances[key] else: -- cgit From 150098011db8ca6c534c4a281df388bd42301eea Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 22 Aug 2011 23:26:12 +0200 Subject: Update a few doc strings. Address a few pep8 issues. Add nova.tests.utils which provides a couple of handy methods for testing stuff. --- nova/tests/test_test_utils.py | 41 +++++++++++++++++++++++++ nova/tests/test_virt_drivers.py | 26 ++++++++++------ nova/tests/utils.py | 68 +++++++++++++++++++++++++++++++++++++++++ nova/virt/driver.py | 18 +++++------ 4 files changed, 132 insertions(+), 21 deletions(-) create mode 100644 nova/tests/test_test_utils.py create mode 100644 nova/tests/utils.py (limited to 'nova') diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py new file mode 100644 index 000000000..237339758 --- /dev/null +++ b/nova/tests/test_test_utils.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import db +from nova import test +from nova.tests import utils as test_utils + + +class TestUtilsTestCase(test.TestCase): + def test_get_test_admin_context(self): + """get_test_admin_context's return value behaves like admin context""" + ctxt = test_utils.get_test_admin_context() + + # TODO(soren): This should verify the full interface context + # objects expose. 
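
One detail worth noting from the NoAuth middleware changes a couple of commits above (nova/api/ec2/__init__.py and nova/api/openstack/auth.py): both variants now carry credentials as 'user:project', with the project falling back to the user when only one value is supplied. The parsing reduces to str.partition, as in this standalone sketch:

    def split_noauth_token(token):
        """Split 'user:project' credentials; project defaults to the user."""
        user_id, _sep, project_id = token.partition(':')
        return user_id, project_id or user_id

    # split_noauth_token('admin')       -> ('admin', 'admin')
    # split_noauth_token('alice:proj1') -> ('alice', 'proj1')
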
+ self.assertTrue(ctxt.is_admin) + + def test_get_test_instance(self): + """get_test_instance's return value looks like an instance_ref""" + instance_ref = test_utils.get_test_instance() + ctxt = test_utils.get_test_admin_context() + db.instance_get(ctxt, instance_ref['id']) + + def _test_get_test_network_info(self): + """Does the return value match a real network_info structure""" + # The challenge here is to define what exactly such a structure + # must look like. + pass diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py index 2c91c0664..480247c91 100644 --- a/nova/tests/test_virt_drivers.py +++ b/nova/tests/test_virt_drivers.py @@ -31,6 +31,7 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.test_virt_drivers') + def catch_notimplementederror(f): """Decorator to simplify catching drivers raising NotImplementedError @@ -50,6 +51,7 @@ def catch_notimplementederror(f): wrapped_func.__doc__ = f.__doc__ return wrapped_func + class _VirtDriverTestCase(test.TestCase): def setUp(self): super(_VirtDriverTestCase, self).setUp() @@ -151,14 +153,14 @@ class _VirtDriverTestCase(test.TestCase): network_info = test_utils.get_test_network_info() self.connection.spawn(self.ctxt, instance_ref, network_info) self.connection.rescue(self.ctxt, instance_ref, - lambda x:None, network_info) + lambda x: None, network_info) @catch_notimplementederror def test_unrescue_unrescued_instance(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() self.connection.spawn(self.ctxt, instance_ref, network_info) - self.connection.unrescue(instance_ref, lambda x:None, network_info) + self.connection.unrescue(instance_ref, lambda x: None, network_info) @catch_notimplementederror def test_unrescue_rescued_instance(self): @@ -166,8 +168,8 @@ class _VirtDriverTestCase(test.TestCase): network_info = test_utils.get_test_network_info() self.connection.spawn(self.ctxt, instance_ref, network_info) self.connection.rescue(self.ctxt, instance_ref, - lambda x:None, network_info) - self.connection.unrescue(instance_ref, lambda x:None, network_info) + lambda x: None, network_info) + self.connection.unrescue(instance_ref, lambda x: None, network_info) @catch_notimplementederror def test_poll_rescued_instances(self): @@ -226,7 +228,7 @@ class _VirtDriverTestCase(test.TestCase): @catch_notimplementederror def test_destroy_instance_nonexistant(self): - fake_instance = { 'id': 42, 'name': 'I just made this up!' 
} + fake_instance = {'id': 42, 'name': 'I just made this up!'} network_info = test_utils.get_test_network_info() self.connection.destroy(fake_instance, network_info) @@ -410,7 +412,6 @@ class _VirtDriverTestCase(test.TestCase): network_info = test_utils.get_test_network_info() self.connection.unfilter_instance(instance_ref, network_info) - @catch_notimplementederror def test_live_migration(self): network_info = test_utils.get_test_network_info() @@ -448,29 +449,34 @@ class _VirtDriverTestCase(test.TestCase): @catch_notimplementederror def test_set_host_enabled(self): - self.connection.set_host_enabled('Am I a useless argument?', True) + self.connection.set_host_enabled('a useless argument?', True) @catch_notimplementederror def test_host_power_action_reboot(self): - self.connection.host_power_action('Am I a useless argument?', 'reboot') + self.connection.host_power_action('a useless argument?', 'reboot') @catch_notimplementederror def test_host_power_action_shutdown(self): - self.connection.host_power_action('Am I a useless argument?', 'shutdown') + self.connection.host_power_action('a useless argument?', 'shutdown') @catch_notimplementederror def test_host_power_action_startup(self): - self.connection.host_power_action('Am I a useless argument?', 'startup') + self.connection.host_power_action('a useless argument?', 'startup') + class AbstractDriverTestCase(_VirtDriverTestCase): def setUp(self): import nova.virt.driver + self.driver_module = nova.virt.driver + def get_driver_connection(_): return nova.virt.driver.ComputeDriver() + self.driver_module.get_connection = get_driver_connection super(AbstractDriverTestCase, self).setUp() + class FakeConnectionTestCase(_VirtDriverTestCase): def setUp(self): import nova.virt.fake diff --git a/nova/tests/utils.py b/nova/tests/utils.py new file mode 100644 index 000000000..e0cacadb4 --- /dev/null +++ b/nova/tests/utils.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# + +import nova.context +import nova.db +import nova.flags + +FLAGS = nova.flags.FLAGS + + +def get_test_admin_context(): + return nova.context.get_admin_context() + + +def get_test_instance(context=None): + if not context: + context = get_test_admin_context() + + test_instance = {'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'image_ref': '1', + 'instance_type_id': '5'} # m1.small + + instance_ref = nova.db.instance_create(context, test_instance) + return instance_ref + + +def get_test_network_info(count=1): + ipv6 = FLAGS.use_ipv6 + fake = 'fake' + fake_ip = '0.0.0.0/0' + fake_ip_2 = '0.0.0.1/0' + fake_ip_3 = '0.0.0.1/0' + fake_vlan = 100 + fake_bridge_interface = 'eth0' + network = {'bridge': fake, + 'cidr': fake_ip, + 'cidr_v6': fake_ip, + 'vlan': fake_vlan, + 'bridge_interface': fake_bridge_interface, + 'injected': False} + mapping = {'mac': fake, + 'dhcp_server': fake, + 'gateway': fake, + 'gateway6': fake, + 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} + if ipv6: + mapping['ip6s'] = [{'ip': fake_ip}, + {'ip': fake_ip_2}, + {'ip': fake_ip_3}] + return [(network, mapping) for x in xrange(0, count)] diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 93290aba7..d05b51bd9 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -140,7 +140,7 @@ class ComputeDriver(object): that it was before this call began. :param context: security context - :param instance: Instance of {nova.compute.service.Instance}. + :param instance: Instance object as returned by DB layer. This function should use the data there to guide the creation of the new instance. :param network_info: @@ -152,14 +152,11 @@ class ComputeDriver(object): def destroy(self, instance, network_info, cleanup=True): """Destroy (shutdown and delete) the specified instance. - The given parameter is an instance of nova.compute.service.Instance, - If the instance is not found (for example if networking failed), this function should still succeed. It's probably a good idea to log a warning in that case. - :param instance: Instance of {nova.compute.service.Instance} and so - the instance is being specified as instance.name. + :param instance: Instance object as returned by DB layer. :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param cleanup: @@ -171,8 +168,7 @@ class ComputeDriver(object): def reboot(self, instance, network_info): """Reboot the specified instance. - :param instance: Instance of {nova.compute.service.Instance} and so - the instance is being specified as instance.name. + :param instance: Instance object as returned by DB layer. :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` """ @@ -240,10 +236,10 @@ class ComputeDriver(object): """ Snapshots the specified instance. - The given parameter is an instance of nova.compute.service.Instance, - and so the instance is being specified as instance.name. - - The second parameter is the name of the snapshot. + :param context: security context + :param instance: Instance object as returned by DB layer. + :param image_id: Reference to a pre-created image that will + hold the snapshot. 
""" raise NotImplementedError() -- cgit From 43add36446e6b4172dc8ed5043e11187a9992474 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 22 Aug 2011 14:26:41 -0700 Subject: fix comment --- nova/auth/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c9178c0dd..85227bea0 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -18,7 +18,7 @@ """ WARNING: This code is deprecated and will be removed. -Keystone is recommended is the recommended solution for auth management. +Keystone is the recommended solution for auth management. Nova authentication management """ -- cgit From 0d0c8dfbf29b47aa13e18dd8861bad6ccb10cf12 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Mon, 22 Aug 2011 16:46:29 -0700 Subject: Change parameters of 'nova-manage network modify'. Move common test codes into private method. --- nova/tests/test_nova_manage.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index d6edc8ba9..91130de67 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -36,10 +36,10 @@ import netaddr import StringIO from nova import context from nova import db -from nova import flags -from nova import test from nova import exception +from nova import flags from nova import log as logging +from nova import test FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.nova_manage') @@ -101,7 +101,7 @@ class NetworkCommandsTestCase(test.TestCase): def tearDown(self): super(NetworkCommandsTestCase, self).tearDown() - def test_create(self): + def _create_network(self): FLAGS.network_manager = 'nova.network.manager.VlanManager' self.commands.create( label='Test', @@ -114,7 +114,10 @@ class NetworkCommandsTestCase(test.TestCase): fixed_range_v6='fd00:2::/120', gateway_v6='fd00:2::22', bridge_interface='eth0') - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + return db.network_get_by_cidr(self.context, '10.2.0.0/24') + + def test_create(self): + net = self._create_network() self.assertEqual(net['label'], 'Test') self.assertEqual(net['cidr'], '10.2.0.0/24') self.assertEqual(net['netmask'], '255.255.255.0') @@ -127,8 +130,7 @@ class NetworkCommandsTestCase(test.TestCase): self.assertEqual(net['bridge_interface'], 'eth0') def test_list(self): - self.test_create() - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + net = self._create_network() output = StringIO.StringIO() sys.stdout = output self.commands.list() @@ -158,7 +160,7 @@ class NetworkCommandsTestCase(test.TestCase): self.assertEqual(result, answer) def test_delete(self): - self.test_create() + net = self._create_network() self.commands.delete(fixed_range='10.2.0.0/24') net_exist = True try: @@ -168,8 +170,7 @@ class NetworkCommandsTestCase(test.TestCase): self.assertEqual(net_exist, False) def test_modify(self): - self.test_create() - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') + net = self._create_network() db.network_disassociate(self.context, net['id']) net = db.network_get_by_cidr(self.context, '10.2.0.0/24') self.assertEqual(net['project_id'], None) @@ -183,7 +184,7 @@ class NetworkCommandsTestCase(test.TestCase): net = db.network_get_by_cidr(self.context, '10.2.0.0/24') self.assertEqual(net['project_id'], 'test_project') self.assertEqual(net['host'], 'test_host') - self.commands.modify('10.2.0.0/24', project='None', host='None') + 
self.commands.modify('10.2.0.0/24', dis_project=True, dis_host=True) net = db.network_get_by_cidr(self.context, '10.2.0.0/24') self.assertEqual(net['project_id'], None) self.assertEqual(net['host'], None) -- cgit From 1d121a42f5072026a3ad19cb5fd1915d7cd2ff63 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 22 Aug 2011 17:31:48 -0700 Subject: initial cut on volume type APIs --- nova/db/sqlalchemy/api.py | 16 +++---- nova/db/sqlalchemy/models.py | 2 +- nova/exception.py | 4 ++ nova/volume/api.py | 8 +++- nova/volume/volume_types.py | 101 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 120 insertions(+), 11 deletions(-) create mode 100644 nova/volume/volume_types.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 143162fc6..ac8aed307 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2163,7 +2163,7 @@ def volume_get(context, volume_id, session=None): if is_admin_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ @@ -2171,7 +2171,7 @@ def volume_get(context, volume_id, session=None): elif is_user_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ @@ -2188,7 +2188,7 @@ def volume_get_all(context): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2199,7 +2199,7 @@ def volume_get_all_by_host(context, host): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ @@ -2210,7 +2210,7 @@ def volume_get_all_by_host(context, host): def volume_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.Volume).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ @@ -2227,7 +2227,7 @@ def volume_get_all_by_project(context, project_id): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ @@ -2241,7 +2241,7 @@ def volume_get_instance(context, volume_id): filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload('instance')).\ - options(joinedload('metadata')).\ + options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ first() if not result: @@ -3634,7 +3634,7 @@ def volume_type_create(_context, values): @require_context -def volume_type_get_all(context, inactive=False): +def volume_type_get_all(context, inactive=False, filters={}): """ Returns a dict describing all volume_types 
with name as key. """ diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 99e6f412e..4195ca113 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -322,7 +322,7 @@ class VolumeMetadata(BASE, NovaBase): key = Column(String(255)) value = Column(String(255)) volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=False) - volume = relationship(Volume, backref="metadata", + volume = relationship(Volume, backref="volume_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeMetadata.volume_id == Volume.id,' diff --git a/nova/exception.py b/nova/exception.py index 1b118f6f9..01d676b54 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -197,6 +197,10 @@ class InvalidInstanceType(Invalid): message = _("Invalid instance type %(instance_type)s.") +class InvalidVolumeType(Invalid): + message = _("Invalid volume type %(volume_type)s.") + + class InvalidPortRange(Invalid): message = _("Invalid port range %(from_port)s:%(to_port)s.") diff --git a/nova/volume/api.py b/nova/volume/api.py index 52b3a9fed..7a78e244f 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -41,7 +41,8 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, snapshot_id, name, description): + def create(self, context, size, snapshot_id, name, description, + volume_type=None, metadata=None): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": @@ -66,7 +67,10 @@ class API(base.Base): 'status': "creating", 'attach_status': "detached", 'display_name': name, - 'display_description': description} + 'display_description': description, + 'volume_type_id': volume_type.get('id', None), + 'metadata' metadata, + } volume = self.db.volume_create(context, options) rpc.cast(context, diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py new file mode 100644 index 000000000..c1fce1627 --- /dev/null +++ b/nova/volume/volume_types.py @@ -0,0 +1,101 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2011 Ken Pepple +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Built-in volume type properties.""" + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.volume_types') + + +def create(context, name, extra_specs={}): + """Creates volume types.""" + try: + db.volume_type_create( + context, + dict(name=name, + extra_specs=extra_specs)) + except exception.DBError, e: + LOG.exception(_('DB error: %s') % e) + raise exception.ApiError(_("Cannot create volume_type with " + "name %(name)s and specs %(extra_specs)s") + % locals()) + + +def destroy(context, name): + """Marks volume types as deleted.""" + if name is None: + raise exception.InvalidVolumeType(volume_type=name) + else: + try: + db.volume_type_destroy(context, name) + except exception.NotFound: + LOG.exception(_('Volume type %s not found for deletion') % name) + raise exception.ApiError(_("Unknown volume type: %s") % name) + + +def purge(context, name): + """Removes volume types from database.""" + if name is None: + raise exception.InvalidVolumeType(volume_type=name) + else: + try: + db.volume_type_purge(context, name) + except exception.NotFound: + LOG.exception(_('Volume type %s not found for purge') % name) + raise exception.ApiError(_("Unknown volume type: %s") % name) + + +def get_all_types(context, inactive=0): + """Get all non-deleted volume_types. + + Pass true as argument if you want deleted volume types returned also. + + """ + return db.volume_type_get_all(context, inactive) + + +def get_volume_type(context, id): + """Retrieves single volume type by id.""" + if id is None: + raise exception.ApiError(_("Invalid volume type: %s") % id) + + try: + return db.volume_type_get(context, id) + except exception.DBError: + raise exception.ApiError(_("Unknown volume type: %s") % id) + + +def get_volume_type_by_name(context, name): + """Retrieves single volume type by name.""" + if name is None: + raise exception.ApiError(_("Invalid volume type name: %s") % name) + + try: + return db.volume_type_get_by_name(context, name) + except exception.DBError: + raise exception.ApiError(_("Unknown volume type: %s") % name) -- cgit From d994b06f65af9d4c523a4123f915c6147ada7c05 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 22 Aug 2011 22:00:13 -0400 Subject: fixing exception logging --- nova/api/openstack/views/addresses.py | 9 +++------ nova/api/openstack/wsgi.py | 5 ++--- 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index d54013d61..8fccc690f 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
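
Looking back at nova/volume/volume_types.py as introduced above, the helpers are thin wrappers around the DB layer, and the reworked volume API create() is the intended consumer. A hedged usage sketch (assumes a configured nova deployment; the type name, extra_specs keys and the volume.API() call site are illustrative, not taken from the patch):

    from nova import context
    from nova import volume
    from nova.volume import volume_types

    ctxt = context.get_admin_context()

    # Register a type once, then resolve it by name when creating a volume.
    volume_types.create(ctxt, 'gold', extra_specs={'drive_type': 'SAS'})
    vol_type = volume_types.get_volume_type_by_name(ctxt, 'gold')

    # The new create() signature accepts the resolved type plus metadata.
    volume.API().create(ctxt, size=1, snapshot_id=None,
                        name='vol-gold-1', description='typed volume',
                        volume_type=vol_type, metadata={'purpose': 'demo'})
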
-import traceback - from nova import flags from nova import utils from nova import log as logging @@ -54,8 +52,7 @@ class ViewBuilderV11(ViewBuilder): for interface in interfaces: try: network_label = self._extract_network_label(interface) - except TypeError: - LOG.exception(traceback.format_exc()) + except TypeError as exc: continue if network_label not in networks: @@ -90,8 +87,8 @@ class ViewBuilderV11(ViewBuilder): def _extract_network_label(self, interface): try: return interface['network']['label'] - except (TypeError, KeyError): - LOG.exception(traceback.format_exc()) + except (TypeError, KeyError) as exc: + LOG.exception(exc) raise TypeError def _extract_ipv4_addresses(self, interface): diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 572aba993..3616c9ec6 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -1,6 +1,5 @@ import json -import traceback import webob from xml.dom import minidom from xml.parsers import expat @@ -517,6 +516,6 @@ class Resource(wsgi.Application): controller_method = getattr(self.controller, action) try: return controller_method(req=request, **action_args) - except TypeError: - LOG.debug(traceback.format_exc()) + except TypeError as exc: + LOG.exception(exc) return faults.Fault(webob.exc.HTTPBadRequest()) -- cgit From 6f3610042452cc1cb6b1e0c204a127c0c48794f0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 22 Aug 2011 19:25:22 -0700 Subject: unindented per review, added a note about auth v2 --- nova/api/openstack/auth.py | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index f2dc89094..6754fea27 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -49,6 +49,9 @@ class NoAuthMiddleware(wsgi.Middleware): if version == '1.1': os_url += '/' + project_id res = webob.Response() + # NOTE(vish): This is expecting and returning Auth(1.1), whereas + # keystone uses 2.0 auth. We should probably allow + # 2.0 auth here as well. 
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) res.headers['X-Server-Management-Url'] = os_url res.headers['X-Storage-Url'] = '' @@ -56,21 +59,20 @@ class NoAuthMiddleware(wsgi.Middleware): res.content_type = 'text/plain' res.status = '204' return res - else: - token = req.headers['X-Auth-Token'] - user_id, _sep, project_id = token.partition(':') - project_id = project_id or user_id - remote_address = getattr(req, 'remote_address', '127.0.0.1') - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', - remote_address) - ctx = context.RequestContext(user_id, - project_id, - is_admin=True, - remote_address=remote_address) - - req.environ['nova.context'] = ctx - return self.application + + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['nova.context'] = ctx + return self.application class AuthMiddleware(wsgi.Middleware): -- cgit From 5ad22e341e0ad5ff62e97906edf7822ee53b4ae9 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 22 Aug 2011 23:30:12 -0400 Subject: removing unnecessary tthing --- nova/api/openstack/views/addresses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index 8fccc690f..8f07a2289 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -52,7 +52,7 @@ class ViewBuilderV11(ViewBuilder): for interface in interfaces: try: network_label = self._extract_network_label(interface) - except TypeError as exc: + except TypeError: continue if network_label not in networks: -- cgit From 4c2674516897b6cce0441efe4ebb005c01cb3411 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Mon, 22 Aug 2011 21:06:47 -0700 Subject: Added the fixes suggested by Eric Windisch from cloudscaling... --- nova/virt/disk.py | 2 +- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/vmops.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 54b191fa9..809d3323c 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -228,7 +228,7 @@ def _inject_metadata_into_fs(metadata, fs, execute=None): metadata_path = os.path.join(fs, "meta.js") metadata = dict([(m.key, m.value) for m in metadata]) - utils.execute('sudo', 'tee', '-a', metadata_path, + utils.execute('sudo', 'tee', metadata_path, process_input=json.dumps(metadata)) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 3861f6bd8..e517dcf28 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -641,7 +641,7 @@ class VMHelper(HelperBase): # everything mount_required = False key, net, metadata = _prepare_injectables(instance, network_info) - mount_required = key or net + mount_required = key or net or metadata if not mount_required: return diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b1522729a..606041d12 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -251,8 +251,9 @@ class VMOps(object): bootable=False) userdevice += 1 - # Alter the image before VM start for, e.g. 
network injection - if FLAGS.flat_injected: + # Alter the image before VM start for, e.g. network injection also + # alter the image if there's metadata. + if FLAGS.flat_injected or instance['metadata']: VMHelper.preconfigure_instance(self._session, instance, first_vdi_ref, network_info) -- cgit From 7f1adb50cfab91a553f2d129b9b2eef1e5b2145b Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Mon, 22 Aug 2011 22:17:51 -0700 Subject: Moved migration and fixed tests from upstream --- nova/compute/api.py | 2 +- .../versions/041_add_config_drive_to_instances.py | 38 ++++++++++++++++++++++ nova/tests/api/openstack/test_servers.py | 28 +++++++++------- nova/tests/test_compute.py | 2 +- nova/virt/disk.py | 2 +- nova/virt/libvirt/connection.py | 7 ++-- nova/virt/xenapi/vm_utils.py | 2 +- 7 files changed, 61 insertions(+), 20 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 74149f17d..69f76bf40 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -526,7 +526,7 @@ class API(base.Base): availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, reservation_id, access_ip_v4, access_ip_v6, - requested_networks, config_drive + requested_networks, config_drive) block_device_mapping = block_device_mapping or [] instances = [] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py new file mode 100644 index 000000000..d3058f00d --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Piston Cloud Computing, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Integer, MetaData, String, Table + +from nova import utils + + +meta = MetaData() + +instances = Table("instances", meta, + Column("id", Integer(), primary_key=True, nullable=False)) + +# matches the size of an image_ref +config_drive_column = Column("config_drive", String(255), nullable=True) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances.create_column(config_drive_column) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances.drop_column(config_drive_column) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index f854a500c..aec2ad947 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1776,8 +1776,8 @@ class ServersTest(test.TestCase): self.config_drive = True self._setup_for_create_instance() - image_href = 'http://localhost/v1.1/images/2' - flavor_ref = 'http://localhost/v1.1/flavors/3' + image_href = 'http://localhost/v1.1/123/images/2' + flavor_ref = 'http://localhost/v1.1/123/flavors/3' body = { 'server': { 'name': 'config_drive_test', @@ -1792,12 +1792,13 @@ class ServersTest(test.TestCase): }, } - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/123/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) + print res self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] self.assertEqual(1, server['id']) @@ -1807,8 +1808,8 @@ class ServersTest(test.TestCase): self.config_drive = 2 self._setup_for_create_instance() - image_href = 'http://localhost/v1.1/images/2' - flavor_ref = 'http://localhost/v1.1/flavors/3' + image_href = 'http://localhost/v1.1/123/images/2' + flavor_ref = 'http://localhost/v1.1/123/flavors/3' body = { 'server': { 'name': 'config_drive_test', @@ -1823,7 +1824,7 @@ class ServersTest(test.TestCase): }, } - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/123/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -1840,8 +1841,8 @@ class ServersTest(test.TestCase): self.config_drive = "asdf" self._setup_for_create_instance() - image_href = 'http://localhost/v1.1/images/2' - flavor_ref = 'http://localhost/v1.1/flavors/3' + image_href = 'http://localhost/v1.1/123/images/2' + flavor_ref = 'http://localhost/v1.1/123/flavors/3' body = { 'server': { 'name': 'config_drive_test', @@ -1856,7 +1857,7 @@ class ServersTest(test.TestCase): }, } - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/123/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -1867,8 +1868,8 @@ class ServersTest(test.TestCase): def test_create_instance_without_config_drive_v1_1(self): self._setup_for_create_instance() - image_href = 'http://localhost/v1.1/images/2' - flavor_ref = 'http://localhost/v1.1/flavors/3' + image_href = 'http://localhost/v1.1/123/images/2' + flavor_ref = 'http://localhost/v1.1/123/flavors/3' body = { 'server': { 'name': 'config_drive_test', @@ -1883,7 +1884,7 @@ class ServersTest(test.TestCase): }, } - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/123/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -3588,6 +3589,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": 
self.instance['uuid'], "name": "test_server", + "config_drive": None, "links": [ { "rel": "self", @@ -3747,6 +3749,7 @@ class ServersViewBuilderV11Test(test.TestCase): }, "addresses": {}, "metadata": {}, + "config_drive": None, "accessIPv4": "1.2.3.4", "accessIPv6": "", "links": [ @@ -3801,6 +3804,7 @@ class ServersViewBuilderV11Test(test.TestCase): }, "addresses": {}, "metadata": {}, + "config_drive": None, "accessIPv4": "", "accessIPv6": "fead::1234", "links": [ diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b75e25dda..0523d73b6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -163,7 +163,7 @@ class ComputeTestCase(test.TestCase): def test_create_instance_associates_config_drive(self): """Make sure create associates a config drive.""" - instance_id = self._create_instance(params={'config_drive': True,}) + instance_id = self._create_instance(params={'config_drive': True, }) try: self.compute.run_instance(self.context, instance_id) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 809d3323c..52b2881e8 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -228,7 +228,7 @@ def _inject_metadata_into_fs(metadata, fs, execute=None): metadata_path = os.path.join(fs, "meta.js") metadata = dict([(m.key, m.value) for m in metadata]) - utils.execute('sudo', 'tee', metadata_path, + utils.execute('sudo', 'tee', metadata_path, process_input=json.dumps(metadata)) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 23fa86f65..4388291db 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -131,8 +131,8 @@ flags.DEFINE_string('libvirt_vif_type', 'bridge', flags.DEFINE_string('libvirt_vif_driver', 'nova.virt.libvirt.vif.LibvirtBridgeDriver', 'The libvirt VIF driver to configure the VIFs.') -flags.DEFINE_string('default_local_format', - None, +flags.DEFINE_string('default_local_format', + None, 'The default format a local_volume will be formatted with ' 'on creation.') @@ -970,7 +970,7 @@ class LibvirtConnection(driver.ComputeDriver): for injection in ('metadata', 'key', 'net'): if locals()[injection]: LOG.info(_('instance %(inst_name)s: injecting ' - '%(injection)s into image %(img_id)s' + '%(injection)s into image %(img_id)s' % locals())) try: disk.inject_data(injection_path, key, net, metadata, @@ -1106,7 +1106,6 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info)): xml_info['swap_device'] = self.default_swap_device - config_drive = False if instance.get('config_drive') or instance.get('config_drive_id'): xml_info['config_drive'] = xml_info['basepath'] + "/disk.config" diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 18fe84e6c..efbea7076 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -741,7 +741,7 @@ class VMHelper(HelperBase): # if at all, so determine whether it's required first, and then do # everything mount_required = False - key, net, metadata = _prepare_injectables(instance, network_info) + key, net, metadata = _prepare_injectables(instance, network_info) mount_required = key or net or metadata if not mount_required: return -- cgit From 909e0ea5c61ba66e5c07b91ff225d64adf60f960 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 23 Aug 2011 10:21:07 -0400 Subject: Move use_ipv6 into flags. Its used in multiple places (network manager and the OSAPI) and should be defined at the top level. 
--- nova/flags.py | 2 ++ nova/network/manager.py | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 48d5e8168..f822ae61a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -402,3 +402,5 @@ DEFINE_bool('resume_guests_state_on_host_boot', False, DEFINE_string('root_helper', 'sudo', 'Command prefix to use for running commands as root') + +DEFINE_bool('use_ipv6', False, 'use the ipv6') diff --git a/nova/network/manager.py b/nova/network/manager.py index aa2a3700c..404a3180e 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -106,8 +106,6 @@ flags.DEFINE_integer('create_unique_mac_address_attempts', 5, 'Number of attempts to create unique mac address') flags.DEFINE_bool('auto_assign_floating_ip', False, 'Autoassigning floating ip to VM') -flags.DEFINE_bool('use_ipv6', False, - 'use the ipv6') flags.DEFINE_string('network_host', socket.gethostname(), 'Network host to use for ip allocation in flat modes') flags.DEFINE_bool('fake_call', False, -- cgit From e1c27761863a50bf33a2dcfffa96e911ae9b5b55 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 23 Aug 2011 10:31:34 -0400 Subject: 'use the ipv6' -- 'use ipv6' --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index f822ae61a..ce5356723 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -403,4 +403,4 @@ DEFINE_bool('resume_guests_state_on_host_boot', False, DEFINE_string('root_helper', 'sudo', 'Command prefix to use for running commands as root') -DEFINE_bool('use_ipv6', False, 'use the ipv6') +DEFINE_bool('use_ipv6', False, 'use ipv6') -- cgit From 5ae44219fd82d843cc5e715c318d9e80ab20b1a2 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Tue, 23 Aug 2011 08:07:25 -0700 Subject: Fixed typo and docstring and example class name --- nova/notifier/api.py | 7 +++--- nova/tests/example/__init__.py | 34 ---------------------------- nova/tests/example/example_a.py | 32 -------------------------- nova/tests/example/example_b.py | 32 -------------------------- nova/tests/monkey_patch_example/__init__.py | 34 ++++++++++++++++++++++++++++ nova/tests/monkey_patch_example/example_a.py | 32 ++++++++++++++++++++++++++ nova/tests/monkey_patch_example/example_b.py | 32 ++++++++++++++++++++++++++ nova/tests/test_utils.py | 25 ++++++++++---------- nova/utils.py | 4 ++-- 9 files changed, 116 insertions(+), 116 deletions(-) delete mode 100644 nova/tests/example/__init__.py delete mode 100644 nova/tests/example/example_a.py delete mode 100644 nova/tests/example/example_b.py create mode 100644 nova/tests/monkey_patch_example/__init__.py create mode 100644 nova/tests/monkey_patch_example/example_a.py create mode 100644 nova/tests/monkey_patch_example/example_b.py (limited to 'nova') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a98f17dbe..f5cf95d2a 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -41,11 +41,10 @@ class BadPriorityException(Exception): def notify_decorator(name, fn): """ decorator for notify which is used from utils.monkey_patch() - Parameters: - - name - name of the function - function - object of the function + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): diff --git a/nova/tests/example/__init__.py b/nova/tests/example/__init__.py deleted file mode 100644 index 1cfdf8a7e..000000000 --- a/nova/tests/example/__init__.py +++ /dev/null @@ -1,34 
+0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Example Module for testing utils.monkey_patch() -""" - - -CALLED_FUNCTION = [] - - -def example_decorator(name, function): - """ decorator for notify which is used from utils.monkey_patch() - Parameters: - - name - name of the function - function - object of the function - - - """ - def wrapped_func(*args, **kwarg): - CALLED_FUNCTION.append(name) - return function(*args, **kwarg) - return wrapped_func diff --git a/nova/tests/example/example_a.py b/nova/tests/example/example_a.py deleted file mode 100644 index 91bf048e4..000000000 --- a/nova/tests/example/example_a.py +++ /dev/null @@ -1,32 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Example Module A for testing utils.monkey_patch() -""" - - -def example_function_a(): - return 'Example function' - - -class ExampleClassA(): - def example_method(self): - return 'Example method' - - def example_method_add(self, arg1, arg2): - return arg1 + arg2 diff --git a/nova/tests/example/example_b.py b/nova/tests/example/example_b.py deleted file mode 100644 index edd267c4f..000000000 --- a/nova/tests/example/example_b.py +++ /dev/null @@ -1,32 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Example Module B for testing utils.monkey_patch() -""" - - -def example_function_b(): - return 'Example function' - - -class ExampleClassB(): - def example_method(self): - return 'Example method' - - def example_method_add(self, arg1, arg2): - return arg1 + arg2 diff --git a/nova/tests/monkey_patch_example/__init__.py b/nova/tests/monkey_patch_example/__init__.py new file mode 100644 index 000000000..1cfdf8a7e --- /dev/null +++ b/nova/tests/monkey_patch_example/__init__.py @@ -0,0 +1,34 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Example Module for testing utils.monkey_patch() +""" + + +CALLED_FUNCTION = [] + + +def example_decorator(name, function): + """ decorator for notify which is used from utils.monkey_patch() + Parameters: + + name - name of the function + function - object of the function + + + """ + def wrapped_func(*args, **kwarg): + CALLED_FUNCTION.append(name) + return function(*args, **kwarg) + return wrapped_func diff --git a/nova/tests/monkey_patch_example/example_a.py b/nova/tests/monkey_patch_example/example_a.py new file mode 100644 index 000000000..91bf048e4 --- /dev/null +++ b/nova/tests/monkey_patch_example/example_a.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Example Module A for testing utils.monkey_patch() +""" + + +def example_function_a(): + return 'Example function' + + +class ExampleClassA(): + def example_method(self): + return 'Example method' + + def example_method_add(self, arg1, arg2): + return arg1 + arg2 diff --git a/nova/tests/monkey_patch_example/example_b.py b/nova/tests/monkey_patch_example/example_b.py new file mode 100644 index 000000000..edd267c4f --- /dev/null +++ b/nova/tests/monkey_patch_example/example_b.py @@ -0,0 +1,32 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Example Module B for testing utils.monkey_patch() +""" + + +def example_function_b(): + return 'Example function' + + +class ExampleClassB(): + def example_method(self): + return 'Example method' + + def example_method_add(self, arg1, arg2): + return arg1 + arg2 diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index f80ffb179..1ba794a1a 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -401,15 +401,16 @@ class MonkeyPatchTestCase(test.TestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() + self.example_package = 'nova.tests.monkey_patch_example.' self.flags( monkey_patch=True, - monkey_patch_modules=['nova.tests.example.example_a' + ':' - + 'nova.tests.example.example_decorator']) + monkey_patch_modules=[self.example_package + 'example_a' + ':' + + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() - nova.tests.example.CALLED_FUNCTION = [] - from nova.tests.example import example_a, example_b + nova.tests.monkey_patch_example.CALLED_FUNCTION = [] + from nova.tests.monkey_patch_example import example_a, example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() @@ -423,18 +424,18 @@ class MonkeyPatchTestCase(test.TestCase): ret_b = exampleB.example_method_add(3, 5) self.assertEqual(ret_b, 8) - package_a = 'nova.tests.example.example_a.' + package_a = self.example_package + 'example_a.' self.assertTrue(package_a + 'example_function_a' - in nova.tests.example.CALLED_FUNCTION) + in nova.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method' - in nova.tests.example.CALLED_FUNCTION) + in nova.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method_add' - in nova.tests.example.CALLED_FUNCTION) - package_b = 'nova.tests.example.example_b.' + in nova.tests.monkey_patch_example.CALLED_FUNCTION) + package_b = self.example_package + 'example_b.' self.assertFalse(package_b + 'example_function_b' - in nova.tests.example.CALLED_FUNCTION) + in nova.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method' - in nova.tests.example.CALLED_FUNCTION) + in nova.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method_add' - in nova.tests.example.CALLED_FUNCTION) + in nova.tests.monkey_patch_example.CALLED_FUNCTION) diff --git a/nova/utils.py b/nova/utils.py index 44a0d4398..edf67384d 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -866,13 +866,13 @@ def monkey_patch(): # If FLAGS.monkey_patch is not True, this function do nothing. 
if not FLAGS.monkey_patch: return - # Get list of moudles and decorators + # Get list of modules and decorators for module_and_decorator in FLAGS.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = import_class(decorator_name) __import__(module) - # Retrive module information using pyclbr + # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods -- cgit From f380b65cdce439d440b68b0f4a65be45d13ce453 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Tue, 23 Aug 2011 08:51:44 -0700 Subject: Removed blank line --- nova/utils.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/utils.py b/nova/utils.py index 5c7d52c70..21e6221b2 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -851,7 +851,6 @@ def is_valid_ipv4(address): """valid the address strictly as per format xxx.xxx.xxx.xxx. where xxx is a value between 0 and 255. """ - parts = address.split(".") if len(parts) != 4: return False -- cgit From 76f02277a3677d40a13a8b05a12f9d83053808c3 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Tue, 23 Aug 2011 09:46:49 -0700 Subject: Fixed some docstring Added default publisher_id flagw --- nova/notifier/api.py | 7 +++++-- nova/tests/monkey_patch_example/__init__.py | 13 ++++++------- nova/tests/monkey_patch_example/example_a.py | 7 ++----- nova/tests/monkey_patch_example/example_b.py | 6 ++---- nova/tests/test_notifier.py | 3 +-- 5 files changed, 16 insertions(+), 20 deletions(-) (limited to 'nova') diff --git a/nova/notifier/api.py b/nova/notifier/api.py index f5cf95d2a..6ef4a050e 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -25,6 +25,9 @@ FLAGS = flags.FLAGS flags.DEFINE_string('default_notification_level', 'INFO', 'Default notification level for outgoing notifications') +flags.DEFINE_string('default_publisher_id', FLAGS.host, + 'Default publisher_id for outgoing notifications') + WARN = 'WARN' INFO = 'INFO' @@ -55,9 +58,9 @@ def notify_decorator(name, fn): body['args'].append(arg) for key in kwarg: body['kwarg'][key] = kwarg[key] - notify(FLAGS.host, + notify(FLAGS.default_publisher_id, name, - DEBUG, + FLAGS.default_notification_level, body) return fn(*args, **kwarg) return wrapped_func diff --git a/nova/tests/monkey_patch_example/__init__.py b/nova/tests/monkey_patch_example/__init__.py index 1cfdf8a7e..25cf9ccfe 100644 --- a/nova/tests/monkey_patch_example/__init__.py +++ b/nova/tests/monkey_patch_example/__init__.py @@ -1,3 +1,5 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + # Copyright 2011 OpenStack LLC. # All Rights Reserved. # @@ -12,8 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-"""Example Module for testing utils.monkey_patch() -""" +"""Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] @@ -21,12 +22,10 @@ CALLED_FUNCTION = [] def example_decorator(name, function): """ decorator for notify which is used from utils.monkey_patch() - Parameters: - - name - name of the function - function - object of the function - + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) diff --git a/nova/tests/monkey_patch_example/example_a.py b/nova/tests/monkey_patch_example/example_a.py index 91bf048e4..21e79bcb0 100644 --- a/nova/tests/monkey_patch_example/example_a.py +++ b/nova/tests/monkey_patch_example/example_a.py @@ -1,7 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,9 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -"""Example Module A for testing utils.monkey_patch() -""" +"""Example Module A for testing utils.monkey_patch().""" def example_function_a(): diff --git a/nova/tests/monkey_patch_example/example_b.py b/nova/tests/monkey_patch_example/example_b.py index edd267c4f..9d8f6d339 100644 --- a/nova/tests/monkey_patch_example/example_b.py +++ b/nova/tests/monkey_patch_example/example_b.py @@ -1,7 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,8 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-"""Example Module B for testing utils.monkey_patch() -""" +"""Example Module B for testing utils.monkey_patch().""" def example_function_b(): diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index ab5dfb692..7de3a4a99 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -141,8 +141,7 @@ class NotifierTestCase(test.TestCase): def example_api(arg1, arg2): return arg1 + arg2 - example_api =\ - nova.notifier.api.notify_decorator( + example_api = nova.notifier.api.notify_decorator( 'example_api', example_api) -- cgit From 295bcc8ef70d767bf1539defe1a79a67bdf555ff Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 23 Aug 2011 13:44:39 -0400 Subject: updating tests --- nova/tests/api/openstack/test_servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index cec6eecc0..e5c1f2c34 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -785,7 +785,7 @@ class ServersTest(test.TestCase): new_return_server = return_server_with_attributes(interfaces=ifaces) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) - req = webob.Request.blank('/v1.1/servers/1') + req = webob.Request.blank('/v1.1/fake/servers/1') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) -- cgit From b75f90e0d83e50b6699a8e6efc60cc97a00c0678 Mon Sep 17 00:00:00 2001 From: Tim Simpson Date: Tue, 23 Aug 2011 13:12:54 -0500 Subject: Switched list_notifier to log an exception each time notify is called, for each notification driver that failed to import. --- nova/notifier/list_notifier.py | 13 +++++++++++-- nova/tests/notifier/test_list_notifier.py | 10 +--------- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'nova') diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py index 21067df55..aa9c236b0 100644 --- a/nova/notifier/list_notifier.py +++ b/nova/notifier/list_notifier.py @@ -30,6 +30,15 @@ LOG = logging.getLogger('nova.notifier.list_notifier') drivers = None +class ImportFailureNotifier(object): + """Noisily re-raises some exception over-and-over when notify is called.""" + + def __init__(self, exception): + self.exception = exception + + def notify(message): + raise self.exception + def _get_drivers(): """Instantiates and returns drivers based on the flag values.""" @@ -39,8 +48,8 @@ def _get_drivers(): for notification_driver in FLAGS.list_notifier_drivers: try: drivers.append(utils.import_object(notification_driver)) - except ClassNotFound: - sys.exit(1) + except ClassNotFound as e: + drivers.append(ImportFailureNotifier(e)) return drivers def notify(message): diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py index bab1a0ab8..ad2b039c5 100644 --- a/nova/tests/notifier/test_list_notifier.py +++ b/nova/tests/notifier/test_list_notifier.py @@ -48,11 +48,6 @@ class NotifierListTestCase(test.TestCase): def mock_notify2(cls, *args): raise RuntimeError("Bad notifier.") self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) - # mock sys.exit so we don't actually kill the program during our tests. 
- self.sys_exit_code = 0 - def mock_sys_exit(code): - self.sys_exit_code += code - self.stubs.Set(sys, 'exit', mock_sys_exit) def tearDown(self): self.stubs.UnsetAll() @@ -67,7 +62,6 @@ class NotifierListTestCase(test.TestCase): nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 2) self.assertEqual(self.exception_count, 0) - self.assertEqual(self.sys_exit_code, 0) def test_send_notifications_with_errors(self): @@ -77,7 +71,6 @@ class NotifierListTestCase(test.TestCase): notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) self.assertEqual(self.notify_count, 1) self.assertEqual(self.exception_count, 1) - self.assertEqual(self.sys_exit_code, 0) def test_when_driver_fails_to_import(self): self.flags(notification_driver='nova.notifier.list_notifier', @@ -85,6 +78,5 @@ class NotifierListTestCase(test.TestCase): 'nova.notifier.logo_notifier', 'fdsjgsdfhjkhgsfkj']) notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3)) - self.assertEqual(self.exception_count, 0) + self.assertEqual(self.exception_count, 2) self.assertEqual(self.notify_count, 1) - self.assertEqual(self.sys_exit_code, 2) -- cgit From 6bbef7627200f6c6ef27b5ae5c9b114e8e6d0f52 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Tue, 23 Aug 2011 12:06:25 -0700 Subject: Fixed doc string --- nova/flags.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index 05d0db8af..95000df1b 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -409,7 +409,7 @@ DEFINE_bool('monkey_patch', False, 'Whether to log monkey patching') DEFINE_list('monkey_patch_modules', - ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator', - 'nova.compute.api:nova.notifier.api.notify_decorator'], - 'Module list representing monkey\ - patched module and decorator') + ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator', + 'nova.compute.api:nova.notifier.api.notify_decorator'], + 'Module list representing monkey ' + 'patched module and decorator') -- cgit From a5fd82841bfada1b59066d82094f41ffa9389dec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Aug 2011 12:21:52 -0700 Subject: fix for rc generation using noauth. --- nova/auth/manager.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 85227bea0..44e6e11ac 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -41,10 +41,13 @@ from nova.auth import signer FLAGS = flags.FLAGS +flags.DEFINE_bool('use_deprecated_auth', + False, + 'This flag must be set to use old style auth') + flags.DEFINE_list('allowed_roles', ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'], 'Allowed roles for project') - # NOTE(vish): a user with one of these roles will be a superuser and # have access to all api commands flags.DEFINE_list('superuser_roles', ['cloudadmin'], @@ -814,7 +817,13 @@ class AuthManager(object): s3_host = host ec2_host = host rc = open(FLAGS.credentials_template).read() - rc = rc % {'access': user.access, + # NOTE(vish): Deprecated auth uses an access key, no auth uses a + # the user_id in place of it. 
+ if FLAGS.use_deprecated_auth: + access = user.access + else: + access = user.id + rc = rc % {'access': access, 'project': pid, 'secret': user.secret, 'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme, -- cgit From 8c30e3e4b1847e6f44790fc4b614fe56de84cbfb Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Aug 2011 13:44:21 -0700 Subject: Forgot to set the flag for the test --- nova/tests/test_auth.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 4561eb7f2..1b3166af7 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -147,6 +147,7 @@ class _AuthManagerBaseTestCase(test.TestCase): '/services/Cloud')) def test_can_get_credentials(self): + self.flags(use_deprecated_auth=True) st = {'access': 'access', 'secret': 'secret'} with user_and_project_generator(self.manager, user_state=st) as (u, p): credentials = self.manager.get_environment_rc(u, p) -- cgit From cbf8b3b36dde763164fcfd06e1d3c5732f57311d Mon Sep 17 00:00:00 2001 From: Tim Simpson Date: Tue, 23 Aug 2011 15:50:39 -0500 Subject: Fixed some pep8 and pylint issues. --- nova/notifier/list_notifier.py | 7 ++++--- nova/tests/notifier/test_list_notifier.py | 6 ++++++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py index aa9c236b0..955ae1b57 100644 --- a/nova/notifier/list_notifier.py +++ b/nova/notifier/list_notifier.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import sys - from nova import flags from nova import log as logging from nova import utils @@ -30,13 +28,14 @@ LOG = logging.getLogger('nova.notifier.list_notifier') drivers = None + class ImportFailureNotifier(object): """Noisily re-raises some exception over-and-over when notify is called.""" def __init__(self, exception): self.exception = exception - def notify(message): + def notify(self, message): raise self.exception @@ -52,6 +51,7 @@ def _get_drivers(): drivers.append(ImportFailureNotifier(e)) return drivers + def notify(message): """Passes notification to mulitple notifiers in a list.""" for driver in _get_drivers(): @@ -61,6 +61,7 @@ def notify(message): LOG.exception(_("Problem '%(e)s' attempting to send to " "notification driver %(driver)s." % locals())) + def _reset_drivers(): """Used by unit tests to reset the drivers.""" global drivers diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py index ad2b039c5..b77720759 100644 --- a/nova/tests/notifier/test_list_notifier.py +++ b/nova/tests/notifier/test_list_notifier.py @@ -34,19 +34,25 @@ class NotifierListTestCase(test.TestCase): list_notifier._reset_drivers() self.stubs = stubout.StubOutForTesting() # Mock log to add one to exception_count when log.exception is called + def mock_exception(cls, *args): self.exception_count += 1 + self.exception_count = 0 list_notifier_log = logging.getLogger('nova.notifier.list_notifier') self.stubs.Set(list_notifier_log, "exception", mock_exception) # Mock no_op notifier to add one to notify_count when called. + def mock_notify(cls, *args): self.notify_count += 1 + self.notify_count = 0 self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify) # Mock log_notifier to raise RuntimeError when called. 
+ def mock_notify2(cls, *args): raise RuntimeError("Bad notifier.") + self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2) def tearDown(self): -- cgit From 360dbeebadb76b3628b2cfbd8b3c41e77581b24c Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 23 Aug 2011 17:31:19 -0400 Subject: rebuilds are functional again --- nova/api/openstack/servers.py | 14 ++++++++++---- nova/compute/api.py | 6 ++++-- nova/compute/manager.py | 6 ++++++ 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 553357404..fa499b192 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -604,8 +604,10 @@ class ControllerV10(Controller): LOG.debug(msg) raise exc.HTTPBadRequest(explanation=msg) + password = utils.generate_password(16) + try: - self.compute_api.rebuild(context, instance_id, image_id) + self.compute_api.rebuild(context, instance_id, image_id, password) except exception.BuildInProgress: msg = _("Instance %s is currently being rebuilt.") % instance_id LOG.debug(msg) @@ -741,15 +743,19 @@ class ControllerV11(Controller): self._validate_metadata(metadata) self._decode_personalities(personalities) + password = info["rebuild"].get("adminPass", + utils.generate_password(16)) + try: - self.compute_api.rebuild(context, instance_id, image_href, name, - metadata, personalities) + self.compute_api.rebuild(context, instance_id, image_href, + password, name=name, metadata=metadata, + files_to_inject=personalities) except exception.BuildInProgress: msg = _("Instance %s is currently being rebuilt.") % instance_id LOG.debug(msg) raise exc.HTTPConflict(explanation=msg) - return webob.Response(status_int=202) + return webob.Response(status_int=202, headers={'x-nova-password':password}) @common.check_snapshots_enabled def _action_create_image(self, input_dict, req, instance_id): diff --git a/nova/compute/api.py b/nova/compute/api.py index 69f76bf40..0ac38a428 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1023,8 +1023,8 @@ class API(base.Base): self._cast_compute_message('reboot_instance', context, instance_id) @scheduler_api.reroute_compute("rebuild") - def rebuild(self, context, instance_id, image_href, name=None, - metadata=None, files_to_inject=None): + def rebuild(self, context, instance_id, image_href, admin_password, + name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -1044,6 +1044,7 @@ class API(base.Base): self.db.instance_update(context, instance_id, values) rebuild_params = { + "new_pass": admin_password, "image_ref": image_href, "injected_files": files_to_inject, } @@ -1052,6 +1053,7 @@ class API(base.Base): context, instance_id, params=rebuild_params) + return rebuild_params @scheduler_api.reroute_compute("revert_resize") def revert_resize(self, context, instance_id): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c207eccbb..40cd09044 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -527,6 +527,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) :param image_ref: Image identifier (href or integer) + :param new_pass: password to set on rebuilt instance """ context = context.elevated() @@ -544,6 +545,11 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = 
self.network_api.get_instance_nw_info(context, instance_ref) bd_mapping = self._setup_block_device_mapping(context, instance_id) + + # pull in new password here since the original password isn't in the db + new_pass = kwargs.get('new_pass') + instance_ref.admin_pass = new_pass + self.driver.spawn(context, instance_ref, network_info, bd_mapping) self._update_image_ref(context, instance_id, image_ref) -- cgit From 8cd7dcca1ccac0347289d633ebd10567d6cba4c7 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Tue, 23 Aug 2011 15:06:24 -0700 Subject: Stub out the DB in unit test. Fix 'nova-manage network modify' to use db.network_update() --- nova/db/api.py | 6 -- nova/db/sqlalchemy/api.py | 31 -------- nova/tests/test_nova_manage.py | 162 +++++++++++++++++++++++++++-------------- 3 files changed, 107 insertions(+), 92 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 7268cde7f..2d854f24c 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -621,12 +621,6 @@ def network_associate(context, project_id, force=False): return IMPL.network_associate(context, project_id, force) -def network_associate_by_id(context, network_id, project_id, force=False): - """Associate a project with a network specified by id.""" - return IMPL.network_associate_by_id(context, network_id, project_id, - force=False) - - def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e5fffa784..04b5405f6 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1662,37 +1662,6 @@ def network_associate(context, project_id, force=False): return network_ref -@require_admin_context -def network_associate_by_id(context, network_id, project_id, force=False): - """Associate a project with a network specified by id. 
- - only associate if the network isn't already associated - with a project or if force is True - """ - session = get_session() - with session.begin(): - - def network_query(network_filter): - if force: - return session.query(models.Network).\ - filter_by(deleted=False).\ - filter_by(id=network_filter).\ - with_lockmode('update').\ - first() - else: - return session.query(models.Network).\ - filter_by(deleted=False).\ - filter_by(project_id=None).\ - filter_by(id=network_filter).\ - with_lockmode('update').\ - first() - network_ref = network_query(network_id) - if network_ref: - network_ref['project_id'] = project_id - session.add(network_ref) - return network_ref - - @require_admin_context def network_count(context): session = get_session() diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 6b34a90f9..03ee1140d 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -75,17 +75,59 @@ class FixedIpCommandsTestCase(test.TestCase): class NetworkCommandsTestCase(test.TestCase): def setUp(self): super(NetworkCommandsTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() self.commands = nova_manage.NetworkCommands() self.context = context.get_admin_context() - nets = db.network_get_all(self.context) - for net in nets: - db.network_delete_safe(self.context, net['id']) + self.net = {'id': 0, + 'label': 'fake', + 'injected': False, + 'cidr': '192.168.0.0/24', + 'cidr_v6': 'dead:beef::/64', + 'multi_host': False, + 'gateway_v6': 'dead:beef::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa0', + 'bridge_interface': 'fake_fa0', + 'gateway': '192.168.0.1', + 'broadcast': '192.168.0.255', + 'dns1': '8.8.8.8', + 'dns2': '8.8.4.4', + 'vlan': 200, + 'vpn_public_address': '10.0.0.2', + 'vpn_public_port': '2222', + 'vpn_private_address': '192.168.0.2', + 'dhcp_start': '192.168.0.3', + 'project_id': 'fake_project', + 'host': 'fake_host', + 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'} + def tearDown(self): super(NetworkCommandsTestCase, self).tearDown() + self.stubs.UnsetAll() + + def test_create(self): - def _create_network(self): + def fake_create_networks(obj, context, **kwargs): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(kwargs['label'], 'Test') + self.assertEqual(kwargs['cidr'], '10.2.0.0/24') + self.assertEqual(kwargs['multi_host'], False) + self.assertEqual(kwargs['num_networks'], 1) + self.assertEqual(kwargs['network_size'], 256) + self.assertEqual(kwargs['vlan_start'], 200) + self.assertEqual(kwargs['vpn_start'], 2000) + self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120') + self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22') + self.assertEqual(kwargs['bridge'], 'br200') + self.assertEqual(kwargs['bridge_interface'], 'eth0') + self.assertEqual(kwargs['dns1'], '8.8.8.8') + self.assertEqual(kwargs['dns2'], '8.8.4.4') FLAGS.network_manager = 'nova.network.manager.VlanManager' + from nova.network import manager as net_manager + self.stubs.Set(net_manager.VlanManager, 'create_networks', + fake_create_networks) self.commands.create( label='Test', fixed_range_v4='10.2.0.0/24', @@ -96,24 +138,16 @@ class NetworkCommandsTestCase(test.TestCase): vpn_start=2000, fixed_range_v6='fd00:2::/120', gateway_v6='fd00:2::22', - bridge_interface='eth0') - return db.network_get_by_cidr(self.context, '10.2.0.0/24') - - def test_create(self): - net = self._create_network() - self.assertEqual(net['label'], 'Test') - self.assertEqual(net['cidr'], '10.2.0.0/24') - self.assertEqual(net['netmask'], 
'255.255.255.0') - self.assertEqual(net['multi_host'], False) - self.assertEqual(net['vlan'], 200) - self.assertEqual(net['bridge'], 'br200') - self.assertEqual(net['vpn_public_port'], 2000) - self.assertEqual(net['cidr_v6'], 'fd00:2::/120') - self.assertEqual(net['gateway_v6'], 'fd00:2::22') - self.assertEqual(net['bridge_interface'], 'eth0') + bridge='br200', + bridge_interface='eth0', + dns1='8.8.8.8', + dns2='8.8.4.4') def test_list(self): - net = self._create_network() + + def fake_network_get_all(context): + return [db_fakes.FakeModel(self.net)] + self.stubs.Set(db, 'network_get_all', fake_network_get_all) output = StringIO.StringIO() sys.stdout = output self.commands.list() @@ -129,45 +163,63 @@ class NetworkCommandsTestCase(test.TestCase): _('VlanID'), _('project'), _("uuid")) - body = _fmt % ( - net['id'], - '10.2.0.0/24', - 'fd00:2::/120', - '10.2.0.3', - 'None', - 'None', - '200', - 'None', - net['uuid'],) + body = _fmt % (self.net['id'], + self.net['cidr'], + self.net['cidr_v6'], + self.net['dhcp_start'], + self.net['dns1'], + self.net['dns2'], + self.net['vlan'], + self.net['project_id'], + self.net['uuid']) answer = '%s\n%s\n' % (head, body) self.assertEqual(result, answer) def test_delete(self): - net = self._create_network() - self.commands.delete(fixed_range='10.2.0.0/24') - net_exist = True - try: - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') - except exception.NetworkNotFoundForCidr, e: - net_exist = False - self.assertEqual(net_exist, False) + net_dis = self.net + net_dis['project_id'] = None + net_dis['host'] = None + + def fake_network_get_by_cidr(context, cidr): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(cidr, net_dis['cidr']) + return db_fakes.FakeModel(net_dis) + self.stubs.Set(db, 'network_get_by_cidr', fake_network_get_by_cidr) + + def fake_network_delete_safe(context, network_id): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(network_id, net_dis['id']) + self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe) + self.commands.delete(fixed_range=net_dis['cidr']) def test_modify(self): - net = self._create_network() - db.network_disassociate(self.context, net['id']) - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') - self.assertEqual(net['project_id'], None) - self.assertEqual(net['host'], None) - self.commands.modify('10.2.0.0/24', project='test_project', + + def fake_network_get_by_cidr(context, cidr): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(cidr, self.net['cidr']) + return db_fakes.FakeModel(self.net) + self.stubs.Set(db, 'network_get_by_cidr', fake_network_get_by_cidr) + + def fake_network_update(context, network_id, values): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(network_id, self.net['id']) + self.assertEqual(values, {'project_id': 'test_project', + 'host': 'test_host'}) + self.stubs.Set(db, 'network_update', fake_network_update) + self.commands.modify(self.net['cidr'], project='test_project', host='test_host') - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') - self.assertEqual(net['project_id'], 'test_project') - self.assertEqual(net['host'], 'test_host') - self.commands.modify('10.2.0.0/24') - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') - self.assertEqual(net['project_id'], 'test_project') - self.assertEqual(net['host'], 'test_host') - self.commands.modify('10.2.0.0/24', dis_project=True, dis_host=True) - net = db.network_get_by_cidr(self.context, '10.2.0.0/24') - 
self.assertEqual(net['project_id'], None) - self.assertEqual(net['host'], None) + + def fake_network_update(context, network_id, values): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(network_id, self.net['id']) + self.assertEqual(values, {}) + self.stubs.Set(db, 'network_update', fake_network_update) + self.commands.modify(self.net['cidr']) + + def fake_network_update(context, network_id, values): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(network_id, self.net['id']) + self.assertEqual(values, {'project_id': None, + 'host': None}) + self.stubs.Set(db, 'network_update', fake_network_update) + self.commands.modify(self.net['cidr'], dis_project=True, dis_host=True) -- cgit From ddc7d9470674a4d7300d15e5c6fa54b784b6a36f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 23 Aug 2011 15:18:50 -0700 Subject: added volume_types APIs --- .../versions/037_add_volume_types_and_extradata.py | 4 +- nova/tests/test_volume_types.py | 156 +++++++++++++++++++++ nova/volume/api.py | 9 +- nova/volume/volume_types.py | 14 +- 4 files changed, 170 insertions(+), 13 deletions(-) create mode 100644 nova/tests/test_volume_types.py (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py index fc365d2b2..27c8afcee 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_volume_types_and_extradata.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -45,7 +44,8 @@ volume_types = Table('volume_types', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) + unicode_error=None, _warn_on_bytestring=False), + unique=True)) volume_type_extra_specs_table = Table('volume_type_extra_specs', meta, Column('created_at', DateTime(timezone=False)), diff --git a/nova/tests/test_volume_types.py b/nova/tests/test_volume_types.py new file mode 100644 index 000000000..3d906a5fd --- /dev/null +++ b/nova/tests/test_volume_types.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for volume types code +""" +import time + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import utils +from nova.volume import volume_types +from nova.db.sqlalchemy.session import get_session +from nova.db.sqlalchemy import models + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.test_volume_types') + + +class VolumeTypeTestCase(test.TestCase): + """Test cases for volume type code""" + def setUp(self): + super(VolumeTypeTestCase, self).setUp() + + self.ctxt = context.get_admin_context() + self.vol_type1_name = str(int(time.time())) + self.vol_type1_specs = dict( + type="physical drive", + drive_type="SAS", + size="300", + rpm="7200", + visible="True") + self.vol_type1 = dict(name=self.vol_type1_name, + extra_specs=self.vol_type1_specs) + + def test_volume_type_create_then_destroy(self): + """Ensure volume types can be created and deleted""" + prev_all_vtypes = volume_types.get_all_types(self.ctxt) + + volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + LOG.info(_("Given data: %s"), self.vol_type1_specs) + LOG.info(_("Result data: %s"), new) + + for k, v in self.vol_type1_specs.iteritems(): + self.assertEqual(v, new['extra_specs'][k], + 'one of fields doesnt match') + + new_all_vtypes = volume_types.get_all_types(self.ctxt) + self.assertEqual(len(prev_all_vtypes) + 1, + len(new_all_vtypes), + 'drive type was not created') + + volume_types.destroy(self.ctxt, self.vol_type1_name) + new_all_vtypes = volume_types.get_all_types(self.ctxt) + self.assertEqual(prev_all_vtypes, + new_all_vtypes, + 'drive type was not deleted') + + def test_volume_type_create_then_purge(self): + """Ensure volume types can be created and deleted""" + prev_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1) + + volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + for k, v in self.vol_type1_specs.iteritems(): + self.assertEqual(v, new['extra_specs'][k], + 'one of fields doesnt match') + + new_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1) + self.assertEqual(len(prev_all_vtypes) + 1, + len(new_all_vtypes), + 'drive type was not created') + + volume_types.destroy(self.ctxt, self.vol_type1_name) + new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1) + self.assertEqual(len(new_all_vtypes), + len(new_all_vtypes2), + 'drive type was incorrectly deleted') + + volume_types.purge(self.ctxt, self.vol_type1_name) + new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1) + self.assertEqual(len(new_all_vtypes) - 1, + len(new_all_vtypes2), + 'drive type was not purged') + + def test_get_all_volume_types(self): + """Ensures that all volume types can be retrieved""" + session = get_session() + total_volume_types = session.query(models.VolumeTypes).\ + count() + vol_types = volume_types.get_all_types(self.ctxt) + self.assertEqual(total_volume_types, len(vol_types)) + + def test_non_existant_inst_type_shouldnt_delete(self): + """Ensures that volume type creation fails with invalid args""" + self.assertRaises(exception.ApiError, + volume_types.destroy, self.ctxt, "sfsfsdfdfs") + + def test_repeated_vol_types_should_raise_api_error(self): + """Ensures that volume duplicates raises ApiError""" + new_name = self.vol_type1_name + 
"dup" + volume_types.create(self.ctxt, new_name) + volume_types.destroy(self.ctxt, new_name) + self.assertRaises( + exception.ApiError, + volume_types.create, self.ctxt, new_name) + + def test_invalid_volume_types_params(self): + """Ensures that volume type creation fails with invalid args""" + self.assertRaises(exception.InvalidVolumeType, + volume_types.destroy, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.purge, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.get_volume_type, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.get_volume_type_by_name, + self.ctxt, None) + + def test_volume_type_get_by_id_and_name(self): + """Ensure volume types get returns same entry""" + volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + new2 = volume_types.get_volume_type(self.ctxt, new['id']) + self.assertEqual(new, new2) + + diff --git a/nova/volume/api.py b/nova/volume/api.py index 7a78e244f..80e8bd85f 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -42,7 +42,7 @@ class API(base.Base): """API for interacting with the volume manager.""" def create(self, context, size, snapshot_id, name, description, - volume_type=None, metadata=None): + volume_type=None, metadata=None, availability_zone=None): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": @@ -58,18 +58,21 @@ class API(base.Base): raise quota.QuotaError(_("Volume quota exceeded. You cannot " "create a volume of size %sG") % size) + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + options = { 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, 'snapshot_id': snapshot_id, - 'availability_zone': FLAGS.storage_availability_zone, + 'availability_zone': availability_zone, 'status': "creating", 'attach_status': "detached", 'display_name': name, 'display_description': description, 'volume_type_id': volume_type.get('id', None), - 'metadata' metadata, + 'metadata': metadata, } volume = self.db.volume_create(context, options) diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py index c1fce1627..9df1e39f8 100644 --- a/nova/volume/volume_types.py +++ b/nova/volume/volume_types.py @@ -4,7 +4,6 @@ # Copyright (c) 2011 OpenStack LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. 
# Copyright 2011 Ken Pepple # @@ -29,16 +28,15 @@ from nova import flags from nova import log as logging FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.volume_types') +LOG = logging.getLogger('nova.volume.volume_types') def create(context, name, extra_specs={}): """Creates volume types.""" try: - db.volume_type_create( - context, - dict(name=name, - extra_specs=extra_specs)) + db.volume_type_create(context, + dict(name=name, + extra_specs=extra_specs)) except exception.DBError, e: LOG.exception(_('DB error: %s') % e) raise exception.ApiError(_("Cannot create volume_type with " @@ -82,7 +80,7 @@ def get_all_types(context, inactive=0): def get_volume_type(context, id): """Retrieves single volume type by id.""" if id is None: - raise exception.ApiError(_("Invalid volume type: %s") % id) + raise exception.InvalidVolumeType(volume_type=id) try: return db.volume_type_get(context, id) @@ -93,7 +91,7 @@ def get_volume_type(context, id): def get_volume_type_by_name(context, name): """Retrieves single volume type by name.""" if name is None: - raise exception.ApiError(_("Invalid volume type name: %s") % name) + raise exception.InvalidVolumeType(volume_type=name) try: return db.volume_type_get_by_name(context, name) -- cgit From 9fbdfa5061dc17e43fc8a5200415e0dffc55b911 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Tue, 23 Aug 2011 15:40:28 -0700 Subject: Fix pep8 --- nova/tests/test_nova_manage.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 097d2dbf5..ca4f4c894 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -112,7 +112,6 @@ class NetworkCommandsTestCase(test.TestCase): 'host': 'fake_host', 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'} - def tearDown(self): super(NetworkCommandsTestCase, self).tearDown() self.stubs.UnsetAll() -- cgit From 0fc12640a09792fbf278c050c35c62933afcb68d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Aug 2011 15:53:59 -0700 Subject: fix iscsi adm command --- nova/volume/driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index c99534c07..7d2fb45d4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -495,7 +495,7 @@ class ISCSIDriver(VolumeDriver): (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', iscsi_properties['target_iqn'], '-p', iscsi_properties['target_portal'], - iscsi_command, run_as_root=True) + *iscsi_command, run_as_root=True) LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % (iscsi_command, out, err)) return (out, err) -- cgit From 309a264db6c952081f2e85db21efc719596240a6 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 23 Aug 2011 20:59:24 -0400 Subject: updating tests --- nova/api/openstack/servers.py | 9 +- nova/tests/api/openstack/test_server_actions.py | 122 +++++++++++++++++------- 2 files changed, 95 insertions(+), 36 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index fa499b192..fc74b8288 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -755,7 +755,11 @@ class ControllerV11(Controller): LOG.debug(msg) raise exc.HTTPConflict(explanation=msg) - return webob.Response(status_int=202, headers={'x-nova-password':password}) + instance = self.compute_api.routing_get(context, instance_id) + view = self._build_view(request, instance, is_detail=True) + view['server']['adminPass'] = password + + return 
view @common.check_snapshots_enabled def _action_create_image(self, input_dict, req, instance_id): @@ -822,6 +826,9 @@ class HeadersSerializer(wsgi.ResponseHeadersSerializer): def delete(self, response, data): response.status_int = 204 + def action(self, response, data): + response.status_int = 202 + class ServerXMLSerializer(wsgi.XMLDictSerializer): diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index bdd6824e7..769de4b34 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -1,4 +1,5 @@ import base64 +import datetime import json import stubout @@ -8,8 +9,11 @@ from nova import context from nova import utils from nova import flags from nova.api.openstack import create_instance_helper +from nova.compute import instance_types from nova.compute import power_state import nova.db.api +from nova.db.sqlalchemy.models import Instance +from nova.db.sqlalchemy.models import InstanceMetadata from nova import test from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -19,61 +23,58 @@ FLAGS = flags.FLAGS def return_server_by_id(context, id): - return _get_instance() + return stub_instance(id) def instance_update(context, instance_id, kwargs): - return _get_instance() + return stub_instance(instance_id) -def return_server_with_power_state(power_state): +def return_server_with_attributes(**kwargs): def _return_server(context, id): - instance = _get_instance() - instance['state'] = power_state - return instance + return stub_instance(id, **kwargs) return _return_server +def return_server_with_power_state(power_state): + return return_server_with_attributes(power_state=power_state) + + def return_server_with_uuid_and_power_state(power_state): - def _return_server(context, id): - return return_server_with_power_state(power_state) - return _return_server + return return_server_with_power_state(power_state) -class MockSetAdminPassword(object): - def __init__(self): - self.instance_id = None - self.password = None +def stub_instance(id, power_state=0, metadata=None, + image_ref="10", flavor_id="1", name=None): - def __call__(self, context, instance_id, password): - self.instance_id = instance_id - self.password = password + if metadata is not None: + metadata_items = [{'key': k, 'value': v} for k, v in metadata.items()] + else: + metadata_items = [{'key': 'seq', 'value':id}] + inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id)) -def _get_instance(): instance = { - "id": 1, - "created_at": "2010-10-10 12:00:00", - "updated_at": "2010-11-11 11:00:00", + "id": int(id), + "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), + "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "admin_pass": "", - "user_id": "", - "project_id": "", - "image_ref": "5", + "user_id": "fake", + "project_id": "fake", + "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, + "state": power_state, "state_description": "", "memory_mb": 0, "vcpus": 0, "local_gb": 0, "hostname": "", "host": "", - "instance_type": { - "flavorid": 1, - }, + "instance_type": dict(inst_type), "user_data": "", "reservation_id": "", "mac_address": "", @@ -81,17 +82,32 @@ def _get_instance(): "launched_at": utils.utcnow(), "terminated_at": utils.utcnow(), "availability_zone": "", - "display_name": "test_server", + "display_name": name or "server%s" % id, "display_description": "", "locked": False, - 
"metadata": [], - #"address": , - #"floating_ips": [{"address":ip} for ip in public_addresses]} - "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"} + "metadata": metadata_items, + "access_ip_v4": "", + "access_ip_v6": "", + "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "virtual_interfaces": []} + + instance["fixed_ips"] = { + "address": '192.168.0.1', + "floating_ips": []} return instance +class MockSetAdminPassword(object): + def __init__(self): + self.instance_id = None + self.password = None + + def __call__(self, context, instance_id, password): + self.instance_id = instance_id + self.password = password + + class ServerActionsTest(test.TestCase): def setUp(self): @@ -598,6 +614,9 @@ class ServerActionsTestV11(test.TestCase): self.assertEqual(res.status_int, 400) def test_server_rebuild_accepted_minimum(self): + new_return_server = return_server_with_attributes(image_ref='2') + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + body = { "rebuild": { "imageRef": "http://localhost/images/2", @@ -611,6 +630,9 @@ class ServerActionsTestV11(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + body = json.loads(res.body) + self.assertEqual(body['server']['image']['id'], '2') + self.assertEqual(len(body['server']['adminPass']), 16) def test_server_rebuild_rejected_when_building(self): body = { @@ -634,12 +656,15 @@ class ServerActionsTestV11(test.TestCase): self.assertEqual(res.status_int, 409) def test_server_rebuild_accepted_with_metadata(self): + metadata = {'new': 'metadata'} + + new_return_server = return_server_with_attributes(metadata=metadata) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + body = { "rebuild": { "imageRef": "http://localhost/images/2", - "metadata": { - "new": "metadata", - }, + "metadata": metadata, }, } @@ -649,7 +674,10 @@ class ServerActionsTestV11(test.TestCase): req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) + print res.body self.assertEqual(res.status_int, 202) + body = json.loads(res.body) + self.assertEqual(body['server']['metadata'], metadata) def test_server_rebuild_accepted_with_bad_metadata(self): body = { @@ -719,6 +747,30 @@ class ServerActionsTestV11(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + body = json.loads(res.body) + self.assertTrue('personality' not in body['server']) + + def test_server_rebuild_admin_pass(self): + new_return_server = return_server_with_attributes(image_ref='2') + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "adminPass": "asdf", + }, + } + + req = webob.Request.blank('/v1.1/fake/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + body = json.loads(res.body) + self.assertEqual(body['server']['image']['id'], '2') + self.assertEqual(body['server']['adminPass'], 'asdf') def test_resize_server(self): -- cgit From 410edd037486a2e09bfc8276adc2a25459e2e2c8 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 23 Aug 2011 22:00:42 -0400 Subject: cleanup --- nova/compute/api.py | 1 - nova/compute/manager.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 0ac38a428..06f09f9a6 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1053,7 +1053,6 @@ class 
API(base.Base): context, instance_id, params=rebuild_params) - return rebuild_params @scheduler_api.reroute_compute("revert_resize") def revert_resize(self, context, instance_id): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 40cd09044..d3a4c6f88 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -547,8 +547,8 @@ class ComputeManager(manager.SchedulerDependentManager): bd_mapping = self._setup_block_device_mapping(context, instance_id) # pull in new password here since the original password isn't in the db - new_pass = kwargs.get('new_pass') - instance_ref.admin_pass = new_pass + instance_ref.admin_pass = kwargs.get('new_pass', + utils.generate_password(FLAGS.password_length)) self.driver.spawn(context, instance_ref, network_info, bd_mapping) -- cgit From 29940dd27f3a40a4ad54bc2f7a4cea5ac2226b83 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 23 Aug 2011 20:22:27 -0700 Subject: added volume metadata APIs (OS & volume layers), search volume by metadata & other --- nova/api/openstack/contrib/volumes.py | 36 ++++++++++++++++- nova/db/sqlalchemy/api.py | 24 +++++------- nova/tests/integrated/test_volumes.py | 16 ++++++++ nova/volume/api.py | 73 +++++++++++++++++++++++++++++++++-- 4 files changed, 129 insertions(+), 20 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 867fe301e..d62225e58 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -24,6 +24,7 @@ from nova import flags from nova import log as logging from nova import quota from nova import volume +from nova.volume import volume_types from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults @@ -63,6 +64,22 @@ def _translate_volume_summary_view(context, vol): d['displayName'] = vol['display_name'] d['displayDescription'] = vol['display_description'] + + if vol['volume_type_id'] and vol.get('volume_type'): + d['volumeType'] = vol['volume_type']['name'] + else: + d['volumeType'] = vol['volume_type_id'] + + LOG.audit(_("vol=%s"), vol, context=context) + + if vol.get('volume_metadata'): + meta_dict = {} + for i in vol['volume_metadata']: + meta_dict[i['key']] = i['value'] + d['metadata'] = meta_dict + else: + d['metadata'] = {} + return d @@ -80,6 +97,8 @@ class VolumeController(object): "createdAt", "displayName", "displayDescription", + "volumeType", + "metadata", ]}}} def __init__(self): @@ -136,12 +155,25 @@ class VolumeController(object): vol = body['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) + + vol_type = vol.get('volume_type', None) + if vol_type: + try: + vol_type = volume_types.get_volume_type_by_name(context, + vol_type) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + metadata = vol.get('metadata', None) + new_volume = self.volume_api.create(context, size, None, vol.get('display_name'), - vol.get('display_description')) + vol.get('display_description'), + volume_type=vol_type, + metadata=metadata) # Work around problem that instance is lazy-loaded... 
- new_volume['instance'] = None + new_volume = self.volume_api.get(context, new_volume['id']) retval = _translate_volume_detail_view(context, new_volume) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ac8aed307..f14f95ab0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1020,11 +1020,11 @@ def virtual_interface_delete_by_instance(context, instance_id): ################### -def _metadata_refs(metadata_dict): +def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): - metadata_ref = models.InstanceMetadata() + metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) @@ -1038,8 +1038,8 @@ def instance_create(context, values): context - request context object values - dict containing column values. """ - values['metadata'] = _metadata_refs(values.get('metadata')) - + values['metadata'] = _metadata_refs(values.get('metadata'), + models.InstanceMetadata) instance_ref = models.Instance() instance_ref['uuid'] = str(utils.gen_uuid()) @@ -2097,8 +2097,8 @@ def volume_attached(context, volume_id, instance_id, mountpoint): @require_context def volume_create(context, values): - values['metadata'] = _metadata_refs(values.get('metadata')) - + values['volume_metadata'] = _metadata_refs(values.get('metadata'), + models.VolumeMetadata) volume_ref = models.Volume() volume_ref.update(values) @@ -3617,14 +3617,10 @@ def volume_type_create(_context, values): """ try: specs = values.get('extra_specs') - specs_refs = [] - if specs: - for k, v in specs.iteritems(): - specs_ref = models.VolumeTypeExtraSpecs() - specs_ref['key'] = k - specs_ref['value'] = v - specs_refs.append(specs_ref) - values['extra_specs'] = specs_refs + + values['extra_specs'] = _metadata_refs(values.get('extra_specs'), + models.VolumeTypeExtraSpecs) + volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) volume_type_ref.save() diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py index d3e936462..86165944f 100644 --- a/nova/tests/integrated/test_volumes.py +++ b/nova/tests/integrated/test_volumes.py @@ -285,6 +285,22 @@ class VolumesTest(integrated_helpers._IntegratedTestBase): self.assertEquals(undisco_move['mountpoint'], device) self.assertEquals(undisco_move['instance_id'], server_id) + def test_create_volume_with_metadata(self): + """Creates and deletes a volume.""" + + # Create volume + metadata = {'key1': 'value1', + 'key2': 'value2'} + created_volume = self.api.post_volume({'volume': {'size': 1, + 'metadata': metadata}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there and metadata present + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + self.assertEqual(metadata, found_volume['metadata']) if __name__ == "__main__": unittest.main() diff --git a/nova/volume/api.py b/nova/volume/api.py index 80e8bd85f..195ab24aa 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -61,6 +61,11 @@ class API(base.Base): if availability_zone is None: availability_zone = FLAGS.storage_availability_zone + if volume_type is None: + volume_type_id = None + else: + volume_type_id = volume_type.get('id', None) + options = { 'size': size, 'user_id': context.user_id, @@ -71,7 +76,7 @@ class API(base.Base): 'attach_status': "detached", 'display_name': name, 
'display_description': description, - 'volume_type_id': volume_type.get('id', None), + 'volume_type_id': volume_type_id, 'metadata': metadata, } @@ -112,10 +117,44 @@ class API(base.Base): rv = self.db.volume_get(context, volume_id) return dict(rv.iteritems()) - def get_all(self, context): + def get_all(self, context, search_opts={}): if context.is_admin: - return self.db.volume_get_all(context) - return self.db.volume_get_all_by_project(context, context.project_id) + volumes = self.db.volume_get_all(context) + else: + volumes = self.db.volume_get_all_by_project(context, + context.project_id) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + def _check_metadata_match(volume, searchdict): + volume_metadata = {} + for i in volume.get('volume_metadata'): + volume_metadata[i['key']] = i['value'] + + for k, v in searchdict: + if k not in volume_metadata.keys()\ + or volume_metadata[k] != v: + return False + return True + + # search_option to filter_name mapping. + filter_mapping = {'metadata': _check_metadata_match} + + for volume in volumes: + # go over all filters in the list + for opt, values in search_opts.iteritems(): + try: + filter_func = filter_mapping[opt] + except KeyError: + # no such filter - ignore it, go to next filter + continue + else: + if filter_func(volume, values) == False: + # if one of conditions didn't match - remove + volumes.remove(volume) + break + return volumes def get_snapshot(self, context, snapshot_id): rv = self.db.snapshot_get(context, snapshot_id) @@ -190,3 +229,29 @@ class API(base.Base): {"method": "delete_snapshot", "args": {"topic": FLAGS.volume_topic, "snapshot_id": snapshot_id}}) + + def get_volume_metadata(self, context, volume_id): + """Get all metadata associated with a volume.""" + rv = self.db.volume_metadata_get(context, volume_id) + return dict(rv.iteritems()) + + def delete_volume_metadata(self, context, volume_id, key): + """Delete the given metadata item from an volume.""" + self.db.volume_metadata_delete(context, volume_id, key) + + def update_volume_metadata(self, context, volume_id, + metadata, delete=False): + """Updates or creates volume metadata. + + If delete is True, metadata items that are not specified in the + `metadata` argument will be deleted. 
+ + """ + if delete: + _metadata = metadata + else: + _metadata = self.get_volume_metadata(context, volume_id) + _metadata.update(metadata) + + self.db.volume_metadata_update(context, volume_id, _metadata, True) + return _metadata -- cgit From 8b6f6145a4a99f8b60e77cb8758ffcc2c0591ebd Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 23 Aug 2011 23:27:36 -0400 Subject: removing print statement --- nova/tests/api/openstack/test_server_actions.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 769de4b34..aeb132e87 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -674,7 +674,6 @@ class ServerActionsTestV11(test.TestCase): req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) - print res.body self.assertEqual(res.status_int, 202) body = json.loads(res.body) self.assertEqual(body['server']['metadata'], metadata) -- cgit From 8191cd38b2030a9d8816bcc21bd0cbc0119a7d91 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Wed, 24 Aug 2011 14:42:30 +0200 Subject: Thou shalt not use underscores in hostnames --- nova/compute/api.py | 6 +++--- nova/tests/test_compute.py | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 69f76bf40..88ba81018 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -55,15 +55,15 @@ def generate_default_hostname(instance): """Default function to generate a hostname given an instance reference.""" display_name = instance['display_name'] if display_name is None: - return 'server_%d' % (instance['id'],) + return 'server-%d' % (instance['id'],) table = '' deletions = '' for i in xrange(256): c = chr(i) if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'): table += c - elif c == ' ': - table += '_' + elif (c == ' ') or (c == '_'): + table += '-' elif ('A' <= c <= 'Z'): table += c.lower() else: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 0523d73b6..6659b81eb 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -175,8 +175,9 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) def test_default_hostname_generator(self): - cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'), - ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')] + cases = [(None, 'server-1'), ('Hello, Server!', 'hello-server'), + ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'), + ('hello_server', 'hello-server')] for display_name, hostname in cases: ref = self.compute_api.create(self.context, instance_types.get_default_instance_type(), None, -- cgit From bc235682305c3eb70eb80f1dddc15d86359a9ca3 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 24 Aug 2011 09:38:43 -0400 Subject: pep8 --- nova/tests/api/openstack/test_server_actions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index aeb132e87..926420407 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -48,9 +48,9 @@ def stub_instance(id, power_state=0, metadata=None, image_ref="10", flavor_id="1", name=None): if metadata is not None: - metadata_items = [{'key': k, 'value': v} for k, v in metadata.items()] + metadata_items = [{'key':k, 'value':v} for k, 
v in metadata.items()] else: - metadata_items = [{'key': 'seq', 'value':id}] + metadata_items = [{'key':'seq', 'value':id}] inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id)) -- cgit From e5e95e1bfb6b1569b7e30a7066a0cd9c6ebff2c7 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 24 Aug 2011 10:06:20 -0400 Subject: removing extraneous imports --- nova/tests/api/openstack/test_server_actions.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 926420407..057277887 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -12,8 +12,6 @@ from nova.api.openstack import create_instance_helper from nova.compute import instance_types from nova.compute import power_state import nova.db.api -from nova.db.sqlalchemy.models import Instance -from nova.db.sqlalchemy.models import InstanceMetadata from nova import test from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes -- cgit From 3d4d3d7f422c7327346b5731ad3c620f279411f2 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 24 Aug 2011 10:37:59 -0400 Subject: adding xml serialization and handling instance not found --- nova/api/openstack/servers.py | 8 ++ nova/tests/api/openstack/test_server_actions.py | 20 ++++ nova/tests/api/openstack/test_servers.py | 135 ++++++++++++++++++++++++ 3 files changed, 163 insertions(+) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index fc74b8288..27c67e79e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -754,6 +754,9 @@ class ControllerV11(Controller): msg = _("Instance %s is currently being rebuilt.") % instance_id LOG.debug(msg) raise exc.HTTPConflict(explanation=msg) + except exception.InstanceNotFound: + msg = _("Instance %s could not be found") % instance_id + raise exc.HTTPNotFound(explanation=msg) instance = self.compute_api.routing_get(context, instance_id) view = self._build_view(request, instance, is_detail=True) @@ -950,6 +953,11 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): node.setAttribute('adminPass', server_dict['server']['adminPass']) return self.to_xml_string(node, True) + def action(self, server_dict): + #NOTE(bcwaldon): We need a way to serialize actions individually. 
This + # assumes all actions return a server entity + return self.create(server_dict) + def update(self, server_dict): xml_doc = minidom.Document() node = self._server_to_xml_detailed(xml_doc, diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 057277887..9a664f44d 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -7,6 +7,7 @@ import webob from nova import context from nova import utils +from nova import exception from nova import flags from nova.api.openstack import create_instance_helper from nova.compute import instance_types @@ -769,6 +770,25 @@ class ServerActionsTestV11(test.TestCase): self.assertEqual(body['server']['image']['id'], '2') self.assertEqual(body['server']['adminPass'], 'asdf') + def test_server_rebuild_server_not_found(self): + def server_not_found(self, instance_id): + raise exception.InstanceNotFound(instance_id=instance_id) + self.stubs.Set(nova.db.api, 'instance_get', server_not_found) + + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + }, + } + + req = webob.Request.blank('/v1.1/fake/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + def test_resize_server(self): req = webob.Request.blank('/v1.1/fake/servers/1/action') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index aec2ad947..e533fb190 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -4541,3 +4541,138 @@ class ServerXMLSerializationTest(test.TestCase): str(ip['version'])) self.assertEqual(str(ip_elem.get('addr')), str(ip['addr'])) + + def test_action(self): + serializer = servers.ServerXMLSerializer() + + fixture = { + "server": { + "id": 1, + "uuid": FAKE_UUID, + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + "progress": 0, + "name": "test_server", + "status": "BUILD", + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", + "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", + "adminPass": "test_password", + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": self.IMAGE_BOOKMARK, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": self.FLAVOR_BOOKMARK, + }, + ], + }, + "addresses": { + "network_one": [ + { + "version": 4, + "addr": "67.23.10.138", + }, + { + "version": 6, + "addr": "::babe:67.23.10.138", + }, + ], + "network_two": [ + { + "version": 4, + "addr": "67.23.10.139", + }, + { + "version": 6, + "addr": "::babe:67.23.10.139", + }, + ], + }, + "metadata": { + "Open": "Stack", + "Number": "1", + }, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + } + } + + output = serializer.serialize(fixture, 'action') + root = etree.XML(output) + xmlutil.validate_schema(root, 'server') + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_image_bookmark = self.IMAGE_BOOKMARK + expected_flavor_bookmark = self.FLAVOR_BOOKMARK + expected_now = self.TIMESTAMP + expected_uuid = FAKE_UUID + server_dict = fixture['server'] + + for key in ['name', 'id', 'uuid', 'created', 'accessIPv4', + 'updated', 'progress', 'status', 'hostId', + 'accessIPv6', 'adminPass']: + self.assertEqual(root.get(key), str(server_dict[key])) + + 
link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(server_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = server_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + image_root = root.find('{0}image'.format(NS)) + self.assertEqual(image_root.get('id'), server_dict['image']['id']) + link_nodes = image_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['image']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + flavor_root = root.find('{0}flavor'.format(NS)) + self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id']) + link_nodes = flavor_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 1) + for i, link in enumerate(server_dict['flavor']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + addresses_root = root.find('{0}addresses'.format(NS)) + addresses_dict = server_dict['addresses'] + network_elems = addresses_root.findall('{0}network'.format(NS)) + self.assertEqual(len(network_elems), 2) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) -- cgit From 6e3d657c5733154bbf818bb5318fa5da2deb0122 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 24 Aug 2011 10:45:53 -0400 Subject: fixed indentation --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d3a4c6f88..ade15e310 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -548,7 +548,7 @@ class ComputeManager(manager.SchedulerDependentManager): # pull in new password here since the original password isn't in the db instance_ref.admin_pass = kwargs.get('new_pass', - utils.generate_password(FLAGS.password_length)) + utils.generate_password(FLAGS.password_length)) self.driver.spawn(context, instance_ref, network_info, bd_mapping) -- cgit From 0873a3c7b9a1a75c6e04bd1b66f8fbe4935585b2 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Wed, 24 Aug 2011 15:49:53 +0100 Subject: Fix flavorid migration failure With sqlalchemy 0.7.2 and migrate 0.7.1, I was seeing: Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/nova/db/migration.py", line 37, in db_sync ret = IMPL.db_sync(version=version) [...] File "/usr/lib/python2.7/site-packages/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py", line 46, in upgrade .values(old_instance_type_id=instance_type.id)) [...] 
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 301, in do_commit connection.commit() OperationalError: (OperationalError) database is locked None None It looks like the database is being held open as we iterate over the rows in the instance_types table. --- .../versions/036_change_flavor_id_in_migrations.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py index f3244033b..dfbd4ba32 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py @@ -40,13 +40,17 @@ def upgrade(migrate_engine): migrations.create_column(new_instance_type_id) # Convert flavor_id to instance_type_id + itypes = {} for instance_type in migrate_engine.execute(instance_types.select()): + itypes[instance_type.id] = instance_type.flavorid + + for instance_type_id in itypes.keys(): migrate_engine.execute(migrations.update()\ - .where(migrations.c.old_flavor_id == instance_type.flavorid)\ - .values(old_instance_type_id=instance_type.id)) + .where(migrations.c.old_flavor_id == itypes[instance_type_id])\ + .values(old_instance_type_id=instance_type_id)) migrate_engine.execute(migrations.update()\ - .where(migrations.c.new_flavor_id == instance_type.flavorid)\ - .values(new_instance_type_id=instance_type.id)) + .where(migrations.c.new_flavor_id == itypes[instance_type_id])\ + .values(new_instance_type_id=instance_type_id)) migrations.c.old_flavor_id.drop() migrations.c.new_flavor_id.drop() -- cgit From b428ac4c20e44f537b0dedeefcc2637efbc998ea Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Wed, 24 Aug 2011 15:50:46 +0100 Subject: Fix quotas migration failure With sqlalchemy 0.7.2 and migrate 0.7.1, I was seeing: Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/nova/db/migration.py", line 37, in db_sync ret = IMPL.db_sync(version=version) [..] File "/usr/lib/python2.7/site-packages/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py", line 189, in upgrade new_quotas.rename('quotas') [..] File "/usr/lib/python2.7/site-packages/migrate/changeset/schema.py", line 479, in deregister del meta.tables[key] File "/usr/lib64/python2.7/site-packages/sqlalchemy/util/_collections.py", line 38, in _immutable raise TypeError("%s object is immutable" % self.__class__.__name__) TypeError: immutabledict object is immutable This is actually a bug in sqlalchemy-migrate: http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 But it can be worked around by ensuring there isn't a 'quotas' table in the metadata's table hash before renaming. 
--- .../versions/016_make_quotas_key_and_value.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index a4fe3e482..56b287171 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -75,8 +75,8 @@ def new_style_quotas_table(name): ) -def existing_quotas_table(migrate_engine): - return Table('quotas', meta, autoload=True, autoload_with=migrate_engine) +def quotas_table(migrate_engine, name='quotas'): + return Table(name, meta, autoload=True, autoload_with=migrate_engine) def _assert_no_duplicate_project_ids(quotas): @@ -179,13 +179,18 @@ def upgrade(migrate_engine): # bind migrate_engine to your metadata meta.bind = migrate_engine - old_quotas = existing_quotas_table(migrate_engine) + old_quotas = quotas_table(migrate_engine) assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas) new_quotas = new_style_quotas_table('quotas_new') new_quotas.create() convert_forward(migrate_engine, old_quotas, new_quotas) old_quotas.drop() + + # clear metadata to work around this: + # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 + meta.clear() + new_quotas = quotas_table(migrate_engine, 'quotas_new') new_quotas.rename('quotas') @@ -193,11 +198,16 @@ def downgrade(migrate_engine): # Operations to reverse the above upgrade go here. meta.bind = migrate_engine - new_quotas = existing_quotas_table(migrate_engine) + new_quotas = quotas_table(migrate_engine) assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas) old_quotas = old_style_quotas_table('quotas_old') old_quotas.create() convert_backward(migrate_engine, old_quotas, new_quotas) new_quotas.drop() + + # clear metadata to work around this: + # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 + meta.clear() + old_quotas = quotas_table(migrate_engine, 'quotas_old') old_quotas.rename('quotas') -- cgit From 998f40594841094291c7472dd608b6a2ba689e4d Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 24 Aug 2011 11:11:20 -0400 Subject: dict formatting --- nova/tests/api/openstack/test_server_actions.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 9a664f44d..3dfdeb79c 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -88,11 +88,13 @@ def stub_instance(id, power_state=0, metadata=None, "access_ip_v4": "", "access_ip_v6": "", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", - "virtual_interfaces": []} + "virtual_interfaces": [], + } instance["fixed_ips"] = { "address": '192.168.0.1', - "floating_ips": []} + "floating_ips": [], + } return instance -- cgit From df0f06a8f0d66f07f402aa54cb1aa7ce1b7416c9 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 08:51:14 -0700 Subject: check log file mode prior to chmod --- nova/log.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/log.py b/nova/log.py index 222b8c5fb..3b86d78e8 100644 --- a/nova/log.py +++ b/nova/log.py @@ -257,7 +257,11 @@ class NovaRootLogger(NovaLogger): self.filelog = WatchedFileHandler(logpath) self.addHandler(self.filelog) self.logpath = 
logpath - os.chmod(self.logpath, FLAGS.logfile_mode) + + import stat + st = os.stat(self.logpath) + if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode): + os.chmod(self.logpath, FLAGS.logfile_mode) else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) -- cgit From 326268b9bf5d958263b70c64ca2ed21deac1a14e Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Wed, 24 Aug 2011 18:01:29 +0200 Subject: Let's be more elegant --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 88ba81018..be1dbdd51 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -62,7 +62,7 @@ def generate_default_hostname(instance): c = chr(i) if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'): table += c - elif (c == ' ') or (c == '_'): + elif c in " _": table += '-' elif ('A' <= c <= 'Z'): table += c.lower() -- cgit From 390f35f8e7736bfbd650c9e3f522274e92adb005 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Wed, 24 Aug 2011 09:11:57 -0700 Subject: Fix flag override in unit test --- nova/tests/test_nova_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index ca4f4c894..ce731222c 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -133,7 +133,7 @@ class NetworkCommandsTestCase(test.TestCase): self.assertEqual(kwargs['bridge_interface'], 'eth0') self.assertEqual(kwargs['dns1'], '8.8.8.8') self.assertEqual(kwargs['dns2'], '8.8.4.4') - FLAGS.network_manager = 'nova.network.manager.VlanManager' + self.flags(network_manager='nova.network.manager.VlanManager') from nova.network import manager as net_manager self.stubs.Set(net_manager.VlanManager, 'create_networks', fake_create_networks) -- cgit From 8ad9373648dea11783545ad88429def4691a2925 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 09:18:53 -0700 Subject: pep8 compliant --- nova/db/api.py | 2 -- nova/db/sqlalchemy/api.py | 4 +--- .../migrate_repo/versions/042_add_volume_types_and_extradata.py | 5 ++++- nova/tests/integrated/test_volumes.py | 5 +++-- nova/tests/test_volume_types.py | 2 -- nova/tests/test_volume_types_extra_specs.py | 1 - 6 files changed, 8 insertions(+), 11 deletions(-) (limited to 'nova') diff --git a/nova/db/api.py b/nova/db/api.py index 8726df6dd..3bb9b4970 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1512,5 +1512,3 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, key/value pairs specified in the extra specs dict argument""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) - - diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5abdd71f9..4786f539f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2350,7 +2350,6 @@ def volume_update(context, volume_id, values): volume_ref.save(session=session) - #################### @@ -3682,8 +3681,7 @@ def volume_type_create(_context, values): specs = values.get('extra_specs') values['extra_specs'] = _metadata_refs(values.get('extra_specs'), - models.VolumeTypeExtraSpecs) - + models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) volume_type_ref.save() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py index 27c8afcee..dd4cccb9e 100644 --- 
a/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py @@ -83,7 +83,9 @@ volume_metadata_table = Table('volume_metadata', meta, unicode_error=None, _warn_on_bytestring=False))) -new_tables = (volume_types, volume_type_extra_specs_table, volume_metadata_table) +new_tables = (volume_types, + volume_type_extra_specs_table, + volume_metadata_table) # # Tables to alter @@ -103,6 +105,7 @@ def upgrade(migrate_engine): volumes.create_column(volume_type_id) + def downgrade(migrate_engine): meta.bind = migrate_engine diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py index 86165944f..d6c5e1ba1 100644 --- a/nova/tests/integrated/test_volumes.py +++ b/nova/tests/integrated/test_volumes.py @@ -291,8 +291,9 @@ class VolumesTest(integrated_helpers._IntegratedTestBase): # Create volume metadata = {'key1': 'value1', 'key2': 'value2'} - created_volume = self.api.post_volume({'volume': {'size': 1, - 'metadata': metadata}}) + created_volume = self.api.post_volume( + {'volume': {'size': 1, + 'metadata': metadata}}) LOG.debug("created_volume: %s" % created_volume) self.assertTrue(created_volume['id']) created_volume_id = created_volume['id'] diff --git a/nova/tests/test_volume_types.py b/nova/tests/test_volume_types.py index 3d906a5fd..a20c18b18 100644 --- a/nova/tests/test_volume_types.py +++ b/nova/tests/test_volume_types.py @@ -152,5 +152,3 @@ class VolumeTypeTestCase(test.TestCase): new2 = volume_types.get_volume_type(self.ctxt, new['id']) self.assertEqual(new, new2) - - diff --git a/nova/tests/test_volume_types_extra_specs.py b/nova/tests/test_volume_types_extra_specs.py index 8d2aa2df3..017b187a1 100644 --- a/nova/tests/test_volume_types_extra_specs.py +++ b/nova/tests/test_volume_types_extra_specs.py @@ -44,7 +44,6 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase): ref = db.api.volume_type_create(self.context, self.vol_type2_noextra) self.vol_type2_id = ref.id - def tearDown(self): # Remove the instance type from the database db.api.volume_type_purge(context.get_admin_context(), -- cgit From 207ce4f19655e70d14f3a67a45ba6acf8f12380d Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 10:16:20 -0700 Subject: added volume type search by extra_spec --- nova/tests/test_volume_types.py | 55 +++++++++++++++++++++++++++++++++++++++++ nova/volume/volume_types.py | 34 +++++++++++++++++++++++-- 2 files changed, 87 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_volume_types.py b/nova/tests/test_volume_types.py index a20c18b18..1b4c9396c 100644 --- a/nova/tests/test_volume_types.py +++ b/nova/tests/test_volume_types.py @@ -152,3 +152,58 @@ class VolumeTypeTestCase(test.TestCase): new2 = volume_types.get_volume_type(self.ctxt, new['id']) self.assertEqual(new, new2) + + def test_volume_type_search_by_extra_spec(self): + """Ensure volume types get by extra spec returns correct type""" + volume_types.create(self.ctxt, "type1", {"key1": "val1", + "key2": "val2"}) + volume_types.create(self.ctxt, "type2", {"key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type3", {"key3": "another_value", + "key4": "val4"}) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key1": "val1"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 1) + self.assertTrue("type1" in vol_types.keys()) + self.assertEqual(vol_types['type1']['extra_specs'], + {"key1": "val1", 
"key2": "val2"}) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key2": "val2"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 2) + self.assertTrue("type1" in vol_types.keys()) + self.assertTrue("type2" in vol_types.keys()) + + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key3": "val3"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 1) + self.assertTrue("type2" in vol_types.keys()) + + + def test_volume_type_search_by_extra_spec_multiple(self): + """Ensure volume types get by extra spec returns correct type""" + volume_types.create(self.ctxt, "type1", {"key1": "val1", + "key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type2", {"key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type3", {"key1": "val1", + "key3": "val3", + "key4": "val4"}) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key1": "val1", + "key3": "val3"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 2) + self.assertTrue("type1" in vol_types.keys()) + self.assertTrue("type3" in vol_types.keys()) + self.assertEqual(vol_types['type1']['extra_specs'], + {"key1": "val1", "key2": "val2", "key3": "val3"}) + self.assertEqual(vol_types['type3']['extra_specs'], + {"key1": "val1", "key3": "val3", "key4": "val4"}) diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py index 9df1e39f8..9b02d4ccc 100644 --- a/nova/volume/volume_types.py +++ b/nova/volume/volume_types.py @@ -68,13 +68,43 @@ def purge(context, name): raise exception.ApiError(_("Unknown volume type: %s") % name) -def get_all_types(context, inactive=0): +def get_all_types(context, inactive=0, search_opts={}): """Get all non-deleted volume_types. Pass true as argument if you want deleted volume types returned also. """ - return db.volume_type_get_all(context, inactive) + vol_types = db.volume_type_get_all(context, inactive) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + def _check_extra_specs_match(vol_type, searchdict): + for k, v in searchdict.iteritems(): + if k not in vol_type['extra_specs'].keys()\ + or vol_type['extra_specs'][k] != v: + return False + return True + + # search_option to filter_name mapping. 
+ filter_mapping = {'extra_specs': _check_extra_specs_match} + + result = {} + for type_name, type_args in vol_types.iteritems(): + # go over all filters in the list + for opt, values in search_opts.iteritems(): + try: + filter_func = filter_mapping[opt] + except KeyError: + # no such filter - ignore it, go to next filter + continue + else: + if filter_func(type_args, values): + # if one of conditions didn't match - remove + result[type_name] = type_args + break + vol_types = result + return vol_types def get_volume_type(context, id): -- cgit From 5ab769daf354033f5044c88d4f899d56effd998e Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Wed, 24 Aug 2011 10:47:11 -0700 Subject: split test_modify() into specific unit tests --- nova/tests/test_nova_manage.py | 88 ++++++++++++++++++++---------------------- 1 file changed, 42 insertions(+), 46 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index ce731222c..c83d773f2 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -34,18 +34,15 @@ sys.dont_write_bytecode = False import mox import stubout -import netaddr import StringIO from nova import context from nova import db from nova import exception from nova import flags -from nova import log as logging from nova import test from nova.tests.db import fakes as db_fakes FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.nova_manage') class FixedIpCommandsTestCase(test.TestCase): @@ -112,6 +109,18 @@ class NetworkCommandsTestCase(test.TestCase): 'host': 'fake_host', 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'} + def fake_network_get_by_cidr(context, cidr): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(cidr, self.fake_net['cidr']) + return db_fakes.FakeModel(self.fake_net) + + def fake_network_update(context, network_id, values): + self.assertTrue(context.to_dict()['is_admin']) + self.assertEqual(network_id, self.fake_net['id']) + self.assertEqual(values, self.fake_update_value) + self.fake_network_get_by_cidr = fake_network_get_by_cidr + self.fake_network_update = fake_network_update + def tearDown(self): super(NetworkCommandsTestCase, self).tearDown() self.stubs.UnsetAll() @@ -185,50 +194,37 @@ class NetworkCommandsTestCase(test.TestCase): self.assertEqual(result, answer) def test_delete(self): - net_dis = self.net - net_dis['project_id'] = None - net_dis['host'] = None - - def fake_network_get_by_cidr(context, cidr): - self.assertTrue(context.to_dict()['is_admin']) - self.assertEqual(cidr, net_dis['cidr']) - return db_fakes.FakeModel(net_dis) - self.stubs.Set(db, 'network_get_by_cidr', fake_network_get_by_cidr) + self.fake_net = self.net + self.fake_net['project_id'] = None + self.fake_net['host'] = None + self.stubs.Set(db, 'network_get_by_cidr', + self.fake_network_get_by_cidr) def fake_network_delete_safe(context, network_id): self.assertTrue(context.to_dict()['is_admin']) - self.assertEqual(network_id, net_dis['id']) + self.assertEqual(network_id, self.fake_net['id']) self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe) - self.commands.delete(fixed_range=net_dis['cidr']) - - def test_modify(self): - - def fake_network_get_by_cidr(context, cidr): - self.assertTrue(context.to_dict()['is_admin']) - self.assertEqual(cidr, self.net['cidr']) - return db_fakes.FakeModel(self.net) - self.stubs.Set(db, 'network_get_by_cidr', fake_network_get_by_cidr) - - def fake_network_update(context, network_id, values): - self.assertTrue(context.to_dict()['is_admin']) - 
self.assertEqual(network_id, self.net['id']) - self.assertEqual(values, {'project_id': 'test_project', - 'host': 'test_host'}) - self.stubs.Set(db, 'network_update', fake_network_update) - self.commands.modify(self.net['cidr'], project='test_project', - host='test_host') - - def fake_network_update(context, network_id, values): - self.assertTrue(context.to_dict()['is_admin']) - self.assertEqual(network_id, self.net['id']) - self.assertEqual(values, {}) - self.stubs.Set(db, 'network_update', fake_network_update) - self.commands.modify(self.net['cidr']) - - def fake_network_update(context, network_id, values): - self.assertTrue(context.to_dict()['is_admin']) - self.assertEqual(network_id, self.net['id']) - self.assertEqual(values, {'project_id': None, - 'host': None}) - self.stubs.Set(db, 'network_update', fake_network_update) - self.commands.modify(self.net['cidr'], dis_project=True, dis_host=True) + self.commands.delete(fixed_range=self.fake_net['cidr']) + + def _test_modify_base(self, update_value, project, host, dis_project=None, + dis_host=None): + self.fake_net = self.net + self.fake_update_value = update_value + self.stubs.Set(db, 'network_get_by_cidr', + self.fake_network_get_by_cidr) + self.stubs.Set(db, 'network_update', self.fake_network_update) + self.commands.modify(self.fake_net['cidr'], project=project, host=host, + dis_project=dis_project, dis_host=dis_host) + + def test_modify_associate(self): + self._test_modify_base(update_value={'project_id': 'test_project', + 'host': 'test_host'}, + project='test_project', host='test_host') + + def test_modify_unchanged(self): + self._test_modify_base(update_value={}, project=None, host=None) + + def test_modify_disassociate(self): + self._test_modify_base(update_value={'project_id': None, 'host': None}, + project=None, host=None, dis_project=True, + dis_host=True) -- cgit From e51c73a2d953103d4245cb8d456cad590be73ff5 Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Wed, 24 Aug 2011 11:10:05 -0700 Subject: Removed unused imports --- nova/tests/test_nova_manage.py | 6 ------ 1 file changed, 6 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index c83d773f2..70efbf651 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import gettext import os import sys @@ -25,8 +24,6 @@ TOPDIR = os.path.normpath(os.path.join( os.pardir)) NOVA_MANAGE_PATH = os.path.join(TOPDIR, 'bin', 'nova-manage') -gettext.install('nova', unicode=1) - sys.dont_write_bytecode = True import imp nova_manage = imp.load_source('nova_manage.py', NOVA_MANAGE_PATH) @@ -38,12 +35,9 @@ import StringIO from nova import context from nova import db from nova import exception -from nova import flags from nova import test from nova.tests.db import fakes as db_fakes -FLAGS = flags.FLAGS - class FixedIpCommandsTestCase(test.TestCase): def setUp(self): -- cgit From 156f2fb68981bde1a24233e2648d11890a98e34a Mon Sep 17 00:00:00 2001 From: Hisaharu Ishii Date: Wed, 24 Aug 2011 13:10:56 -0700 Subject: Add names to placeholders of formatting --- nova/tests/test_nova_manage.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 70efbf651..520bfbea1 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -165,25 +165,27 @@ class NetworkCommandsTestCase(test.TestCase): self.commands.list() sys.stdout = sys.__stdout__ result = output.getvalue() - _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" - head = _fmt % (_('id'), - _('IPv4'), - _('IPv6'), - _('start address'), - _('DNS1'), - _('DNS2'), - _('VlanID'), - _('project'), - _("uuid")) - body = _fmt % (self.net['id'], - self.net['cidr'], - self.net['cidr_v6'], - self.net['dhcp_start'], - self.net['dns1'], - self.net['dns2'], - self.net['vlan'], - self.net['project_id'], - self.net['uuid']) + _fmt = "%(id)-5s\t%(cidr)-18s\t%(cidr_v6)-15s\t%(dhcp_start)-15s\t" +\ + "%(dns1)-15s\t%(dns2)-15s\t%(vlan)-15s\t%(project_id)-15s\t" +\ + "%(uuid)-15s" + head = _fmt % {'id': _('id'), + 'cidr': _('IPv4'), + 'cidr_v6': _('IPv6'), + 'dhcp_start': _('start address'), + 'dns1': _('DNS1'), + 'dns2': _('DNS2'), + 'vlan': _('VlanID'), + 'project_id': _('project'), + 'uuid': _("uuid")} + body = _fmt % {'id': self.net['id'], + 'cidr': self.net['cidr'], + 'cidr_v6': self.net['cidr_v6'], + 'dhcp_start': self.net['dhcp_start'], + 'dns1': self.net['dns1'], + 'dns2': self.net['dns2'], + 'vlan': self.net['vlan'], + 'project_id': self.net['project_id'], + 'uuid': self.net['uuid']} answer = '%s\n%s\n' % (head, body) self.assertEqual(result, answer) -- cgit From 576dd4a0dce66c7949a1f66a6979d9e1d11916bf Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 14:07:17 -0700 Subject: added Openstack APIs for volume types & extradata --- nova/api/openstack/contrib/volumetypes.py | 197 +++++++++++++++++++++ nova/db/sqlalchemy/api.py | 2 +- nova/tests/api/openstack/test_volume_types.py | 171 ++++++++++++++++++ .../api/openstack/test_volume_types_extra_specs.py | 181 +++++++++++++++++++ 4 files changed, 550 insertions(+), 1 deletion(-) create mode 100644 nova/api/openstack/contrib/volumetypes.py create mode 100644 nova/tests/api/openstack/test_volume_types.py create mode 100644 nova/tests/api/openstack/test_volume_types_extra_specs.py (limited to 'nova') diff --git a/nova/api/openstack/contrib/volumetypes.py b/nova/api/openstack/contrib/volumetypes.py new file mode 100644 index 000000000..ed33a8819 --- /dev/null +++ b/nova/api/openstack/contrib/volumetypes.py @@ -0,0 +1,197 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" The volume type & volume types extra specs extension""" + +from webob import exc + +from nova import db +from nova import exception +from nova import quota +from nova.volume import volume_types +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi + + +class VolumeTypesController(object): + """ The volume types API controller for the Openstack API """ + + def index(self, req): + """ Returns the list of volume types """ + context = req.environ['nova.context'] + return volume_types.get_all_types(context) + + def create(self, req, body): + """Creates a new volume type.""" + context = req.environ['nova.context'] + + if not body or body == "": + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vol_type = body.get('volume_type', None) + if vol_type is None or vol_type == "": + return faults.Fault(exc.HTTPUnprocessableEntity()) + + name = vol_type.get('name', None) + specs = vol_type.get('extra_specs', {}) + + if name is None or name == "": + return faults.Fault(exc.HTTPUnprocessableEntity()) + + try: + volume_types.create(context, name, specs) + vol_type = volume_types.get_volume_type_by_name(context, name) + except quota.QuotaError as error: + self._handle_quota_error(error) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + return {'volume_type': vol_type} + + def show(self, req, id): + """ Return a single volume type item """ + context = req.environ['nova.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound or exception.ApiError: + return faults.Fault(exc.HTTPNotFound()) + + return {'volume_type': vol_type} + + def delete(self, req, id): + """ Deletes an existing volume type """ + context = req.environ['nova.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + volume_types.destroy(context, vol_type['name']) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + def _handle_quota_error(self, error): + """Reraise quota errors as api-specific http exceptions.""" + if error.code == "MetadataLimitExceeded": + raise exc.HTTPBadRequest(explanation=error.message) + raise error + + +class VolumeTypeExtraSpecsController(object): + """ The volume type extra specs API controller for the Openstack API """ + + def _get_extra_specs(self, context, vol_type_id): + extra_specs = db.api.volume_type_extra_specs_get(context, vol_type_id) + specs_dict = {} + for key, value in extra_specs.iteritems(): + specs_dict[key] = value + return dict(extra_specs=specs_dict) + + def _check_body(self, body): + if body == None or body == "": + expl = _('No Request Body') + raise exc.HTTPBadRequest(explanation=expl) + + def index(self, req, vol_type_id): + """ Returns the list of extra specs for a given volume type """ + context = req.environ['nova.context'] + return self._get_extra_specs(context, vol_type_id) + + def create(self, req, vol_type_id, body): + self._check_body(body) + context = 
req.environ['nova.context'] + specs = body.get('extra_specs') + try: + db.api.volume_type_extra_specs_update_or_create(context, + vol_type_id, + specs) + except quota.QuotaError as error: + self._handle_quota_error(error) + return body + + def update(self, req, vol_type_id, id, body): + self._check_body(body) + context = req.environ['nova.context'] + if not id in body: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + try: + db.api.volume_type_extra_specs_update_or_create(context, + vol_type_id, + body) + except quota.QuotaError as error: + self._handle_quota_error(error) + + return body + + def show(self, req, vol_type_id, id): + """ Return a single extra spec item """ + context = req.environ['nova.context'] + specs = self._get_extra_specs(context, vol_type_id) + if id in specs['extra_specs']: + return {id: specs['extra_specs'][id]} + else: + return faults.Fault(exc.HTTPNotFound()) + + def delete(self, req, vol_type_id, id): + """ Deletes an existing extra spec """ + context = req.environ['nova.context'] + db.api.volume_type_extra_specs_delete(context, vol_type_id, id) + + def _handle_quota_error(self, error): + """Reraise quota errors as api-specific http exceptions.""" + if error.code == "MetadataLimitExceeded": + raise exc.HTTPBadRequest(explanation=error.message) + raise error + + +class Volumetypes(extensions.ExtensionDescriptor): + + def get_name(self): + return "VolumeTypes" + + def get_alias(self): + return "os-volume-types" + + def get_description(self): + return "Volume types support" + + def get_namespace(self): + return \ + "http://docs.openstack.org/ext/volume_types/api/v1.1" + + def get_updated(self): + return "2011-08-24T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'os-volume-types', + VolumeTypesController()) + resources.append(res) + + res = extensions.ResourceExtension('extra_specs', + VolumeTypeExtraSpecsController(), + parent=dict( + member_name='vol_type', + collection_name='os-volume-types')) + resources.append(res) + + return resources diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 4786f539f..d1fbf8cab 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3823,7 +3823,7 @@ def volume_type_extra_specs_get_item(context, volume_type_id, key, @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, - specs): + specs): session = get_session() spec_ref = None for key, value in specs.iteritems(): diff --git a/nova/tests/api/openstack/test_volume_types.py b/nova/tests/api/openstack/test_volume_types.py new file mode 100644 index 000000000..192e66854 --- /dev/null +++ b/nova/tests/api/openstack/test_volume_types.py @@ -0,0 +1,171 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob + +from nova import exception +from nova import context +from nova import test +from nova import log as logging +from nova.volume import volume_types +from nova.tests.api.openstack import fakes + +LOG = logging.getLogger('nova.tests.api.openstack.test_volume_types') + +last_param = {} + + +def stub_volume_type(id): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) + + +def return_volume_types_get_all_types(context): + return dict(vol_type_1=stub_volume_type(1), + vol_type_2=stub_volume_type(2), + vol_type_3=stub_volume_type(3)) + + +def return_empty_volume_types_get_all_types(context): + return {} + + +def return_volume_types_get_volume_type(context, id): + if id == "777": + raise exception.VolumeTypeNotFound(volume_type_id=id) + return stub_volume_type(int(id)) + + +def return_volume_types_destroy(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + pass + + +def return_volume_types_create(context, name, specs): + pass + + +def return_volume_types_get_by_name(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + return stub_volume_type(int(name.split("_")[2])) + + +class VolumeTypesApiTest(test.TestCase): + def setUp(self): + super(VolumeTypesApiTest, self).setUp() + fakes.stub_out_key_pair_funcs(self.stubs) + + def tearDown(self): + self.stubs.UnsetAll() + super(VolumeTypesApiTest, self).tearDown() + + def test_volume_types_index(self): + self.stubs.Set(volume_types, 'get_all_types', + return_volume_types_get_all_types) + req = webob.Request.blank('/v1.1/123/os-volume-types') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + + self.assertEqual(3, len(res_dict)) + for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']: + self.assertEqual(name, res_dict[name]['name']) + self.assertEqual('value1', res_dict[name]['extra_specs']['key1']) + + def test_volume_types_index_no_data(self): + self.stubs.Set(volume_types, 'get_all_types', + return_empty_volume_types_get_all_types) + req = webob.Request.blank('/v1.1/123/os-volume-types') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual(0, len(res_dict)) + + def test_volume_types_show(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + req = webob.Request.blank('/v1.1/123/os-volume-types/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual(1, len(res_dict)) + self.assertEqual('vol_type_1', res_dict['volume_type']['name']) + + def test_volume_types_show_not_found(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + req = webob.Request.blank('/v1.1/123/os-volume-types/777') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + + def test_volume_types_delete(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_destroy) 
+ req = webob.Request.blank('/v1.1/123/os-volume-types/1') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + + def test_volume_types_delete_not_found(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_destroy) + req = webob.Request.blank('/v1.1/123/os-volume-types/777') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + + def test_create(self): + self.stubs.Set(volume_types, 'create', + return_volume_types_create) + self.stubs.Set(volume_types, 'get_volume_type_by_name', + return_volume_types_get_by_name) + req = webob.Request.blank('/v1.1/123/os-volume-types') + req.method = 'POST' + req.body = '{"volume_type": {"name": "vol_type_1", '\ + '"extra_specs": {"key1": "value1"}}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual(1, len(res_dict)) + self.assertEqual('vol_type_1', res_dict['volume_type']['name']) + + def test_create_empty_body(self): + self.stubs.Set(volume_types, 'create', + return_volume_types_create) + self.stubs.Set(volume_types, 'get_volume_type_by_name', + return_volume_types_get_by_name) + req = webob.Request.blank('/v1.1/123/os-volume-types') + req.method = 'POST' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/test_volume_types_extra_specs.py b/nova/tests/api/openstack/test_volume_types_extra_specs.py new file mode 100644 index 000000000..34bdada22 --- /dev/null +++ b/nova/tests/api/openstack/test_volume_types_extra_specs.py @@ -0,0 +1,181 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob +import os.path + + +from nova import test +from nova.api import openstack +from nova.api.openstack import extensions +from nova.tests.api.openstack import fakes +import nova.wsgi + + +def return_create_volume_type_extra_specs(context, volume_type_id, + extra_specs): + return stub_volume_type_extra_specs() + + +def return_volume_type_extra_specs(context, volume_type_id): + return stub_volume_type_extra_specs() + + +def return_empty_volume_type_extra_specs(context, volume_type_id): + return {} + + +def delete_volume_type_extra_specs(context, volume_type_id, key): + pass + + +def stub_volume_type_extra_specs(): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return specs + + +class VolumeTypesExtraSpecsTest(test.TestCase): + + def setUp(self): + super(VolumeTypesExtraSpecsTest, self).setUp() + fakes.stub_out_key_pair_funcs(self.stubs) + self.api_path = '/v1.1/123/os-volume-types/1/extra_specs' + + def test_index(self): + self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get', + return_volume_type_extra_specs) + request = webob.Request.blank(self.api_path) + res = request.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_index_no_data(self): + self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get', + return_empty_volume_type_extra_specs) + req = webob.Request.blank(self.api_path) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual(0, len(res_dict['extra_specs'])) + + def test_show(self): + self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get', + return_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/key5') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual('value5', res_dict['key5']) + + def test_show_spec_not_found(self): + self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get', + return_empty_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/key6') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(404, res.status_int) + + def test_delete(self): + self.stubs.Set(nova.db.api, 'volume_type_extra_specs_delete', + delete_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/key5') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + + def test_create(self): + self.stubs.Set(nova.db.api, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + req = webob.Request.blank(self.api_path) + req.method = 'POST' + req.body = '{"extra_specs": {"key1": "value1"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_create_empty_body(self): + self.stubs.Set(nova.db.api, + 'volume_type_extra_specs_update_or_create', + 
return_create_volume_type_extra_specs) + req = webob.Request.blank(self.api_path) + req.method = 'POST' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_update_item(self): + self.stubs.Set(nova.db.api, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/key1') + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + res_dict = json.loads(res.body) + self.assertEqual('value1', res_dict['key1']) + + def test_update_item_empty_body(self): + self.stubs.Set(nova.db.api, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_update_item_too_many_keys(self): + self.stubs.Set(nova.db.api, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/key1') + req.method = 'PUT' + req.body = '{"key1": "value1", "key2": "value2"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(nova.db.api, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + req = webob.Request.blank(self.api_path + '/bad') + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) -- cgit From 96f85f94f23c9eeac3f43e122d2992b6d0938827 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 14:30:40 -0700 Subject: forgot to add new extension to test_extensions --- nova/tests/api/openstack/test_extensions.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova') diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 9f923852d..c78588d65 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -97,6 +97,7 @@ class ExtensionControllerTest(test.TestCase): "SecurityGroups", "VirtualInterfaces", "Volumes", + "VolumeTypes", ] self.ext_list.sort() -- cgit From e30d2c372cc36d7e9a6cf3af5016834b499a7b42 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Wed, 24 Aug 2011 15:18:17 -0700 Subject: fixing inappropriate rubyism in test code --- nova/tests/test_instance_types.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index 556ba91a9..409a22fb6 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -47,24 +47,24 @@ class InstanceTypeTestCase(test.TestCase): self.id = max_id["id"] + 1 self.name = str(int(time.time())) - def _nonexistant_flavor_name(self): + def _nonexistent_flavor_name(self): """return an instance type name not in the DB""" - nonexistant_flavor = "sdfsfsdf" + nonexistent_flavor = "sdfsfsdf" flavors = instance_types.get_all_types() - 
while nonexistant_flavor in flavors: - nonexistant_flavor = nonexistant_flavor.join("z") + while nonexistent_flavor in flavors: + nonexistent_flavor += "z" else: - return nonexistant_flavor + return nonexistent_flavor - def _nonexistant_flavor_id(self): + def _nonexistent_flavor_id(self): """return an instance type ID not in the DB""" - nonexistant_flavor = 2700 + nonexistent_flavor = 2700 flavor_ids = [value["id"] for key, value in\ instance_types.get_all_types().iteritems()] - while nonexistant_flavor in flavor_ids: - nonexistant_flavor += 1 + while nonexistent_flavor in flavor_ids: + nonexistent_flavor += 1 else: - return nonexistant_flavor + return nonexistent_flavor def _existing_flavor(self): """return first instance type name""" @@ -111,7 +111,7 @@ class InstanceTypeTestCase(test.TestCase): """Ensures that instance type creation fails with invalid args""" self.assertRaises(exception.ApiError, instance_types.destroy, - self._nonexistant_flavor_name()) + self._nonexistent_flavor_name()) def test_repeated_inst_types_should_raise_api_error(self): """Ensures that instance duplicates raises ApiError""" @@ -126,7 +126,7 @@ class InstanceTypeTestCase(test.TestCase): """Ensure destroy sad path of no name raises error""" self.assertRaises(exception.ApiError, instance_types.destroy, - self._nonexistant_flavor_name()) + self._nonexistent_flavor_name()) def test_will_not_purge_without_name(self): """Ensure purge without a name raises error""" @@ -137,11 +137,11 @@ class InstanceTypeTestCase(test.TestCase): """Ensure purge without correct name raises error""" self.assertRaises(exception.ApiError, instance_types.purge, - self._nonexistant_flavor_name()) + self._nonexistent_flavor_name()) def test_will_not_get_bad_default_instance_type(self): """ensures error raised on bad default instance type""" - FLAGS.default_instance_type = self._nonexistant_flavor_name() + FLAGS.default_instance_type = self._nonexistent_flavor_name() self.assertRaises(exception.InstanceTypeNotFoundByName, instance_types.get_default_instance_type) @@ -154,10 +154,10 @@ class InstanceTypeTestCase(test.TestCase): """Ensure get by name returns default flavor with bad name""" self.assertRaises(exception.InstanceTypeNotFound, instance_types.get_instance_type, - self._nonexistant_flavor_name()) + self._nonexistent_flavor_name()) def test_will_not_get_flavor_by_bad_flavor_id(self): """Ensure get by flavor raises error with wrong flavorid""" self.assertRaises(exception.InstanceTypeNotFound, instance_types.get_instance_type_by_name, - self._nonexistant_flavor_id()) + self._nonexistent_flavor_id()) -- cgit From e356009b3ab374f073c2ff2cdb30d78bf432670d Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 15:20:37 -0700 Subject: moved import to the top --- nova/log.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/log.py b/nova/log.py index 3b86d78e8..b1dfd9ed2 100644 --- a/nova/log.py +++ b/nova/log.py @@ -34,6 +34,7 @@ import logging.handlers import os import sys import traceback +import stat import nova from nova import flags @@ -258,7 +259,6 @@ class NovaRootLogger(NovaLogger): self.addHandler(self.filelog) self.logpath = logpath - import stat st = os.stat(self.logpath) if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode): os.chmod(self.logpath, FLAGS.logfile_mode) -- cgit From 48cd9689de31e408c792052747f714a9dbe1f8f7 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 15:51:29 -0700 Subject: added virtio flag; associate address for VSA; cosmetic changes. 
Prior to volume_types merge --- nova/api/openstack/contrib/drive_types.py | 1 - .../openstack/contrib/virtual_storage_arrays.py | 49 +++++++++++++++++- .../migrate_repo/versions/037_add_vsa_data.py | 1 - nova/db/sqlalchemy/session.py | 2 - nova/network/linux_net.py | 1 + nova/scheduler/vsa.py | 1 - nova/tests/test_drive_types.py | 59 +++++++++++----------- nova/tests/test_vsa.py | 2 +- nova/virt/libvirt.xml.template | 4 +- nova/virt/libvirt/connection.py | 4 ++ nova/vsa/__init__.py | 1 - nova/vsa/api.py | 7 ++- nova/vsa/connection.py | 1 - nova/vsa/drive_types.py | 1 - nova/vsa/fake.py | 1 - nova/vsa/manager.py | 1 - 16 files changed, 88 insertions(+), 48 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index f2cbd3715..1aa65374f 100644 --- a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index d6c4a5ef4..81dbc9e1f 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -24,6 +23,7 @@ from webob import exc from nova import vsa from nova import volume from nova import compute +from nova import network from nova import db from nova import quota from nova import exception @@ -103,6 +103,7 @@ class VsaController(object): def __init__(self): self.vsa_api = vsa.API() self.compute_api = compute.API() + self.network_api = network.API() super(VsaController, self).__init__() def _items(self, req, details): @@ -186,6 +187,48 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) + def associate_address(self, req, id, body): + """ /zadr-vsa/{vsa_id}/associate_address + auto or manually associate an IP to VSA + """ + context = req.environ['nova.context'] + + if body is None: + ip = 'auto' + else: + ip = body.get('ipAddress', 'auto') + + LOG.audit(_("Associate address %(ip)s to VSA %(id)s"), + locals(), context=context) + + try: + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(id))}) + + if instances is None or len(instances)==0: + return faults.Fault(exc.HTTPNotFound()) + + for instance in instances: + self.network_api.allocate_for_instance(context, instance, vpn=False) + return + + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + def disassociate_address(self, req, id, body): + """ /zadr-vsa/{vsa_id}/disassociate_address + auto or manually associate an IP to VSA + """ + context = req.environ['nova.context'] + + if body is None: + ip = 'auto' + else: + ip = body.get('ipAddress', 'auto') + + LOG.audit(_("Disassociate address from VSA %(id)s"), + locals(), context=context) + class VsaVolumeDriveController(volumes.VolumeController): """The base class for VSA volumes & drives. 
@@ -515,7 +558,9 @@ class Virtual_storage_arrays(extensions.ExtensionDescriptor): VsaController(), collection_actions={'detail': 'GET'}, member_actions={'add_capacity': 'POST', - 'remove_capacity': 'POST'}) + 'remove_capacity': 'POST', + 'associate_address': 'POST', + 'disassociate_address': 'POST'}) resources.append(res) res = extensions.ResourceExtension('volumes', diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py index 5a80f4e7a..8a57bd234 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 07f281938..c678cb543 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -30,11 +30,9 @@ import nova.exception import nova.flags import nova.log - FLAGS = nova.flags.FLAGS LOG = nova.log.getLogger("nova.db.sqlalchemy") - try: import MySQLdb except ImportError: diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 57c1d0c28..3de605ae2 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -508,6 +508,7 @@ def get_dhcp_hosts(context, network_ref): if network_ref['multi_host'] and FLAGS.host != host: continue hosts.append(_host_dhcp(fixed_ref)) + return '\n'.join(hosts) diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 10c9b5a02..218ad5c7b 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py index e91c41321..b52e6705b 100644 --- a/nova/tests/test_drive_types.py +++ b/nova/tests/test_drive_types.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -28,21 +27,21 @@ from nova import test from nova.vsa import drive_types FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.vsa') +LOG = logging.getLogger('nova.tests.test_drive_types') class DriveTypesTestCase(test.TestCase): """Test cases for driver types code""" def setUp(self): super(DriveTypesTestCase, self).setUp() - self.cntx = context.RequestContext(None, None) - self.cntx_admin = context.get_admin_context() - self._dtype = self._create_drive_type() + self.ctxt = context.RequestContext(None, None) + self.ctxt_admin = context.get_admin_context() + self._dtype = self._create_default_drive_type() def tearDown(self): self._dtype = None - def _create_drive_type(self): + def _create_default_drive_type(self): """Create a volume object.""" dtype = {} dtype['type'] = 'SATA' @@ -51,97 +50,97 @@ class DriveTypesTestCase(test.TestCase): dtype['capabilities'] = None dtype['visible'] = True - LOG.debug(_("Drive Type created %s"), dtype) + LOG.debug(_("Default values for Drive Type: %s"), dtype) return dtype def test_drive_type_create_delete(self): dtype = self._dtype - prev_all_dtypes = drive_types.get_all(self.cntx_admin, False) + prev_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) for k, v in dtype.iteritems(): self.assertEqual(v, new[k], 'one of fields doesnt match') - new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) self.assertNotEqual(len(prev_all_dtypes), len(new_all_dtypes), 'drive type was not created') - drive_types.delete(self.cntx_admin, new['id']) - new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + drive_types.delete(self.ctxt_admin, new['id']) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) self.assertEqual(prev_all_dtypes, new_all_dtypes, 'drive types was not deleted') def test_drive_type_check_name_generation(self): dtype = self._dtype - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) expected_name = FLAGS.drive_type_template_short % \ (dtype['type'], dtype['size_gb'], dtype['rpm']) self.assertEqual(new['name'], expected_name, 'name was not generated correctly') dtype['capabilities'] = 'SEC' - new2 = drive_types.create(self.cntx_admin, **dtype) + new2 = drive_types.create(self.ctxt_admin, **dtype) expected_name = FLAGS.drive_type_template_long % \ (dtype['type'], dtype['size_gb'], dtype['rpm'], dtype['capabilities']) self.assertEqual(new2['name'], expected_name, 'name was not generated correctly') - drive_types.delete(self.cntx_admin, new['id']) - drive_types.delete(self.cntx_admin, new2['id']) + drive_types.delete(self.ctxt_admin, new['id']) + drive_types.delete(self.ctxt_admin, new2['id']) def test_drive_type_create_delete_invisible(self): dtype = self._dtype dtype['visible'] = False - prev_all_dtypes = drive_types.get_all(self.cntx_admin, True) - new = drive_types.create(self.cntx_admin, **dtype) + prev_all_dtypes = drive_types.get_all(self.ctxt_admin, True) + new = drive_types.create(self.ctxt_admin, **dtype) - new_all_dtypes = drive_types.get_all(self.cntx_admin, True) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, True) self.assertEqual(prev_all_dtypes, new_all_dtypes) - new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) self.assertNotEqual(prev_all_dtypes, new_all_dtypes) - 
drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.ctxt_admin, new['id']) def test_drive_type_rename_update(self): dtype = self._dtype dtype['capabilities'] = None - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) for k, v in dtype.iteritems(): self.assertEqual(v, new[k], 'one of fields doesnt match') new_name = 'NEW_DRIVE_NAME' - new = drive_types.rename(self.cntx_admin, new['name'], new_name) + new = drive_types.rename(self.ctxt_admin, new['name'], new_name) self.assertEqual(new['name'], new_name) - new = drive_types.rename(self.cntx_admin, new_name) + new = drive_types.rename(self.ctxt_admin, new_name) expected_name = FLAGS.drive_type_template_short % \ (dtype['type'], dtype['size_gb'], dtype['rpm']) self.assertEqual(new['name'], expected_name) changes = {'rpm': 7200} - new = drive_types.update(self.cntx_admin, new['id'], **changes) + new = drive_types.update(self.ctxt_admin, new['id'], **changes) for k, v in changes.iteritems(): self.assertEqual(v, new[k], 'one of fields doesnt match') - drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.ctxt_admin, new['id']) def test_drive_type_get(self): dtype = self._dtype - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) - new2 = drive_types.get(self.cntx_admin, new['id']) + new2 = drive_types.get(self.ctxt_admin, new['id']) for k, v in new2.iteritems(): self.assertEqual(str(new[k]), str(new2[k]), 'one of fields doesnt match') - new2 = drive_types.get_by_name(self.cntx_admin, new['name']) + new2 = drive_types.get_by_name(self.ctxt_admin, new['name']) for k, v in new.iteritems(): self.assertEqual(str(new[k]), str(new2[k]), 'one of fields doesnt match') - drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.ctxt_admin, new['id']) diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index cff23a800..726939744 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -113,7 +113,7 @@ class VsaTestCase(test.TestCase): self.assertRaises(exception.ApiError, self.vsa_api.create, self.context, **param) vsa_list2 = self.vsa_api.get_all(self.context) - self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + self.assertEqual(len(vsa_list2), len(vsa_list1)) param = {'storage': [{'drive_name': 'wrong name'}]} self.assertRaises(exception.ApiError, diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 210e2b0fb..0b241120b 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -128,7 +128,9 @@ - +#if $getVar('use_virtio_for_bridges', True) + +#end if diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index e8a657bac..fb16aa57d 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -130,6 +130,9 @@ flags.DEFINE_string('libvirt_vif_type', 'bridge', flags.DEFINE_string('libvirt_vif_driver', 'nova.virt.libvirt.vif.LibvirtBridgeDriver', 'The libvirt VIF driver to configure the VIFs.') +flags.DEFINE_bool('libvirt_use_virtio_for_bridges', + False, + 'Use virtio for bridge interfaces') def get_connection(read_only): @@ -1047,6 +1050,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ebs_root': ebs_root, 'local_device': local_device, 'volumes': block_device_mapping, + 'use_virtio_for_bridges': FLAGS.libvirt_use_virtio_for_bridges, 'ephemerals': ephemerals} root_device_name = driver.block_device_info_get_root(block_device_info) diff --git a/nova/vsa/__init__.py 
b/nova/vsa/__init__.py index 779b7fb65..09162e006 100644 --- a/nova/vsa/__init__.py +++ b/nova/vsa/__init__.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 19185b907..bb6e93b87 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -194,9 +193,9 @@ class API(base.Base): volume_params = self._check_storage_parameters(context, vsa_name, storage, shared) except exception.ApiError: - self.update_vsa_status(context, vsa_id, - status=VsaState.FAILED) - raise + self.db.vsa_destroy(context, vsa_id) + raise exception.ApiError(_("Error in storage parameters: %s") + % storage) # after creating DB entry, re-check and set some defaults updates = {} diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py index 5de8021a7..8ac8a1dd5 100644 --- a/nova/vsa/connection.py +++ b/nova/vsa/connection.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 3c67fdbb9..3cdbbfb09 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py index d96138255..0bb81484d 100644 --- a/nova/vsa/fake.py +++ b/nova/vsa/fake.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index d98d0fcb2..0f1718d38 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain -- cgit From dde9fdc6665e2562ec490fe43f46dcf945c59220 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Wed, 24 Aug 2011 16:03:32 -0700 Subject: removes french spellings to satisfy american developers --- nova/tests/test_instance_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index 409a22fb6..09f532239 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -107,7 +107,7 @@ class InstanceTypeTestCase(test.TestCase): exception.InvalidInput, instance_types.create, self.name, 256, 1, "aa", self.flavorid) - def test_non_existant_inst_type_shouldnt_delete(self): + def test_non_existent_inst_type_shouldnt_delete(self): """Ensures that instance type creation fails with invalid args""" self.assertRaises(exception.ApiError, instance_types.destroy, -- cgit From 0c88dbc01ae9c10a3a83072ecdef201103a46752 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 16:41:14 -0700 Subject: added new tables to list of DBs in migration.py --- nova/db/sqlalchemy/migration.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index d9e303599..765deb479 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -64,7 +64,8 @@ def db_version(): 'users', 'user_project_association', 'user_project_role_association', 'user_role_association', - 'volumes'): + 'volumes', 'volume_metadata', + 'volume_types', 'volume_type_extra_specs'): assert table in meta.tables return db_version_control(1) except AssertionError: -- cgit From d8da62dd6b30505bc700bafcd73f4f990cdab807 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 25 Aug 2011 11:37:17 -0400 Subject: fixing bug --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 64c106f47..e90022de3 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -241,7 +241,7 @@ class VMOps(object): # Alter the image before VM start for, e.g. network injection also # alter the image if there's metadata. - if FLAGS.flat_injected or instance['metadata']: + if FLAGS.flat_injected: VMHelper.preconfigure_instance(self._session, instance, first_vdi_ref, network_info) -- cgit From 42f9739a608b5b6219aa9f51bc90c2d072edc33a Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 25 Aug 2011 11:44:21 -0400 Subject: changing comment --- nova/virt/xenapi/vmops.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e90022de3..c5f105f40 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -239,8 +239,7 @@ class VMOps(object): self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref, vdis) - # Alter the image before VM start for, e.g. network injection also - # alter the image if there's metadata. + # Alter the image before VM start for network injection. 
if FLAGS.flat_injected: VMHelper.preconfigure_instance(self._session, instance, first_vdi_ref, network_info) -- cgit From 4859cce97b328cf6f76b3b5bd7a0e7d3227428f7 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 08:59:21 -0700 Subject: moved import up --- nova/log.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/log.py b/nova/log.py index b1dfd9ed2..eb0b6020f 100644 --- a/nova/log.py +++ b/nova/log.py @@ -32,9 +32,9 @@ import json import logging import logging.handlers import os +import stat import sys import traceback -import stat import nova from nova import flags -- cgit From fafaf040ed4518006efd3e5909ac31bae47af33d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 25 Aug 2011 15:08:28 -0700 Subject: pass all commands to run_iscsiadm as a tuple --- nova/volume/driver.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index c99534c07..473470788 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -523,7 +523,7 @@ class ISCSIDriver(VolumeDriver): "node.session.auth.password", iscsi_properties['auth_password']) - self._run_iscsiadm(iscsi_properties, "--login") + self._run_iscsiadm(iscsi_properties, ("--login", )) self._iscsiadm_update(iscsi_properties, "node.startup", "automatic") @@ -544,7 +544,7 @@ class ISCSIDriver(VolumeDriver): locals()) # The rescan isn't documented as being necessary(?), but it helps - self._run_iscsiadm(iscsi_properties, "--rescan") + self._run_iscsiadm(iscsi_properties, ("--rescan", )) tries = tries + 1 if not os.path.exists(mount_device): @@ -561,7 +561,7 @@ class ISCSIDriver(VolumeDriver): """Undiscover volume on a remote host.""" iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") - self._run_iscsiadm(iscsi_properties, "--logout") + self._run_iscsiadm(iscsi_properties, ("--logout", )) self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) def check_for_export(self, context, volume_id): -- cgit From 4834b920e3186712ab56e65a88c2e8c838d16f9c Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 18:38:35 -0700 Subject: VSA code redesign. 
Drive types completely replaced by Volume types --- nova/api/openstack/contrib/drive_types.py | 143 --------------- .../openstack/contrib/virtual_storage_arrays.py | 54 +++--- nova/db/api.py | 45 ----- nova/db/sqlalchemy/api.py | 153 +--------------- .../migrate_repo/versions/042_add_vsa_data.py | 133 -------------- .../migrate_repo/versions/043_add_vsa_data.py | 75 ++++++++ nova/db/sqlalchemy/models.py | 43 +---- nova/db/sqlalchemy/session.py | 2 + nova/exception.py | 12 -- nova/log.py | 2 +- nova/network/linux_net.py | 1 - nova/quota.py | 5 +- nova/scheduler/vsa.py | 68 +++++--- .../api/openstack/contrib/test_drive_types.py | 192 --------------------- nova/tests/api/openstack/contrib/test_vsa.py | 79 +++++---- nova/tests/api/openstack/test_extensions.py | 3 +- nova/tests/scheduler/test_vsa_scheduler.py | 68 +++++--- nova/tests/test_drive_types.py | 146 ---------------- nova/tests/test_vsa.py | 49 +++--- nova/tests/test_vsa_volumes.py | 77 +++++---- nova/tests/test_xenapi.py | 1 + nova/virt/libvirt/connection.py | 5 +- nova/volume/api.py | 63 +++---- nova/volume/driver.py | 74 ++++++-- nova/volume/manager.py | 27 ++- nova/volume/san.py | 12 +- nova/volume/volume_types.py | 43 ++++- nova/vsa/api.py | 148 +++++++--------- nova/vsa/drive_types.py | 114 ------------ nova/vsa/fake.py | 2 +- nova/vsa/manager.py | 52 +++--- nova/vsa/utils.py | 80 +++++++++ 32 files changed, 633 insertions(+), 1338 deletions(-) delete mode 100644 nova/api/openstack/contrib/drive_types.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py delete mode 100644 nova/tests/api/openstack/contrib/test_drive_types.py delete mode 100644 nova/tests/test_drive_types.py delete mode 100644 nova/vsa/drive_types.py create mode 100644 nova/vsa/utils.py (limited to 'nova') diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py deleted file mode 100644 index 1aa65374f..000000000 --- a/nova/api/openstack/contrib/drive_types.py +++ /dev/null @@ -1,143 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" The Drive Types extension for Virtual Storage Arrays""" - -from webob import exc - -from nova.vsa import drive_types -from nova import exception -from nova import log as logging -from nova.api.openstack import common -from nova.api.openstack import extensions -from nova.api.openstack import faults - -LOG = logging.getLogger("nova.api.drive_types") - - -def _drive_type_view(drive): - """Maps keys for drive types view.""" - d = {} - - d['id'] = drive['id'] - d['displayName'] = drive['name'] - d['type'] = drive['type'] - d['size'] = drive['size_gb'] - d['rpm'] = drive['rpm'] - d['capabilities'] = drive['capabilities'] - return d - - -class DriveTypeController(object): - """The Drive Type API controller for the OpenStack API.""" - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "drive_type": [ - "id", - "displayName", - "type", - "size", - "rpm", - "capabilities", - ]}}} - - def index(self, req): - """Returns a list of drive types.""" - - context = req.environ['nova.context'] - dtypes = drive_types.get_all(context) - limited_list = common.limited(dtypes, req) - res = [_drive_type_view(drive) for drive in limited_list] - return {'drive_types': res} - - def show(self, req, id): - """Return data about the given drive type.""" - context = req.environ['nova.context'] - - try: - drive = drive_types.get(context, id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - - return {'drive_type': _drive_type_view(drive)} - - def create(self, req, body): - """Creates a new drive type.""" - context = req.environ['nova.context'] - - if not body: - return faults.Fault(exc.HTTPUnprocessableEntity()) - - drive = body['drive_type'] - - name = drive.get('displayName') - type = drive.get('type') - size = drive.get('size') - rpm = drive.get('rpm') - capabilities = drive.get('capabilities') - - LOG.audit(_("Create drive type %(name)s for "\ - "%(type)s:%(size)s:%(rpm)s"), locals(), context=context) - - new_drive = drive_types.create(context, - type=type, - size_gb=size, - rpm=rpm, - capabilities=capabilities, - name=name) - - return {'drive_type': _drive_type_view(new_drive)} - - def delete(self, req, id): - """Deletes a drive type.""" - context = req.environ['nova.context'] - - LOG.audit(_("Delete drive type with id: %s"), id, context=context) - - try: - drive_types.delete(context, id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - - -class Drive_types(extensions.ExtensionDescriptor): - - def get_name(self): - return "DriveTypes" - - def get_alias(self): - return "zadr-drive_types" - - def get_description(self): - return "Drive Types support" - - def get_namespace(self): - return "http://docs.openstack.org/ext/drive_types/api/v1.1" - - def get_updated(self): - return "2011-06-29T00:00:00+00:00" - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - 'zadr-drive_types', - DriveTypeController()) - - resources.append(res) - return resources diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 81dbc9e1f..f3e4fc849 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -106,6 +106,10 @@ class VsaController(object): self.network_api = network.API() super(VsaController, self).__init__() + def _get_instances_by_vsa_id(self, context, id): + return self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(id))}) + def _items(self, req, details): """Return 
summary or detailed list of VSAs.""" context = req.environ['nova.context'] @@ -114,8 +118,7 @@ class VsaController(object): vsa_list = [] for vsa in limited_list: - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) vsa_list.append(_vsa_view(context, vsa, details, instances)) return {'vsaSet': vsa_list} @@ -136,9 +139,7 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) - + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) return {'vsa': _vsa_view(context, vsa, True, instances)} def create(self, req, body): @@ -171,9 +172,7 @@ class VsaController(object): vsa = self.vsa_api.create(context, **args) - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) - + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) return {'vsa': _vsa_view(context, vsa, True, instances)} def delete(self, req, id): @@ -202,14 +201,14 @@ class VsaController(object): locals(), context=context) try: - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(id))}) - - if instances is None or len(instances)==0: + instances = self._get_instances_by_vsa_id(context, id) + if instances is None or len(instances) == 0: return faults.Fault(exc.HTTPNotFound()) for instance in instances: - self.network_api.allocate_for_instance(context, instance, vpn=False) + self.network_api.allocate_for_instance(context, instance, + vpn=False) + # Placeholder return except exception.NotFound: @@ -228,6 +227,7 @@ class VsaController(object): LOG.audit(_("Disassociate address from VSA %(id)s"), locals(), context=context) + # Placeholder class VsaVolumeDriveController(volumes.VolumeController): @@ -255,6 +255,7 @@ class VsaVolumeDriveController(volumes.VolumeController): def __init__(self): self.volume_api = volume.API() + self.vsa_api = vsa.API() super(VsaVolumeDriveController, self).__init__() def _translation(self, context, vol, vsa_id, details): @@ -264,7 +265,7 @@ class VsaVolumeDriveController(volumes.VolumeController): translation = volumes.translate_volume_summary_view d = translation(context, vol) - d['vsaId'] = vol[self.direction] + d['vsaId'] = vsa_id d['name'] = vol['name'] return d @@ -276,8 +277,9 @@ class VsaVolumeDriveController(volumes.VolumeController): LOG.error(_("%(obj)s with ID %(id)s not found"), locals()) raise - own_vsa_id = volume_ref[self.direction] - if own_vsa_id != int(vsa_id): + own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref, + self.direction) + if own_vsa_id != vsa_id: LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\ " and not to VSA %(vsa_id)s."), locals()) raise exception.Invalid() @@ -286,8 +288,8 @@ class VsaVolumeDriveController(volumes.VolumeController): """Return summary or detailed list of volumes for particular VSA.""" context = req.environ['nova.context'] - vols = self.volume_api.get_all_by_vsa(context, vsa_id, - self.direction.split('_')[0]) + vols = self.volume_api.get_all(context, + search_opts={'metadata': {self.direction: str(vsa_id)}}) limited_list = common.limited(vols, req) res = [self._translation(context, vol, vsa_id, details) \ @@ -317,11 +319,19 @@ class VsaVolumeDriveController(volumes.VolumeController): size = vol['size'] LOG.audit(_("Create volume of %(size)s GB 
from VSA ID %(vsa_id)s"), locals(), context=context) + try: + # create is supported for volumes only (drives created through VSA) + volume_type = self.vsa_api.get_vsa_volume_type(context) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) - new_volume = self.volume_api.create(context, size, None, - vol.get('displayName'), - vol.get('displayDescription'), - from_vsa_id=vsa_id) + new_volume = self.volume_api.create(context, + size, + None, + vol.get('displayName'), + vol.get('displayDescription'), + volume_type=volume_type, + metadata=dict(from_vsa_id=str(vsa_id))) return {self.object: self._translation(context, new_volume, vsa_id, True)} diff --git a/nova/db/api.py b/nova/db/api.py index 354a90571..a2e581fe9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -918,16 +918,6 @@ def volume_get_all_by_project(context, project_id): return IMPL.volume_get_all_by_project(context, project_id) -def volume_get_all_assigned_to_vsa(context, vsa_id): - """Get all volumes assigned to particular VSA.""" - return IMPL.volume_get_all_assigned_to_vsa(context, vsa_id) - - -def volume_get_all_assigned_from_vsa(context, vsa_id): - """Get all volumes created from particular VSA.""" - return IMPL.volume_get_all_assigned_from_vsa(context, vsa_id) - - def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) @@ -1528,36 +1518,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, #################### -def drive_type_create(context, values): - """Creates drive type record.""" - return IMPL.drive_type_create(context, values) - - -def drive_type_update(context, drive_type_id, values): - """Updates drive type record.""" - return IMPL.drive_type_update(context, drive_type_id, values) - - -def drive_type_destroy(context, drive_type_id): - """Deletes drive type record.""" - return IMPL.drive_type_destroy(context, drive_type_id) - - -def drive_type_get(context, drive_type_id): - """Get drive type record by id.""" - return IMPL.drive_type_get(context, drive_type_id) - - -def drive_type_get_by_name(context, name): - """Get drive type record by name.""" - return IMPL.drive_type_get_by_name(context, name) - - -def drive_type_get_all(context, visible): - """Returns all (or only visible) drive types.""" - return IMPL.drive_type_get_all(context, visible) - - def vsa_create(context, values): """Creates Virtual Storage Array record.""" return IMPL.vsa_create(context, values) @@ -1586,8 +1546,3 @@ def vsa_get_all(context): def vsa_get_all_by_project(context, project_id): """Get all Virtual Storage Array records by project ID.""" return IMPL.vsa_get_all_by_project(context, project_id) - - -def vsa_get_vc_ips_list(context, vsa_id): - """Retrieves IPs of instances associated with Virtual Storage Array.""" - return IMPL.vsa_get_vc_ips_list(context, vsa_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7a572f55a..65b09a65d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2226,7 +2226,6 @@ def volume_get(context, volume_id, session=None): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -2235,7 +2234,6 @@ def volume_get(context, volume_id, session=None): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ 
filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ filter_by(deleted=False).\ @@ -2253,7 +2251,6 @@ def volume_get_all(context): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2265,7 +2262,6 @@ def volume_get_all_by_host(context, host): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2277,7 +2273,6 @@ def volume_get_all_by_instance(context, instance_id): result = session.query(models.Volume).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() @@ -2286,28 +2281,6 @@ def volume_get_all_by_instance(context, instance_id): return result -@require_admin_context -def volume_get_all_assigned_to_vsa(context, vsa_id): - session = get_session() - result = session.query(models.Volume).\ - options(joinedload('drive_type')).\ - filter_by(to_vsa_id=vsa_id).\ - filter_by(deleted=False).\ - all() - return result - - -@require_admin_context -def volume_get_all_assigned_from_vsa(context, vsa_id): - session = get_session() - result = session.query(models.Volume).\ - options(joinedload('drive_type')).\ - filter_by(from_vsa_id=vsa_id).\ - filter_by(deleted=False).\ - all() - return result - - @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) @@ -2317,7 +2290,6 @@ def volume_get_all_by_project(context, project_id): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2332,7 +2304,6 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) @@ -2377,7 +2348,7 @@ def volume_update(context, volume_id, values): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) - return volume_ref + #################### @@ -3871,106 +3842,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, #################### -@require_admin_context -def drive_type_create(context, values): - """ - Creates drive type record. - """ - try: - drive_type_ref = models.DriveTypes() - drive_type_ref.update(values) - drive_type_ref.save() - except Exception, e: - raise exception.DBError(e) - return drive_type_ref - - -@require_admin_context -def drive_type_update(context, drive_type_id, values): - """ - Updates drive type record. - """ - session = get_session() - with session.begin(): - drive_type_ref = drive_type_get(context, drive_type_id, - session=session) - drive_type_ref.update(values) - drive_type_ref.save(session=session) - return drive_type_ref - - -@require_admin_context -def drive_type_destroy(context, drive_type_id): - """ - Deletes drive type record. 
- """ - session = get_session() - drive_type_ref = session.query(models.DriveTypes).\ - filter_by(id=drive_type_id) - records = drive_type_ref.delete() - if records == 0: - raise exception.VirtualDiskTypeNotFound(id=drive_type_id) - - -@require_context -def drive_type_get(context, drive_type_id, session=None): - """ - Get drive type record by id. - """ - if not session: - session = get_session() - - result = session.query(models.DriveTypes).\ - filter_by(id=drive_type_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - if not result: - raise exception.VirtualDiskTypeNotFound(id=drive_type_id) - - return result - - -@require_context -def drive_type_get_by_name(context, name, session=None): - """ - Get drive type record by name. - """ - if not session: - session = get_session() - - result = session.query(models.DriveTypes).\ - filter_by(name=name).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - if not result: - raise exception.VirtualDiskTypeNotFoundByName(name=name) - - return result - - -@require_context -def drive_type_get_all(context, visible): - """ - Returns all (or only visible) drive types. - """ - session = get_session() - if visible: - drive_types = session.query(models.DriveTypes).\ - filter_by(deleted=can_read_deleted(context)).\ - filter_by(visible=True).\ - order_by("name").\ - all() - else: - drive_types = session.query(models.DriveTypes).\ - filter_by(deleted=can_read_deleted(context)).\ - order_by("name").\ - all() - return drive_types - - - #################### - - @require_admin_context def vsa_create(context, values): """ @@ -4067,26 +3938,4 @@ def vsa_get_all_by_project(context, project_id): all() -@require_context -def vsa_get_vc_ips_list(context, vsa_id): - """ - Retrieves IPs of instances associated with Virtual Storage Array. - """ - result = [] - - vc_instances = instance_get_all_by_filters(context, - search_opts={'metadata': dict(vsa_id=str(vsa_id))}) - for vc_instance in vc_instances: - if vc_instance['fixed_ips']: - for fixed in vc_instance['fixed_ips']: - # insert the [floating,fixed] (if exists) in the head, - # otherwise append the [none,fixed] in the tail - ip = {} - ip['fixed'] = fixed['address'] - if fixed['floating_ips']: - ip['floating'] = fixed['floating_ips'][0]['address'] - result.append(ip) - - return result - #################### diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py deleted file mode 100644 index 8a57bd234..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py +++ /dev/null @@ -1,133 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table -from sqlalchemy import Text, Boolean, ForeignKey - -from nova import log as logging - -meta = MetaData() - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of tables . -# - -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -to_vsa_id = Column('to_vsa_id', Integer(), nullable=True) -from_vsa_id = Column('from_vsa_id', Integer(), nullable=True) -drive_type_id = Column('drive_type_id', Integer(), nullable=True) - - -# New Tables -# - -virtual_storage_arrays = Table('virtual_storage_arrays', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_type_id', Integer(), nullable=False), - Column('image_ref', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vc_count', Integer(), nullable=False), - Column('vol_count', Integer(), nullable=False), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - -drive_types = Table('drive_types', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True), - Column('type', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('size_gb', Integer(), nullable=False), - Column('rpm', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('capabilities', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('visible', Boolean(create_constraint=True, name=None)), - ) - -new_tables = (virtual_storage_arrays, drive_types) - -# -# Tables to alter -# - - -def upgrade(migrate_engine): - - from nova import context - from nova import db - from nova import flags - - FLAGS = flags.FLAGS - - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - for table in new_tables: - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise - - volumes.create_column(to_vsa_id) - volumes.create_column(from_vsa_id) - volumes.create_column(drive_type_id) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - volumes.drop_column(to_vsa_id) - volumes.drop_column(from_vsa_id) - volumes.drop_column(drive_type_id) - - for table in new_tables: - table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py new file mode 100644 index 000000000..844643704 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Text, Boolean, ForeignKey + +from nova import log as logging + +meta = MetaData() + +# +# New Tables +# + +virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + virtual_storage_arrays.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + virtual_storage_arrays.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 65464ece5..f8feb0b4f 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -352,13 +352,6 @@ class Volume(BASE, NovaBase): volume_type_id = Column(Integer) - to_vsa_id = Column(Integer, - ForeignKey('virtual_storage_arrays.id'), nullable=True) - from_vsa_id = Column(Integer, - ForeignKey('virtual_storage_arrays.id'), nullable=True) - drive_type_id = Column(Integer, - ForeignKey('drive_types.id'), nullable=True) - class VolumeMetadata(BASE, NovaBase): """Represents a metadata key/value pair for a volume""" @@ -402,38 +395,6 @@ class VolumeTypeExtraSpecs(BASE, NovaBase): 'VolumeTypeExtraSpecs.deleted == False)') -class DriveTypes(BASE, NovaBase): - """Represents the known drive types (storage media).""" - __tablename__ = 'drive_types' - - id = Column(Integer, primary_key=True, autoincrement=True) - - """ - @property - def name(self): - if self.capabilities: - return FLAGS.drive_type_template_long % \ - (self.type, str(self.size_gb), self.rpm, self.capabilities) - else: - return FLAGS.drive_type_template_short % \ - (self.type, str(self.size_gb), self.rpm) - """ - - name = Column(String(255), unique=True) - type = Column(String(255)) - size_gb = Column(Integer) - rpm = Column(String(255)) - capabilities = Column(String(255)) - - visible = Column(Boolean, default=True) - - volumes = relationship(Volume, - backref=backref('drive_type', uselist=False), - foreign_keys=id, - primaryjoin='and_(Volume.drive_type_id == ' - 'DriveTypes.id)') - - class Quota(BASE, NovaBase): """Represents a single quota override for a project. 
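[Editor's note] The new 043 migration creates only the virtual_storage_arrays table (drive-type data having moved to volume-type extra specs), and its upgrade() wraps the create in a try/except. The except block logs repr(table), but no name `table` is bound in that function; it looks carried over from the per-table loop of the replaced 042 migration, and virtual_storage_arrays is presumably what was meant. A minimal sketch of the upgrade with that reference made explicit, behaviour otherwise unchanged:

    def upgrade(migrate_engine):
        # Bind migrate_engine to the metadata; don't create a new engine.
        meta.bind = migrate_engine
        try:
            virtual_storage_arrays.create()
        except Exception:
            # Log the table being created, then re-raise.
            logging.info(repr(virtual_storage_arrays))
            logging.exception('Exception while creating table')
            raise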
@@ -918,7 +879,9 @@ def register_models(): Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project, Certificate, ConsolePool, Console, Zone, - AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration) + VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs, + AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration, + VirtualStorageArray) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 7b717115c..643e2338e 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -30,9 +30,11 @@ import nova.exception import nova.flags import nova.log + FLAGS = nova.flags.FLAGS LOG = nova.log.getLogger("nova.db.sqlalchemy") + try: import MySQLdb except ImportError: diff --git a/nova/exception.py b/nova/exception.py index f75d0b832..32981f4d5 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -365,10 +365,6 @@ class VolumeTypeExtraSpecsNotFound(NotFound): "key %(extra_specs_key)s.") -class VolumeNotFoundForVsa(VolumeNotFound): - message = _("Volume not found for vsa %(vsa_id)s.") - - class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") @@ -799,14 +795,6 @@ class VirtualStorageArrayNotFoundByName(NotFound): message = _("Virtual Storage Array %(name)s could not be found.") -class VirtualDiskTypeNotFound(NotFound): - message = _("Drive Type %(id)d could not be found.") - - -class VirtualDiskTypeNotFoundByName(NotFound): - message = _("Drive Type %(name)s could not be found.") - - class CannotResizeToSameSize(NovaException): message = _("When resizing, instances must change size!") diff --git a/nova/log.py b/nova/log.py index 3b86d78e8..eb0b6020f 100644 --- a/nova/log.py +++ b/nova/log.py @@ -32,6 +32,7 @@ import json import logging import logging.handlers import os +import stat import sys import traceback @@ -258,7 +259,6 @@ class NovaRootLogger(NovaLogger): self.addHandler(self.filelog) self.logpath = logpath - import stat st = os.stat(self.logpath) if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode): os.chmod(self.logpath, FLAGS.logfile_mode) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 3de605ae2..57c1d0c28 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -508,7 +508,6 @@ def get_dhcp_hosts(context, network_ref): if network_ref['multi_host'] and FLAGS.host != host: continue hosts.append(_host_dhcp(fixed_ref)) - return '\n'.join(hosts) diff --git a/nova/quota.py b/nova/quota.py index 48e598659..771477747 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -116,8 +116,9 @@ def allowed_volumes(context, requested_volumes, size): allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) - allowed_volumes = min(allowed_volumes, - int(allowed_gigabytes // size)) + if size != 0: + allowed_volumes = min(allowed_volumes, + int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 218ad5c7b..ad5ebc2dc 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -20,15 +20,15 @@ VSA Simple Scheduler """ from nova import context -from nova import rpc from nova import db from nova import flags +from nova import log as logging +from nova import rpc from nova import utils -from nova.vsa.api import VsaState -from nova.volume import api as volume_api from nova.scheduler 
import driver from nova.scheduler import simple -from nova import log as logging +from nova.vsa.api import VsaState +from nova.volume import volume_types LOG = logging.getLogger('nova.scheduler.vsa') @@ -67,21 +67,21 @@ class VsaScheduler(simple.SimpleScheduler): def _compare_names(str1, str2): return str1.lower() == str2.lower() - def _compare_sizes_approxim(cap_capacity, size_gb): + def _compare_sizes_approxim(cap_capacity, size): cap_capacity = BYTES_TO_GB(int(cap_capacity)) - size_gb = int(size_gb) - size_perc = size_gb * \ + size = int(size) + size_perc = size * \ FLAGS.drive_type_approx_capacity_percent / 100 - return cap_capacity >= size_gb - size_perc and \ - cap_capacity <= size_gb + size_perc + return cap_capacity >= size - size_perc and \ + cap_capacity <= size + size_perc # Add more entries for additional comparisons compare_list = [{'cap1': 'DriveType', 'cap2': 'type', 'cmp_func': _compare_names}, {'cap1': 'DriveCapacity', - 'cap2': 'size_gb', + 'cap2': 'size', 'cmp_func': _compare_sizes_approxim}] for cap in compare_list: @@ -193,8 +193,8 @@ class VsaScheduler(simple.SimpleScheduler): 'attach_status': "detached", 'display_name': vol['name'], 'display_description': vol['description'], - 'to_vsa_id': vsa_id, - 'drive_type_id': vol['drive_ref']['id'], + 'volume_type_id': vol['volume_type_id'], + 'metadata': dict(to_vsa_id=vsa_id), 'host': vol['host'], 'scheduled_at': now } @@ -228,7 +228,8 @@ class VsaScheduler(simple.SimpleScheduler): def _assign_hosts_to_volumes(self, context, volume_params, forced_host): - prev_drive_type_id = None + prev_volume_type_id = None + request_spec = {} selected_hosts = [] LOG.debug(_("volume_params %(volume_params)s") % locals()) @@ -244,14 +245,25 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = None continue - drive_type = vol['drive_ref'] - request_spec = {'size': vol['size'], - 'drive_type': dict(drive_type)} + volume_type_id = vol['volume_type_id'] + request_spec['size'] = vol['size'] - if prev_drive_type_id != drive_type['id']: + if prev_volume_type_id is None or\ + prev_volume_type_id != volume_type_id: # generate list of hosts for this drive type + + volume_type = volume_types.get_volume_type(context, + volume_type_id) + drive_type = { + 'name': volume_type['extra_specs'].get('drive_name'), + 'type': volume_type['extra_specs'].get('drive_type'), + 'size': int(volume_type['extra_specs'].get('drive_size')), + 'rpm': volume_type['extra_specs'].get('drive_rpm'), + } + request_spec['drive_type'] = drive_type + all_hosts = self._filter_hosts("volume", request_spec) - prev_drive_type_id = drive_type['id'] + prev_volume_type_id = volume_type_id (host, qos_cap) = self._select_hosts(request_spec, all_hosts, selected_hosts) @@ -279,8 +291,7 @@ class VsaScheduler(simple.SimpleScheduler): self._provision_volume(context, vol, vsa_id, availability_zone) except: if vsa_id: - db.vsa_update(context, vsa_id, - dict(status=VsaState.FAILED)) + db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED)) for vol in volume_params: if 'capabilities' in vol: @@ -302,12 +313,23 @@ class VsaScheduler(simple.SimpleScheduler): 'scheduled_at': now}) return host - drive_type = volume_ref['drive_type'] - if drive_type is None: + volume_type_id = volume_ref['volume_type_id'] + if volume_type_id: + volume_type = volume_types.get_volume_type(context, volume_type_id) + + if volume_type_id is None or\ + volume_types.is_vsa_volume(volume_type_id, volume_type): + LOG.debug(_("Non-VSA volume %d"), volume_ref['id']) return super(VsaScheduler, 
self).schedule_create_volume(context, volume_id, *_args, **_kwargs) - drive_type = dict(drive_type) + + drive_type = { + 'name': volume_type['extra_specs'].get('drive_name'), + 'type': volume_type['extra_specs'].get('drive_type'), + 'size': int(volume_type['extra_specs'].get('drive_size')), + 'rpm': volume_type['extra_specs'].get('drive_rpm'), + } LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ "%(drive_type)s"), locals()) diff --git a/nova/tests/api/openstack/contrib/test_drive_types.py b/nova/tests/api/openstack/contrib/test_drive_types.py deleted file mode 100644 index 2f7d327d3..000000000 --- a/nova/tests/api/openstack/contrib/test_drive_types.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import stubout -import webob - -#from nova import compute -from nova.vsa import drive_types -from nova import exception -from nova import context -from nova import test -from nova import log as logging -from nova.tests.api.openstack import fakes - -from nova.api.openstack.contrib.drive_types import _drive_type_view - -LOG = logging.getLogger('nova.tests.api.openstack.drive_types') - -last_param = {} - - -def _get_default_drive_type(): - param = { - 'name': 'Test drive type', - 'type': 'SATA', - 'size_gb': 123, - 'rpm': '7200', - 'capabilities': '', - 'visible': True - } - return param - - -def _create(context, **param): - global last_param - LOG.debug(_("_create: %s"), param) - param['id'] = 123 - last_param = param - return param - - -def _delete(context, id): - global last_param - last_param = dict(id=id) - - LOG.debug(_("_delete: %s"), locals()) - - -def _get(context, id): - global last_param - last_param = dict(id=id) - - LOG.debug(_("_get: %s"), locals()) - if id != '123': - raise exception.NotFound - - dtype = _get_default_drive_type() - dtype['id'] = id - return dtype - - -def _get_all(context, visible=True): - LOG.debug(_("_get_all: %s"), locals()) - dtype = _get_default_drive_type() - dtype['id'] = 123 - return [dtype] - - -class DriveTypesApiTest(test.TestCase): - def setUp(self): - super(DriveTypesApiTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_networking(self.stubs) - fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) - self.stubs.Set(drive_types, "create", _create) - self.stubs.Set(drive_types, "delete", _delete) - self.stubs.Set(drive_types, "get", _get) - self.stubs.Set(drive_types, "get_all", _get_all) - - self.context = context.get_admin_context() - - def tearDown(self): - self.stubs.UnsetAll() - super(DriveTypesApiTest, self).tearDown() - - def test_drive_types_api_create(self): - global last_param - last_param = {} - - dtype = _get_default_drive_type() - dtype['id'] = 123 - - body = dict(drive_type=_drive_type_view(dtype)) - req = webob.Request.blank('/v1.1/zadr-drive_types') - req.method = 'POST' - req.body = json.dumps(body) - 
req.headers['content-type'] = 'application/json' - - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - - # Compare if parameters were correctly passed to stub - for k, v in last_param.iteritems(): - self.assertEqual(last_param[k], dtype[k]) - - resp_dict = json.loads(resp.body) - - # Compare response - self.assertTrue('drive_type' in resp_dict) - resp_dtype = resp_dict['drive_type'] - self.assertEqual(resp_dtype, _drive_type_view(dtype)) - - def test_drive_types_api_delete(self): - global last_param - last_param = {} - - dtype_id = 123 - req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) - req.method = 'DELETE' - - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - self.assertEqual(str(last_param['id']), str(dtype_id)) - - def test_drive_types_show(self): - global last_param - last_param = {} - - dtype_id = 123 - req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) - req.method = 'GET' - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - self.assertEqual(str(last_param['id']), str(dtype_id)) - - resp_dict = json.loads(resp.body) - - # Compare response - self.assertTrue('drive_type' in resp_dict) - resp_dtype = resp_dict['drive_type'] - exp_dtype = _get_default_drive_type() - exp_dtype['id'] = dtype_id - exp_dtype_view = _drive_type_view(exp_dtype) - for k, v in exp_dtype_view.iteritems(): - self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) - - def test_drive_types_show_invalid_id(self): - global last_param - last_param = {} - - dtype_id = 234 - req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) - req.method = 'GET' - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 404) - self.assertEqual(str(last_param['id']), str(dtype_id)) - - def test_drive_types_index(self): - - req = webob.Request.blank('/v1.1/zadr-drive_types') - req.method = 'GET' - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - - resp_dict = json.loads(resp.body) - - self.assertTrue('drive_types' in resp_dict) - resp_dtypes = resp_dict['drive_types'] - self.assertEqual(len(resp_dtypes), 1) - - resp_dtype = resp_dtypes.pop() - exp_dtype = _get_default_drive_type() - exp_dtype['id'] = 123 - exp_dtype_view = _drive_type_view(exp_dtype) - for k, v in exp_dtype_view.iteritems(): - self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index a9b76b0ff..311b6cb8d 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -18,15 +18,14 @@ import stubout import unittest import webob - +from nova import context +from nova import db from nova import exception from nova import flags -from nova import vsa -from nova import db -from nova import volume -from nova import context -from nova import test from nova import log as logging +from nova import test +from nova import volume +from nova import vsa from nova.api import openstack from nova.tests.api.openstack import fakes import nova.wsgi @@ -120,7 +119,7 @@ class VSAApiTest(test.TestCase): vsa = {"displayName": "VSA Test Name", "displayDescription": "VSA Test Desc"} body = dict(vsa=vsa) - req = webob.Request.blank('/v1.1/zadr-vsa') + req = webob.Request.blank('/v1.1/777/zadr-vsa') req.method = 'POST' req.body = json.dumps(body) req.headers['content-type'] = 'application/json' @@ -139,7 +138,7 @@ class 
VSAApiTest(test.TestCase): vsa['displayDescription']) def test_vsa_create_no_body(self): - req = webob.Request.blank('/v1.1/zadr-vsa') + req = webob.Request.blank('/v1.1/777/zadr-vsa') req.method = 'POST' req.body = json.dumps({}) req.headers['content-type'] = 'application/json' @@ -152,7 +151,7 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 123 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -164,7 +163,7 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 234 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -176,7 +175,7 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 123 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) @@ -191,14 +190,14 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 234 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 404) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) def test_vsa_index(self): - req = webob.Request.blank('/v1.1/zadr-vsa') + req = webob.Request.blank('/v1.1/777/zadr-vsa') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) @@ -213,7 +212,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp_vsa['id'], 123) def test_vsa_detail(self): - req = webob.Request.blank('/v1.1/zadr-vsa/detail') + req = webob.Request.blank('/v1.1/777/zadr-vsa/detail') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) @@ -239,17 +238,21 @@ def _get_default_volume_param(): 'name': 'vol name', 'display_name': 'Default vol name', 'display_description': 'Default vol description', - 'from_vsa_id': None, - 'to_vsa_id': None, + 'volume_type_id': 1, + 'volume_metadata': [], } +def stub_get_vsa_volume_type(self, context): + return {'id': 1, + 'name': 'VSA volume type', + 'extra_specs': {'type': 'vsa_volume'}} + + def stub_volume_create(self, context, size, snapshot_id, name, description, **param): LOG.debug(_("_create: param=%s"), size) vol = _get_default_volume_param() - for k, v in param.iteritems(): - vol[k] = v vol['size'] = size vol['display_name'] = name vol['display_description'] = description @@ -270,10 +273,10 @@ def stub_volume_get(self, context, volume_id): LOG.debug(_("_volume_get: volume_id=%s"), volume_id) vol = _get_default_volume_param() vol['id'] = volume_id - if volume_id == '234': - vol['from_vsa_id'] = 123 + meta = {'key': 'from_vsa_id', 'value': '123'} if volume_id == '345': - vol['to_vsa_id'] = 123 + meta = {'key': 'to_vsa_id', 'value': '123'} + vol['volume_metadata'].append(meta) return vol @@ -281,9 +284,9 @@ def stub_volume_get_notfound(self, context, volume_id): raise exception.NotFound -def stub_volume_get_all_by_vsa(self, context, vsa_id, direction): +def stub_volume_get_all(self, context, search_opts): vol = stub_volume_get(self, context, '123') - vol['%s_vsa_id' % direction] = vsa_id + vol['metadata'] = search_opts['metadata'] return [vol] @@ -302,13 +305,13 @@ class 
VSAVolumeApiTest(test.TestCase): fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) self.stubs.Set(nova.db.api, 'vsa_get', return_vsa) + self.stubs.Set(vsa.api.API, "get_vsa_volume_type", + stub_get_vsa_volume_type) - self.stubs.Set(volume.api.API, "create", stub_volume_create) self.stubs.Set(volume.api.API, "update", stub_volume_update) self.stubs.Set(volume.api.API, "delete", stub_volume_delete) - self.stubs.Set(volume.api.API, "get_all_by_vsa", - stub_volume_get_all_by_vsa) self.stubs.Set(volume.api.API, "get", stub_volume_get) + self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all) self.context = context.get_admin_context() self.test_obj = test_obj if test_obj else "volume" @@ -319,11 +322,13 @@ class VSAVolumeApiTest(test.TestCase): super(VSAVolumeApiTest, self).tearDown() def test_vsa_volume_create(self): + self.stubs.Set(volume.api.API, "create", stub_volume_create) + vol = {"size": 100, "displayName": "VSA Volume Test Name", "displayDescription": "VSA Volume Test Desc"} body = {self.test_obj: vol} - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) req.method = 'POST' req.body = json.dumps(body) req.headers['content-type'] = 'application/json' @@ -344,7 +349,7 @@ class VSAVolumeApiTest(test.TestCase): self.assertEqual(resp.status_int, 400) def test_vsa_volume_create_no_body(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) req.method = 'POST' req.body = json.dumps({}) req.headers['content-type'] = 'application/json' @@ -356,25 +361,25 @@ class VSAVolumeApiTest(test.TestCase): self.assertEqual(resp.status_int, 400) def test_vsa_volume_index(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) def test_vsa_volume_detail(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/detail' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \ self.test_objs) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) def test_vsa_volume_show(self): obj_num = 234 if self.test_objs == "volumes" else 345 - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ (self.test_objs, obj_num)) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) def test_vsa_volume_show_no_vsa_assignment(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ (self.test_objs)) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 400) @@ -382,7 +387,7 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_show_no_volume(self): self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ (self.test_objs)) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 404) @@ -392,7 +397,7 @@ class VSAVolumeApiTest(test.TestCase): update = {"status": "available", "displayName": "Test Display name"} body = {self.test_obj: update} - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ 
(self.test_objs, obj_num)) req.method = 'PUT' req.body = json.dumps(body) @@ -406,7 +411,7 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_delete(self): obj_num = 234 if self.test_objs == "volumes" else 345 - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ (self.test_objs, obj_num)) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -416,7 +421,7 @@ class VSAVolumeApiTest(test.TestCase): self.assertEqual(resp.status_int, 400) def test_vsa_volume_delete_no_vsa_assignment(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ (self.test_objs)) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -425,7 +430,7 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_delete_no_volume(self): self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ (self.test_objs)) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 6e9cae38d..05267d8fb 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -85,7 +85,6 @@ class ExtensionControllerTest(test.TestCase): ext_path = os.path.join(os.path.dirname(__file__), "extensions") self.flags(osapi_extensions_path=ext_path) self.ext_list = [ - "DriveTypes", "Createserverext", "FlavorExtraSpecs", "Floating_ips", @@ -96,8 +95,8 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "Rescue", "SecurityGroups", - "VirtualInterfaces", "VSAs", + "VirtualInterfaces", "Volumes", "VolumeTypes", ] diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py index 697ad3842..309db96a2 100644 --- a/nova/tests/scheduler/test_vsa_scheduler.py +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -16,13 +16,15 @@ import stubout import nova + +from nova import context +from nova import db from nova import exception from nova import flags -from nova import db -from nova import context +from nova import log as logging from nova import test from nova import utils -from nova import log as logging +from nova.volume import volume_types from nova.scheduler import vsa as vsa_sched from nova.scheduler import driver @@ -52,15 +54,26 @@ class VsaSchedulerTestCase(test.TestCase): def _get_vol_creation_request(self, num_vols, drive_ix, size=0): volume_params = [] for i in range(num_vols): - drive_type = {'id': i, - 'name': 'name_' + str(drive_ix), - 'type': 'type_' + str(drive_ix), - 'size_gb': 1 + 100 * (drive_ix)} + + name = 'name_' + str(i) + try: + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + except exception.ApiError: + # type is already created + pass + + volume_type = volume_types.get_volume_type_by_name(self.context, + name) volume = {'size': size, 'snapshot_id': None, 'name': 'vol_' + str(i), 'description': None, - 'drive_ref': drive_type} + 'volume_type_id': volume_type['id']} volume_params.append(volume) return {'num_volumes': len(volume_params), @@ -217,7 +230,12 @@ class VsaSchedulerTestCase(test.TestCase): self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get) 
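[Editor's note] Throughout the scheduler and these tests, drive characteristics that used to live in the drive_types table are now expressed as extra specs on a volume type. A short sketch of the pattern, using the same keys the scheduler reads back when it builds its request_spec (type 'vsa_drive', drive_name, drive_type, drive_size, drive_rpm):

    from nova import context
    from nova.volume import volume_types

    ctxt = context.get_admin_context()

    # Register a drive type as a volume type carrying VSA extra specs.
    volume_types.create(ctxt, 'SATA_500_7200',
                        extra_specs={'type': 'vsa_drive',
                                     'drive_name': 'SATA_500_7200',
                                     'drive_type': 'SATA',
                                     'drive_size': '500',
                                     'drive_rpm': '7200'})

    # Recover the drive parameters the way VsaScheduler does.
    vol_type = volume_types.get_volume_type_by_name(ctxt, 'SATA_500_7200')
    drive = {'name': vol_type['extra_specs'].get('drive_name'),
             'type': vol_type['extra_specs'].get('drive_type'),
             'size': int(vol_type['extra_specs'].get('drive_size')),
             'rpm': vol_type['extra_specs'].get('drive_rpm')}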
self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update) + self.created_types_lst = [] + def tearDown(self): + for name in self.created_types_lst: + volume_types.purge(self.context, name) + self.stubs.UnsetAll() super(VsaSchedulerTestCase, self).tearDown() @@ -463,7 +481,7 @@ class VsaSchedulerTestCase(test.TestCase): global global_volume global_volume = {} - global_volume['drive_type'] = None + global_volume['volume_type_id'] = None self.assertRaises(driver.NoValidHost, self.sched.schedule_create_volume, @@ -485,12 +503,16 @@ class VsaSchedulerTestCase(test.TestCase): global_volume = {} drive_ix = 2 - drive_type = {'id': drive_ix, - 'name': 'name_' + str(drive_ix), - 'type': 'type_' + str(drive_ix), - 'size_gb': 1 + 100 * (drive_ix)} - - global_volume['drive_type'] = drive_type + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] global_volume['size'] = 0 host = self.sched.schedule_create_volume(self.context, @@ -525,12 +547,16 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase): global_volume = {} drive_ix = 2 - drive_type = {'id': drive_ix, - 'name': 'name_' + str(drive_ix), - 'type': 'type_' + str(drive_ix), - 'size_gb': 1 + 100 * (drive_ix)} - - global_volume['drive_type'] = drive_type + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] global_volume['size'] = 0 host = self.sched.schedule_create_volume(self.context, diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py deleted file mode 100644 index b52e6705b..000000000 --- a/nova/tests/test_drive_types.py +++ /dev/null @@ -1,146 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Unit Tests for drive types codecode -""" -import time - -from nova import context -from nova import flags -from nova import log as logging -from nova import test -from nova.vsa import drive_types - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.test_drive_types') - - -class DriveTypesTestCase(test.TestCase): - """Test cases for driver types code""" - def setUp(self): - super(DriveTypesTestCase, self).setUp() - self.ctxt = context.RequestContext(None, None) - self.ctxt_admin = context.get_admin_context() - self._dtype = self._create_default_drive_type() - - def tearDown(self): - self._dtype = None - - def _create_default_drive_type(self): - """Create a volume object.""" - dtype = {} - dtype['type'] = 'SATA' - dtype['size_gb'] = 150 - dtype['rpm'] = 5000 - dtype['capabilities'] = None - dtype['visible'] = True - - LOG.debug(_("Default values for Drive Type: %s"), dtype) - return dtype - - def test_drive_type_create_delete(self): - dtype = self._dtype - prev_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - - new = drive_types.create(self.ctxt_admin, **dtype) - for k, v in dtype.iteritems(): - self.assertEqual(v, new[k], 'one of fields doesnt match') - - new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - self.assertNotEqual(len(prev_all_dtypes), - len(new_all_dtypes), - 'drive type was not created') - - drive_types.delete(self.ctxt_admin, new['id']) - new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - self.assertEqual(prev_all_dtypes, - new_all_dtypes, - 'drive types was not deleted') - - def test_drive_type_check_name_generation(self): - dtype = self._dtype - new = drive_types.create(self.ctxt_admin, **dtype) - expected_name = FLAGS.drive_type_template_short % \ - (dtype['type'], dtype['size_gb'], dtype['rpm']) - self.assertEqual(new['name'], expected_name, - 'name was not generated correctly') - - dtype['capabilities'] = 'SEC' - new2 = drive_types.create(self.ctxt_admin, **dtype) - expected_name = FLAGS.drive_type_template_long % \ - (dtype['type'], dtype['size_gb'], dtype['rpm'], - dtype['capabilities']) - self.assertEqual(new2['name'], expected_name, - 'name was not generated correctly') - - drive_types.delete(self.ctxt_admin, new['id']) - drive_types.delete(self.ctxt_admin, new2['id']) - - def test_drive_type_create_delete_invisible(self): - dtype = self._dtype - dtype['visible'] = False - prev_all_dtypes = drive_types.get_all(self.ctxt_admin, True) - new = drive_types.create(self.ctxt_admin, **dtype) - - new_all_dtypes = drive_types.get_all(self.ctxt_admin, True) - self.assertEqual(prev_all_dtypes, new_all_dtypes) - - new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - self.assertNotEqual(prev_all_dtypes, new_all_dtypes) - - drive_types.delete(self.ctxt_admin, new['id']) - - def test_drive_type_rename_update(self): - dtype = self._dtype - dtype['capabilities'] = None - - new = drive_types.create(self.ctxt_admin, **dtype) - for k, v in dtype.iteritems(): - self.assertEqual(v, new[k], 'one of fields doesnt match') - - new_name = 'NEW_DRIVE_NAME' - new = drive_types.rename(self.ctxt_admin, new['name'], new_name) - self.assertEqual(new['name'], new_name) - - new = drive_types.rename(self.ctxt_admin, new_name) - expected_name = FLAGS.drive_type_template_short % \ - (dtype['type'], dtype['size_gb'], dtype['rpm']) - self.assertEqual(new['name'], expected_name) - - changes = {'rpm': 7200} - new = drive_types.update(self.ctxt_admin, new['id'], **changes) - for k, v in changes.iteritems(): - self.assertEqual(v, new[k], 'one of fields 
doesnt match') - - drive_types.delete(self.ctxt_admin, new['id']) - - def test_drive_type_get(self): - dtype = self._dtype - new = drive_types.create(self.ctxt_admin, **dtype) - - new2 = drive_types.get(self.ctxt_admin, new['id']) - for k, v in new2.iteritems(): - self.assertEqual(str(new[k]), str(new2[k]), - 'one of fields doesnt match') - - new2 = drive_types.get_by_name(self.ctxt_admin, new['name']) - for k, v in new.iteritems(): - self.assertEqual(str(new[k]), str(new2[k]), - 'one of fields doesnt match') - - drive_types.delete(self.ctxt_admin, new['id']) diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 726939744..300a4d71c 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -13,38 +13,29 @@ # License for the specific language governing permissions and limitations # under the License. -import stubout import base64 +import stubout from xml.etree import ElementTree from xml.etree.ElementTree import Element, SubElement +from nova import context +from nova import db from nova import exception from nova import flags +from nova import log as logging +from nova import test from nova import vsa from nova import volume -from nova import db -from nova import context -from nova import test -from nova import log as logging +from nova.volume import volume_types +from nova.vsa import utils as vsa_utils + import nova.image.fake FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.vsa') -def fake_drive_type_get_by_name(context, name): - drive_type = { - 'id': 1, - 'name': name, - 'type': name.split('_')[0], - 'size_gb': int(name.split('_')[1]), - 'rpm': name.split('_')[2], - 'capabilities': '', - 'visible': True} - return drive_type - - class VsaTestCase(test.TestCase): def setUp(self): @@ -53,9 +44,20 @@ class VsaTestCase(test.TestCase): self.vsa_api = vsa.API() self.volume_api = volume.API() + FLAGS.quota_volumes = 100 + FLAGS.quota_gigabytes = 10000 + self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() + volume_types.create(self.context, + 'SATA_500_7200', + extra_specs={'type': 'vsa_drive', + 'drive_name': 'SATA_500_7200', + 'drive_type': 'SATA', + 'drive_size': '500', + 'drive_rpm': '7200'}) + def fake_show_by_name(meh, context, name): if name == 'wrong_image_name': LOG.debug(_("Test: Emulate wrong VSA name. 
Raise")) @@ -124,9 +126,6 @@ class VsaTestCase(test.TestCase): FLAGS.vsa_multi_vol_creation = multi_vol_creation - self.stubs.Set(nova.vsa.drive_types, 'get_by_name', - fake_drive_type_get_by_name) - param = {'storage': [{'drive_name': 'SATA_500_7200', 'num_drives': 3}]} vsa_ref = self.vsa_api.create(self.context, **param) @@ -157,8 +156,6 @@ class VsaTestCase(test.TestCase): self.vsa_api.delete(self.context, vsa_ref['id']) def test_vsa_generate_user_data(self): - self.stubs.Set(nova.vsa.drive_types, 'get_by_name', - fake_drive_type_get_by_name) FLAGS.vsa_multi_vol_creation = False param = {'display_name': 'VSA name test', @@ -167,12 +164,10 @@ class VsaTestCase(test.TestCase): 'storage': [{'drive_name': 'SATA_500_7200', 'num_drives': 3}]} vsa_ref = self.vsa_api.create(self.context, **param) - volumes = db.volume_get_all_assigned_to_vsa(self.context, - vsa_ref['id']) + volumes = self.vsa_api.get_all_vsa_drives(self.context, + vsa_ref['id']) - user_data = self.vsa_api.generate_user_data(self.context, - vsa_ref, - volumes) + user_data = vsa_utils.generate_user_data(vsa_ref, volumes) user_data = base64.b64decode(user_data) LOG.debug(_("Test: user_data = %s"), user_data) diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index d451a4377..43173d86a 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -29,15 +29,6 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.vsa.volumes') -def _default_volume_param(): - return { - 'size': 1, - 'snapshot_id': None, - 'name': 'Test volume name', - 'description': 'Test volume desc name' - } - - class VsaVolumesTestCase(test.TestCase): def setUp(self): @@ -49,6 +40,8 @@ class VsaVolumesTestCase(test.TestCase): self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() + self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context) + def fake_show_by_name(meh, context, name): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} @@ -66,12 +59,23 @@ class VsaVolumesTestCase(test.TestCase): self.stubs.UnsetAll() super(VsaVolumesTestCase, self).tearDown() + def _default_volume_param(self): + return { + 'size': 1, + 'snapshot_id': None, + 'name': 'Test volume name', + 'description': 'Test volume desc name', + 'volume_type': self.default_vol_type, + 'metadata': {'from_vsa_id': self.vsa_id} + } + + def _get_all_volumes_by_vsa(self): + return self.volume_api.get_all(self.context, + search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}}) + def test_vsa_volume_create_delete(self): """ Check if volume properly created and deleted. 
""" - vols1 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") - volume_param = _default_volume_param() - volume_param['from_vsa_id'] = self.vsa_id + volume_param = self._default_volume_param() volume_ref = self.volume_api.create(self.context, **volume_param) self.assertEqual(volume_ref['display_name'], @@ -81,21 +85,34 @@ class VsaVolumesTestCase(test.TestCase): self.assertEqual(volume_ref['size'], volume_param['size']) self.assertEqual(volume_ref['status'], - 'available') + 'creating') - vols2 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") - self.assertEqual(len(vols1) + 1, len(vols2)) + vols2 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols2[0] + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'creating') + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'available'}) self.volume_api.delete(self.context, volume_ref['id']) - vols3 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") - self.assertEqual(len(vols3) + 1, len(vols2)) + + vols3 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols3[0] + self.assertEqual(volume_ref['status'], + 'deleting') def test_vsa_volume_delete_nonavail_volume(self): """ Check volume deleton in different states. """ - volume_param = _default_volume_param() - volume_param['from_vsa_id'] = self.vsa_id + volume_param = self._default_volume_param() volume_ref = self.volume_api.create(self.context, **volume_param) self.volume_api.update(self.context, @@ -104,26 +121,18 @@ class VsaVolumesTestCase(test.TestCase): self.volume_api.delete, self.context, volume_ref['id']) - self.volume_api.update(self.context, - volume_ref['id'], {'status': 'error'}) - self.volume_api.delete(self.context, volume_ref['id']) - def test_vsa_volume_delete_vsa_with_volumes(self): """ Check volume deleton in different states. 
""" - vols1 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") + vols1 = self._get_all_volumes_by_vsa() for i in range(3): - volume_param = _default_volume_param() - volume_param['from_vsa_id'] = self.vsa_id + volume_param = self._default_volume_param() volume_ref = self.volume_api.create(self.context, **volume_param) - vols2 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") + vols2 = self._get_all_volumes_by_vsa() self.assertEqual(len(vols1) + 3, len(vols2)) self.vsa_api.delete(self.context, self.vsa_id) - vols3 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") + vols3 = self._get_all_volumes_by_vsa() self.assertEqual(len(vols1), len(vols3)) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..6d1958401 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -203,6 +203,7 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) + @test.skip_test("Skip this test meanwhile") def test_parallel_builds(self): stubs.stubout_loopingcall_delay(self.stubs) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index abbef69bd..363a20ed0 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -135,8 +135,6 @@ flags.DEFINE_string('default_local_format', None, 'The default format a local_volume will be formatted with ' 'on creation.') - - flags.DEFINE_bool('libvirt_use_virtio_for_bridges', False, 'Use virtio for bridge interfaces') @@ -1088,7 +1086,8 @@ class LibvirtConnection(driver.ComputeDriver): 'ebs_root': ebs_root, 'local_device': local_device, 'volumes': block_device_mapping, - 'use_virtio_for_bridges': FLAGS.libvirt_use_virtio_for_bridges, + 'use_virtio_for_bridges': + FLAGS.libvirt_use_virtio_for_bridges, 'ephemerals': ephemerals} root_device_name = driver.block_device_info_get_root(block_device_info) diff --git a/nova/volume/api.py b/nova/volume/api.py index e66792373..d9c082514 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -42,9 +42,7 @@ class API(base.Base): """API for interacting with the volume manager.""" def create(self, context, size, snapshot_id, name, description, - volume_type=None, metadata=None, - to_vsa_id=None, from_vsa_id=None, drive_type_id=None, - availability_zone=None): + volume_type=None, metadata=None, availability_zone=None): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": @@ -53,13 +51,12 @@ class API(base.Base): if not size: size = snapshot['volume_size'] - if to_vsa_id is None: - if quota.allowed_volumes(context, 1, size) < 1: - pid = context.project_id - LOG.warn(_("Quota exceeded for %(pid)s, tried to create" - " %(size)sG volume") % locals()) - raise quota.QuotaError(_("Volume quota exceeded. You cannot " - "create a volume of size %sG") % size) + if quota.allowed_volumes(context, 1, size) < 1: + pid = context.project_id + LOG.warn(_("Quota exceeded for %(pid)s, tried to create" + " %(size)sG volume") % locals()) + raise quota.QuotaError(_("Volume quota exceeded. 
You cannot " + "create a volume of size %sG") % size) if availability_zone is None: availability_zone = FLAGS.storage_availability_zone @@ -81,19 +78,9 @@ class API(base.Base): 'display_description': description, 'volume_type_id': volume_type_id, 'metadata': metadata, - 'to_vsa_id': to_vsa_id, - 'from_vsa_id': from_vsa_id, - 'drive_type_id': drive_type_id, } volume = self.db.volume_create(context, options) - if from_vsa_id is not None: # for FE VSA volumes do nothing - now = utils.utcnow() - volume = self.db.volume_update(context, - volume['id'], {'status': 'available', - 'launched_at': now}) - return volume - rpc.cast(context, FLAGS.scheduler_topic, {"method": "create_volume", @@ -112,15 +99,6 @@ class API(base.Base): def delete(self, context, volume_id): volume = self.get(context, volume_id) - - if volume['from_vsa_id'] is not None: - if volume['status'] == "in-use": - raise exception.ApiError(_("Volume is in use. "\ - "Detach it first")) - self.db.volume_destroy(context, volume['id']) - LOG.debug(_("volume %d: deleted successfully"), volume['id']) - return - if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) now = utils.utcnow() @@ -154,7 +132,7 @@ class API(base.Base): for i in volume.get('volume_metadata'): volume_metadata[i['key']] = i['value'] - for k, v in searchdict: + for k, v in searchdict.iteritems(): if k not in volume_metadata.keys()\ or volume_metadata[k] != v: return False @@ -163,6 +141,7 @@ class API(base.Base): # search_option to filter_name mapping. filter_mapping = {'metadata': _check_metadata_match} + result = [] for volume in volumes: # go over all filters in the list for opt, values in search_opts.iteritems(): @@ -172,21 +151,12 @@ class API(base.Base): # no such filter - ignore it, go to next filter continue else: - if filter_func(volume, values) == False: - # if one of conditions didn't match - remove - volumes.remove(volume) + if filter_func(volume, values): + result.append(volume) break + volumes = result return volumes - def get_all_by_vsa(self, context, vsa_id, direction): - if direction == "to": - return self.db.volume_get_all_assigned_to_vsa(context, vsa_id) - elif direction == "from": - return self.db.volume_get_all_assigned_from_vsa(context, vsa_id) - else: - raise exception.ApiError(_("Unsupported vol assignment type %s"), - direction) - def get_snapshot(self, context, snapshot_id): rv = self.db.snapshot_get(context, snapshot_id) return dict(rv.iteritems()) @@ -286,3 +256,12 @@ class API(base.Base): self.db.volume_metadata_update(context, volume_id, _metadata, True) return _metadata + + def get_volume_metadata_value(self, volume, key): + """Get value of particular metadata key.""" + metadata = volume.get('volume_metadata') + if metadata: + for i in volume['volume_metadata']: + if i['key'] == key: + return i['value'] + return None diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 9e046d054..2e9a394c7 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils +from nova.volume import volume_types LOG = logging.getLogger("nova.volume.driver") @@ -516,7 +517,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - self._run_iscsiadm(iscsi_properties, '--op=new') + self._run_iscsiadm(iscsi_properties, ('--op', 'new')) if iscsi_properties.get('auth_method'): 
self._iscsiadm_update(iscsi_properties, @@ -568,7 +569,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - self._run_iscsiadm(iscsi_properties, '--op=delete') + self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) def check_for_export(self, context, volume_id): """Make sure volume is exported.""" @@ -813,9 +814,15 @@ class LoggingVolumeDriver(VolumeDriver): class ZadaraBEDriver(ISCSIDriver): """Performs actions to configure Zadara BE module.""" - def _not_vsa_be_volume(self, volume): + def _is_vsa_volume(self, volume): + return volume_types.is_vsa_volume(volume['volume_type_id']) + + def _is_vsa_drive(self, volume): + return volume_types.is_vsa_drive(volume['volume_type_id']) + + def _not_vsa_volume_or_drive(self, volume): """Returns True if volume is not VSA BE volume.""" - if volume['to_vsa_id'] is None: + if not volume_types.is_vsa_object(volume['volume_type_id']): LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name']) return True else: @@ -828,9 +835,14 @@ class ZadaraBEDriver(ISCSIDriver): """ Volume Driver methods """ def create_volume(self, volume): """Creates BE volume.""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).create_volume(volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s creation - do nothing"), + volume['name']) + return + if int(volume['size']) == 0: sizestr = '0' # indicates full-partition else: @@ -838,9 +850,16 @@ class ZadaraBEDriver(ISCSIDriver): # Set the qos-str to default type sas qosstr = 'SAS_1000' - drive_type = volume.get('drive_type') - if drive_type is not None: - qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) + LOG.debug(_("\tvolume_type_id=%s"), volume['volume_type_id']) + + volume_type = volume_types.get_volume_type(None, + volume['volume_type_id']) + + LOG.debug(_("\tvolume_type=%s"), volume_type) + + if volume_type is not None: + qosstr = volume_type['extra_specs']['drive_type'] + \ + ("_%s" % volume_type['extra_specs']['drive_size']) try: self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', @@ -858,9 +877,14 @@ class ZadaraBEDriver(ISCSIDriver): def delete_volume(self, volume): """Deletes BE volume.""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).delete_volume(volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"), + volume['name']) + return + try: self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'delete_partition', @@ -874,16 +898,26 @@ class ZadaraBEDriver(ISCSIDriver): LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) def local_path(self, volume): - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).local_path(volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s local path call - call discover"), + volume['name']) + return super(ZadaraBEDriver, self).discover_volume(None, volume) + raise exception.Error(_("local_path not supported")) def ensure_export(self, context, volume): """ensure BE export for a volume""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).ensure_export(context, volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"), 
+ volume['name']) + return + try: iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) @@ -900,9 +934,14 @@ class ZadaraBEDriver(ISCSIDriver): def create_export(self, context, volume): """create BE export for a volume""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).create_export(context, volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s create export - do nothing"), + volume['name']) + return + self._ensure_iscsi_targets(context, volume['host']) iscsi_target = self.db.volume_allocate_iscsi_target(context, volume['id'], @@ -915,9 +954,14 @@ class ZadaraBEDriver(ISCSIDriver): def remove_export(self, context, volume): """Removes BE export for a volume.""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).remove_export(context, volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"), + volume['name']) + return + try: iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) @@ -939,14 +983,14 @@ class ZadaraBEDriver(ISCSIDriver): def create_snapshot(self, snapshot): """Nothing required for snapshot""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).create_snapshot(volume) pass def delete_snapshot(self, snapshot): """Nothing required to delete a snapshot""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).delete_snapshot(volume) pass diff --git a/nova/volume/manager.py b/nova/volume/manager.py index b23bff1fc..63656d485 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -45,11 +45,12 @@ intact. 
from nova import context from nova import exception -from nova import rpc from nova import flags from nova import log as logging from nova import manager +from nova import rpc from nova import utils +from nova.volume import volume_types LOG = logging.getLogger('nova.volume.manager') @@ -144,13 +145,23 @@ class VolumeManager(manager.SchedulerDependentManager): return volume_id def _notify_vsa(self, context, volume_ref, status): - if volume_ref['to_vsa_id'] is not None: - rpc.cast(context, - FLAGS.vsa_topic, - {"method": "vsa_volume_created", - "args": {"vol_id": volume_ref['id'], - "vsa_id": volume_ref['to_vsa_id'], - "status": status}}) + if volume_ref['volume_type_id'] is None: + return + + if volume_types.is_vsa_drive(volume_ref['volume_type_id']): + vsa_id = None + for i in volume_ref.get('volume_metadata'): + if i['key'] == 'to_vsa_id': + vsa_id = int(i['value']) + break + + if vsa_id: + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "vsa_volume_created", + "args": {"vol_id": volume_ref['id'], + "vsa_id": vsa_id, + "status": status}}) def delete_volume(self, context, volume_id): """Deletes and unexports volume.""" diff --git a/nova/volume/san.py b/nova/volume/san.py index bdebb7783..9532c8116 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -64,14 +64,12 @@ class SanISCSIDriver(ISCSIDriver): # discover_volume is still OK # undiscover_volume is still OK - def _connect_to_ssh(self, san_ip=None): - if san_ip is None: - san_ip = FLAGS.san_ip + def _connect_to_ssh(self): ssh = paramiko.SSHClient() #TODO(justinsb): We need a better SSH key policy ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if FLAGS.san_password: - ssh.connect(san_ip, + ssh.connect(FLAGS.san_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, password=FLAGS.san_password) @@ -79,7 +77,7 @@ class SanISCSIDriver(ISCSIDriver): privatekeyfile = os.path.expanduser(FLAGS.san_privatekey) # It sucks that paramiko doesn't support DSA keys privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) - ssh.connect(san_ip, + ssh.connect(FLAGS.san_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, pkey=privatekey) @@ -87,9 +85,9 @@ class SanISCSIDriver(ISCSIDriver): raise exception.Error(_("Specify san_password or san_privatekey")) return ssh - def _run_ssh(self, command, check_exit_code=True, san_ip=None): + def _run_ssh(self, command, check_exit_code=True): #TODO(justinsb): SSH connection caching (?) 
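The _notify_vsa change above stops relying on a dedicated to_vsa_id column and instead recovers the owning VSA from the volume's metadata records before casting to the VSA topic. A minimal standalone sketch of that lookup (helper name and sample data are illustrative, not part of the patch):

    def vsa_id_from_metadata(volume_metadata):
        """Return the owning VSA id as an int, or None when absent."""
        for item in volume_metadata:
            if item['key'] == 'to_vsa_id':
                return int(item['value'])
        return None

    # A BE drive carries the marker; an ordinary volume does not.
    assert vsa_id_from_metadata([{'key': 'to_vsa_id', 'value': '7'}]) == 7
    assert vsa_id_from_metadata([{'key': 'purpose', 'value': 'test'}]) is None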
- ssh = self._connect_to_ssh(san_ip) + ssh = self._connect_to_ssh() #TODO(justinsb): Reintroduce the retry hack ret = ssh_execute(ssh, command, check_exit_code=check_exit_code) diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py index 9b02d4ccc..ffa9e6e02 100644 --- a/nova/volume/volume_types.py +++ b/nova/volume/volume_types.py @@ -100,20 +100,22 @@ def get_all_types(context, inactive=0, search_opts={}): continue else: if filter_func(type_args, values): - # if one of conditions didn't match - remove result[type_name] = type_args break vol_types = result return vol_types -def get_volume_type(context, id): +def get_volume_type(ctxt, id): """Retrieves single volume type by id.""" if id is None: raise exception.InvalidVolumeType(volume_type=id) + if ctxt is None: + ctxt = context.get_admin_context() + try: - return db.volume_type_get(context, id) + return db.volume_type_get(ctxt, id) except exception.DBError: raise exception.ApiError(_("Unknown volume type: %s") % id) @@ -127,3 +129,38 @@ def get_volume_type_by_name(context, name): return db.volume_type_get_by_name(context, name) except exception.DBError: raise exception.ApiError(_("Unknown volume type: %s") % name) + + +def is_key_value_present(volume_type_id, key, value, volume_type=None): + if volume_type_id is None: + return False + + if volume_type is None: + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + if volume_type.get('extra_specs') is None or\ + volume_type['extra_specs'].get(key) != value: + return False + else: + return True + + +def is_vsa_drive(volume_type_id, volume_type=None): + return is_key_value_present(volume_type_id, + 'type', 'vsa_drive', volume_type) + + +def is_vsa_volume(volume_type_id, volume_type=None): + return is_key_value_present(volume_type_id, + 'type', 'vsa_volume', volume_type) + + +def is_vsa_object(volume_type_id): + if volume_type_id is None: + return False + + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + + return is_vsa_drive(volume_type_id, volume_type) or\ + is_vsa_volume(volume_type_id, volume_type) diff --git a/nova/vsa/api.py b/nova/vsa/api.py index bb6e93b87..b279255d7 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -20,22 +20,26 @@ Handles all requests relating to Virtual Storage Arrays (VSAs). 
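The volume_types helpers above classify a volume by the extra_specs of its volume type: type='vsa_drive' marks a BE drive and type='vsa_volume' marks an FE volume exposed by a VSA, which is also what the Zadara BE driver guards key off. A self-contained sketch of the same convention with the database lookup omitted (function and sample names are illustrative):

    def _has_spec(volume_type, key, value):
        specs = (volume_type or {}).get('extra_specs') or {}
        return specs.get(key) == value

    def is_vsa_drive(volume_type):
        return _has_spec(volume_type, 'type', 'vsa_drive')

    def is_vsa_volume(volume_type):
        return _has_spec(volume_type, 'type', 'vsa_volume')

    def is_vsa_object(volume_type):
        return is_vsa_drive(volume_type) or is_vsa_volume(volume_type)

    sata = {'name': 'SATA_500', 'extra_specs': {'type': 'vsa_drive',
                                                'drive_type': 'SATA',
                                                'drive_size': '500'}}
    assert is_vsa_drive(sata) and is_vsa_object(sata)
    assert not is_vsa_volume(sata)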
""" import sys -import base64 - -from xml.etree import ElementTree +from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota from nova import rpc -from nova.db import base - -from nova import compute from nova import volume from nova.compute import instance_types -from nova.vsa import drive_types +from nova.db import base +from nova.volume import volume_types + + +class VsaState: + CREATING = 'creating' # VSA creating (not ready yet) + LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) + CREATED = 'created' # VSA fully created and ready for use + PARTIAL = 'partial' # Some BE drives were allocated + FAILED = 'failed' # Some BE storage allocations failed + DELETING = 'deleting' # VSA started the deletion procedure FLAGS = flags.FLAGS @@ -43,22 +47,14 @@ flags.DEFINE_string('vsa_ec2_access_key', None, 'EC2 access key used by VSA for accessing nova') flags.DEFINE_string('vsa_ec2_user_id', None, 'User ID used by VSA for accessing nova') - flags.DEFINE_boolean('vsa_multi_vol_creation', True, 'Ask scheduler to create multiple volumes in one call') +flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type', + 'Name of volume type associated with FE VSA volumes') LOG = logging.getLogger('nova.vsa') -class VsaState: - CREATING = 'creating' # VSA creating (not ready yet) - LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) - CREATED = 'created' # VSA fully created and ready for use - PARTIAL = 'partial' # Some BE storage allocations failed - FAILED = 'failed' # Some BE storage allocations failed - DELETING = 'deleting' # VSA started the deletion procedure - - class API(base.Base): """API for interacting with the VSA manager.""" @@ -67,6 +63,15 @@ class API(base.Base): self.volume_api = volume_api or volume.API() super(API, self).__init__(**kwargs) + def _check_volume_type_correctness(self, vol_type): + if vol_type.get('extra_specs') == None or\ + vol_type['extra_specs'].get('type') != 'vsa_drive' or\ + vol_type['extra_specs'].get('drive_type') == None or\ + vol_type['extra_specs'].get('drive_size') == None: + + raise exception.ApiError(_("Invalid drive type %s") + % vol_type['name']) + def _get_default_vsa_instance_type(self): return instance_types.get_instance_type_by_name( FLAGS.default_vsa_instance_type) @@ -89,16 +94,17 @@ class API(base.Base): if name is None: raise exception.ApiError(_("No drive_name param found in %s") % node) - - # find DB record for this disk try: - drive_ref = drive_types.get_by_name(context, name) + vol_type = volume_types.get_volume_type_by_name(context, name) except exception.NotFound: raise exception.ApiError(_("Invalid drive type name %s") % name) + self._check_volume_type_correctness(vol_type) + # if size field present - override disk size specified in DB - size = node.get('size', drive_ref['size_gb']) + size = int(node.get('size', + vol_type['extra_specs'].get('drive_size'))) if shared: part_size = FLAGS.vsa_part_size_gb @@ -110,17 +116,15 @@ class API(base.Base): size = 0 # special handling for full drives for i in range(num_volumes): - # volume_name = vsa_name + ("_%s_vol-%d" % (name, i)) volume_name = "drive-%03d" % first_index first_index += 1 volume_desc = 'BE volume for VSA %s type %s' % \ (vsa_name, name) volume = { 'size': size, - 'snapshot_id': None, 'name': volume_name, 'description': volume_desc, - 'drive_ref': drive_ref + 'volume_type_id': vol_type['id'], } volume_params.append(volume) @@ -211,7 +215,7 @@ class 
API(base.Base): if len(volume_params) > 0: request_spec = { 'num_volumes': len(volume_params), - 'vsa_id': vsa_id, + 'vsa_id': str(vsa_id), 'volumes': volume_params, } @@ -227,17 +231,21 @@ class API(base.Base): try: vol_name = vol['name'] vol_size = vol['size'] + vol_type_id = vol['volume_type_id'] LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\ - "volume %(vol_name)s, %(vol_size)d GB"), - locals()) + "volume %(vol_name)s, %(vol_size)d GB, "\ + "type %(vol_type_id)s"), locals()) + + vol_type = volume_types.get_volume_type(context, + vol['volume_type_id']) vol_ref = self.volume_api.create(context, vol_size, - vol['snapshot_id'], + None, vol_name, vol['description'], - to_vsa_id=vsa_id, - drive_type_id=vol['drive_ref'].get('id'), + volume_type=vol_type, + metadata=dict(to_vsa_id=str(vsa_id)), availability_zone=availability_zone) except: self.update_vsa_status(context, vsa_id, @@ -249,7 +257,7 @@ class API(base.Base): rpc.cast(context, FLAGS.vsa_topic, {"method": "create_vsa", - "args": {"vsa_id": vsa_id}}) + "args": {"vsa_id": str(vsa_id)}}) return vsa_ref @@ -314,8 +322,7 @@ class API(base.Base): def _force_volume_delete(self, ctxt, volume): """Delete a volume, bypassing the check that it must be available.""" host = volume['host'] - if not host or volume['from_vsa_id']: - # Volume not yet assigned to host OR FE volume + if not host: # Deleting volume from database and skipping rpc. self.db.volume_destroy(ctxt, volume['id']) return @@ -328,9 +335,9 @@ class API(base.Base): def delete_vsa_volumes(self, context, vsa_id, direction, force_delete=True): if direction == "FE": - volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + volumes = self.get_all_vsa_volumes(context, vsa_id) else: - volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + volumes = self.get_all_vsa_drives(context, vsa_id) for volume in volumes: try: @@ -374,58 +381,25 @@ class API(base.Base): return self.db.vsa_get_all(context) return self.db.vsa_get_all_by_project(context, context.project_id) - def generate_user_data(self, context, vsa, volumes): - SubElement = ElementTree.SubElement - - e_vsa = ElementTree.Element("vsa") + def get_vsa_volume_type(self, context): + name = FLAGS.vsa_volume_type_name + try: + vol_type = volume_types.get_volume_type_by_name(context, name) + except exception.NotFound: + volume_types.create(context, name, + extra_specs=dict(type='vsa_volume')) + vol_type = volume_types.get_volume_type_by_name(context, name) - e_vsa_detail = SubElement(e_vsa, "id") - e_vsa_detail.text = str(vsa['id']) - e_vsa_detail = SubElement(e_vsa, "name") - e_vsa_detail.text = vsa['display_name'] - e_vsa_detail = SubElement(e_vsa, "description") - e_vsa_detail.text = vsa['display_description'] - e_vsa_detail = SubElement(e_vsa, "vc_count") - e_vsa_detail.text = str(vsa['vc_count']) + return vol_type - e_vsa_detail = SubElement(e_vsa, "auth_user") - e_vsa_detail.text = FLAGS.vsa_ec2_user_id - e_vsa_detail = SubElement(e_vsa, "auth_access_key") - e_vsa_detail.text = FLAGS.vsa_ec2_access_key + def get_all_vsa_instances(self, context, vsa_id): + return self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) - e_volumes = SubElement(e_vsa, "volumes") - for volume in volumes: + def get_all_vsa_volumes(self, context, vsa_id): + return self.volume_api.get_all(context, + search_opts={'metadata': dict(from_vsa_id=str(vsa_id))}) - loc = volume['provider_location'] - if loc is None: - ip = '' - iscsi_iqn = '' - iscsi_portal = '' - else: - (iscsi_target, _sep, 
iscsi_iqn) = loc.partition(" ") - (ip, iscsi_portal) = iscsi_target.split(":", 1) - - e_vol = SubElement(e_volumes, "volume") - e_vol_detail = SubElement(e_vol, "id") - e_vol_detail.text = str(volume['id']) - e_vol_detail = SubElement(e_vol, "name") - e_vol_detail.text = volume['name'] - e_vol_detail = SubElement(e_vol, "display_name") - e_vol_detail.text = volume['display_name'] - e_vol_detail = SubElement(e_vol, "size_gb") - e_vol_detail.text = str(volume['size']) - e_vol_detail = SubElement(e_vol, "status") - e_vol_detail.text = volume['status'] - e_vol_detail = SubElement(e_vol, "ip") - e_vol_detail.text = ip - e_vol_detail = SubElement(e_vol, "iscsi_iqn") - e_vol_detail.text = iscsi_iqn - e_vol_detail = SubElement(e_vol, "iscsi_portal") - e_vol_detail.text = iscsi_portal - e_vol_detail = SubElement(e_vol, "lun") - e_vol_detail.text = '0' - e_vol_detail = SubElement(e_vol, "sn_host") - e_vol_detail.text = volume['host'] - - _xml = ElementTree.tostring(e_vsa) - return base64.b64encode(_xml) + def get_all_vsa_drives(self, context, vsa_id): + return self.volume_api.get_all(context, + search_opts={'metadata': dict(to_vsa_id=str(vsa_id))}) diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py deleted file mode 100644 index 3cdbbfb09..000000000 --- a/nova/vsa/drive_types.py +++ /dev/null @@ -1,114 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all requests relating to Virtual Storage Arrays (VSAs). 
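The get_all_vsa_instances/volumes/drives helpers above all reduce to one query: fetch objects whose metadata carries the right VSA marker (vsa_id for instances, from_vsa_id for FE volumes, to_vsa_id for BE drives). A standalone sketch of that filter over plain dictionaries (illustrative only; the real calls go through compute_api/volume_api get_all with search_opts):

    def filter_by_metadata(objects, **wanted):
        """Keep objects whose metadata contains every wanted key/value."""
        return [obj for obj in objects
                if all(obj.get('metadata', {}).get(k) == v
                       for k, v in wanted.items())]

    volumes = [
        {'name': 'drive-000', 'metadata': {'to_vsa_id': '5'}},
        {'name': 'vol-fe-01', 'metadata': {'from_vsa_id': '5'}},
        {'name': 'unrelated', 'metadata': {}},
    ]
    assert [v['name'] for v in filter_by_metadata(volumes, to_vsa_id='5')] == \
        ['drive-000']
    assert [v['name'] for v in filter_by_metadata(volumes, from_vsa_id='5')] == \
        ['vol-fe-01']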
-""" - -from nova import db -from nova import exception -from nova import flags -from nova import log as logging - -FLAGS = flags.FLAGS -flags.DEFINE_string('drive_type_template_short', '%s_%sGB_%sRPM', - 'Template string for generation of drive type name') -flags.DEFINE_string('drive_type_template_long', '%s_%sGB_%sRPM_%s', - 'Template string for generation of drive type name') - - -LOG = logging.getLogger('nova.drive_types') - - -def _generate_default_drive_name(type, size_gb, rpm, capabilities): - if capabilities is None or capabilities == '': - return FLAGS.drive_type_template_short % \ - (type, str(size_gb), rpm) - else: - return FLAGS.drive_type_template_long % \ - (type, str(size_gb), rpm, capabilities) - - -def create(context, type, size_gb, rpm, capabilities='', - visible=True, name=None): - if name is None: - name = _generate_default_drive_name(type, size_gb, rpm, - capabilities) - LOG.debug(_("Creating drive type %(name)s: "\ - "%(type)s %(size_gb)s %(rpm)s %(capabilities)s"), locals()) - - values = { - 'type': type, - 'size_gb': size_gb, - 'rpm': rpm, - 'capabilities': capabilities, - 'visible': visible, - 'name': name - } - return db.drive_type_create(context, values) - - -def update(context, id, **kwargs): - - LOG.debug(_("Updating drive type with id %(id)s: %(kwargs)s"), locals()) - - updatable_fields = ['type', - 'size_gb', - 'rpm', - 'capabilities', - 'visible'] - changes = {} - for field in updatable_fields: - if field in kwargs and \ - kwargs[field] is not None and \ - kwargs[field] != '': - changes[field] = kwargs[field] - - # call update regadless if changes is empty or not - return db.drive_type_update(context, id, changes) - - -def rename(context, name, new_name=None): - - if new_name is None or \ - new_name == '': - disk = db.drive_type_get_by_name(context, name) - new_name = _generate_default_drive_name(disk['type'], - disk['size_gb'], disk['rpm'], disk['capabilities']) - - LOG.debug(_("Renaming drive type %(name)s to %(new_name)s"), locals()) - - values = dict(name=new_name) - dtype = db.drive_type_get_by_name(context, name) - return db.drive_type_update(context, dtype['id'], values) - - -def delete(context, id): - LOG.debug(_("Deleting drive type %d"), id) - db.drive_type_destroy(context, id) - - -def get(context, id): - return db.drive_type_get(context, id) - - -def get_by_name(context, name): - return db.drive_type_get_by_name(context, name) - - -def get_all(context, visible=True): - return db.drive_type_get_all(context, visible) diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py index 0bb81484d..d4248ca01 100644 --- a/nova/vsa/fake.py +++ b/nova/vsa/fake.py @@ -16,7 +16,7 @@ # under the License. -class FakeVcConnection: +class FakeVcConnection(object): def init_host(self, host): pass diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 0f1718d38..d4c414106 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -22,17 +22,17 @@ Handles all processes relating to Virtual Storage Arrays (VSA). 
""" +from nova import compute +from nova import exception +from nova import flags from nova import log as logging from nova import manager -from nova import flags -from nova import utils -from nova import exception -from nova import compute from nova import volume from nova import vsa -from nova.vsa.api import VsaState +from nova import utils from nova.compute import instance_types - +from nova.vsa import utils as vsa_utils +from nova.vsa.api import VsaState FLAGS = flags.FLAGS flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection', @@ -83,18 +83,18 @@ class VsaManager(manager.SchedulerDependentManager): @exception.wrap_exception() def vsa_volume_created(self, context, vol_id, vsa_id, status): """Callback for volume creations""" - LOG.debug(_("VSA ID %(vsa_id)s: Volume %(vol_id)s created. "\ + LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "\ "Status %(status)s"), locals()) vsa_id = int(vsa_id) # just in case # Get all volumes for this VSA # check if any of them still in creating phase - volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) - for volume in volumes: - if volume['status'] == 'creating': - vol_name = volume['name'] - vol_disp_name = volume['display_name'] - LOG.debug(_("Volume %(vol_name)s (%(vol_disp_name)s) still "\ + drives = self.vsa_api.get_all_vsa_drives(context, vsa_id) + for drive in drives: + if drive['status'] == 'creating': + vol_name = drive['name'] + vol_disp_name = drive['display_name'] + LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\ "in creating phase - wait"), locals()) return @@ -105,17 +105,17 @@ class VsaManager(manager.SchedulerDependentManager): LOG.exception(msg) return - if len(volumes) != vsa['vol_count']: - cvol_real = len(volumes) + if len(drives) != vsa['vol_count']: + cvol_real = len(drives) cvol_exp = vsa['vol_count'] LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\ "(%(cvol_real)d of %(cvol_exp)d)"), locals()) return # all volumes created (successfully or not) - return self._start_vcs(context, vsa, volumes) + return self._start_vcs(context, vsa, drives) - def _start_vcs(self, context, vsa, volumes=[]): + def _start_vcs(self, context, vsa, drives=[]): """Start VCs for VSA """ vsa_id = vsa['id'] @@ -127,11 +127,11 @@ class VsaManager(manager.SchedulerDependentManager): # in _separate_ loop go over all volumes and mark as "attached" has_failed_volumes = False - for volume in volumes: - vol_name = volume['name'] - vol_disp_name = volume['display_name'] - status = volume['status'] - LOG.info(_("VSA ID %(vsa_id)d: Volume %(vol_name)s "\ + for drive in drives: + vol_name = drive['name'] + vol_disp_name = drive['display_name'] + status = drive['status'] + LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\ "(%(vol_disp_name)s) is in %(status)s state"), locals()) if status == 'available': @@ -149,11 +149,12 @@ class VsaManager(manager.SchedulerDependentManager): if has_failed_volumes: LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True) - self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED) + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.FAILED) return # create user-data record for VC - storage_data = self.vsa_api.generate_user_data(context, vsa, volumes) + storage_data = vsa_utils.generate_user_data(vsa, drives) instance_type = instance_types.get_instance_type( vsa['instance_type_id']) @@ -174,4 +175,5 @@ class VsaManager(manager.SchedulerDependentManager): user_data=storage_data, 
metadata=dict(vsa_id=str(vsa_id))) - self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.CREATED) diff --git a/nova/vsa/utils.py b/nova/vsa/utils.py new file mode 100644 index 000000000..1de341ac5 --- /dev/null +++ b/nova/vsa/utils.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +from xml.etree import ElementTree + +from nova import flags + +FLAGS = flags.FLAGS + + +def generate_user_data(vsa, volumes): + SubElement = ElementTree.SubElement + + e_vsa = ElementTree.Element("vsa") + + e_vsa_detail = SubElement(e_vsa, "id") + e_vsa_detail.text = str(vsa['id']) + e_vsa_detail = SubElement(e_vsa, "name") + e_vsa_detail.text = vsa['display_name'] + e_vsa_detail = SubElement(e_vsa, "description") + e_vsa_detail.text = vsa['display_description'] + e_vsa_detail = SubElement(e_vsa, "vc_count") + e_vsa_detail.text = str(vsa['vc_count']) + + e_vsa_detail = SubElement(e_vsa, "auth_user") + e_vsa_detail.text = FLAGS.vsa_ec2_user_id + e_vsa_detail = SubElement(e_vsa, "auth_access_key") + e_vsa_detail.text = FLAGS.vsa_ec2_access_key + + e_volumes = SubElement(e_vsa, "volumes") + for volume in volumes: + + loc = volume['provider_location'] + if loc is None: + ip = '' + iscsi_iqn = '' + iscsi_portal = '' + else: + (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ") + (ip, iscsi_portal) = iscsi_target.split(":", 1) + + e_vol = SubElement(e_volumes, "volume") + e_vol_detail = SubElement(e_vol, "id") + e_vol_detail.text = str(volume['id']) + e_vol_detail = SubElement(e_vol, "name") + e_vol_detail.text = volume['name'] + e_vol_detail = SubElement(e_vol, "display_name") + e_vol_detail.text = volume['display_name'] + e_vol_detail = SubElement(e_vol, "size_gb") + e_vol_detail.text = str(volume['size']) + e_vol_detail = SubElement(e_vol, "status") + e_vol_detail.text = volume['status'] + e_vol_detail = SubElement(e_vol, "ip") + e_vol_detail.text = ip + e_vol_detail = SubElement(e_vol, "iscsi_iqn") + e_vol_detail.text = iscsi_iqn + e_vol_detail = SubElement(e_vol, "iscsi_portal") + e_vol_detail.text = iscsi_portal + e_vol_detail = SubElement(e_vol, "lun") + e_vol_detail.text = '0' + e_vol_detail = SubElement(e_vol, "sn_host") + e_vol_detail.text = volume['host'] + + _xml = ElementTree.tostring(e_vsa) + return base64.b64encode(_xml) -- cgit From 59e9adb8e2ef39474a04ead76975a1fc3f913550 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 19:09:50 -0700 Subject: cosmetic cleanup --- nova/api/openstack/contrib/virtual_storage_arrays.py | 6 +++--- nova/api/openstack/contrib/volumes.py | 14 +++++++------- nova/log.py | 6 +----- nova/tests/scheduler/test_vsa_scheduler.py | 1 - nova/tests/test_vsa.py | 1 - nova/tests/test_vsa_volumes.py | 2 -- nova/tests/test_xenapi.py | 1 - 7 files changed, 11 insertions(+), 20 deletions(-) (limited to 'nova') diff --git 
a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index f3e4fc849..e09736a28 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -260,9 +260,9 @@ class VsaVolumeDriveController(volumes.VolumeController): def _translation(self, context, vol, vsa_id, details): if details: - translation = volumes.translate_volume_detail_view + translation = volumes._translate_volume_detail_view else: - translation = volumes.translate_volume_summary_view + translation = volumes._translate_volume_summary_view d = translation(context, vol) d['vsaId'] = vsa_id @@ -559,7 +559,7 @@ class Virtual_storage_arrays(extensions.ExtensionDescriptor): return "http://docs.openstack.org/ext/vsa/api/v1.1" def get_updated(self): - return "2011-06-29T00:00:00+00:00" + return "2011-08-25T00:00:00+00:00" def get_resources(self): resources = [] diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 8c3898867..d62225e58 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -37,17 +37,17 @@ LOG = logging.getLogger("nova.api.volumes") FLAGS = flags.FLAGS -def translate_volume_detail_view(context, vol): +def _translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" - d = translate_volume_summary_view(context, vol) + d = _translate_volume_summary_view(context, vol) # No additional data / lookups at the moment return d -def translate_volume_summary_view(context, vol): +def _translate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} @@ -114,7 +114,7 @@ class VolumeController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'volume': translate_volume_detail_view(context, vol)} + return {'volume': _translate_volume_detail_view(context, vol)} def delete(self, req, id): """Delete a volume.""" @@ -130,11 +130,11 @@ class VolumeController(object): def index(self, req): """Returns a summary list of volumes.""" - return self._items(req, entity_maker=translate_volume_summary_view) + return self._items(req, entity_maker=_translate_volume_summary_view) def detail(self, req): """Returns a detailed list of volumes.""" - return self._items(req, entity_maker=translate_volume_detail_view) + return self._items(req, entity_maker=_translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" @@ -175,7 +175,7 @@ class VolumeController(object): # Work around problem that instance is lazy-loaded... 
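The rename above makes the volume view translators private while the VSA extension keeps delegating to them and then tags each result with the owning VSA. A small sketch of that delegation (the base translators here use made-up fields; only the pattern mirrors the extension code):

    def _translate_volume_summary_view(context, vol):
        return {'id': vol['id'], 'status': vol['status']}

    def _translate_volume_detail_view(context, vol):
        return dict(_translate_volume_summary_view(context, vol),
                    size=vol['size'])

    def translate_vsa_volume(context, vol, vsa_id, details=False):
        translate = (_translate_volume_detail_view if details
                     else _translate_volume_summary_view)
        d = translate(context, vol)
        d['vsaId'] = vsa_id
        return d

    vol = {'id': 3, 'status': 'available', 'size': 100}
    assert translate_vsa_volume(None, vol, 7, details=True)['vsaId'] == 7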
new_volume = self.volume_api.get(context, new_volume['id']) - retval = translate_volume_detail_view(context, new_volume) + retval = _translate_volume_detail_view(context, new_volume) return {'volume': retval} diff --git a/nova/log.py b/nova/log.py index eb0b6020f..222b8c5fb 100644 --- a/nova/log.py +++ b/nova/log.py @@ -32,7 +32,6 @@ import json import logging import logging.handlers import os -import stat import sys import traceback @@ -258,10 +257,7 @@ class NovaRootLogger(NovaLogger): self.filelog = WatchedFileHandler(logpath) self.addHandler(self.filelog) self.logpath = logpath - - st = os.stat(self.logpath) - if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode): - os.chmod(self.logpath, FLAGS.logfile_mode) + os.chmod(self.logpath, FLAGS.logfile_mode) else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py index 309db96a2..37964f00d 100644 --- a/nova/tests/scheduler/test_vsa_scheduler.py +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -210,7 +210,6 @@ class VsaSchedulerTestCase(test.TestCase): def setUp(self, sched_class=None): super(VsaSchedulerTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() - self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() if sched_class is None: diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 300a4d71c..3d2d2de13 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -47,7 +47,6 @@ class VsaTestCase(test.TestCase): FLAGS.quota_volumes = 100 FLAGS.quota_gigabytes = 10000 - self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() volume_types.create(self.context, diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index 43173d86a..b7cd4e840 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -36,8 +36,6 @@ class VsaVolumesTestCase(test.TestCase): self.stubs = stubout.StubOutForTesting() self.vsa_api = vsa.API() self.volume_api = volume.API() - - self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 6d1958401..2f0559366 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -203,7 +203,6 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) - @test.skip_test("Skip this test meanwhile") def test_parallel_builds(self): stubs.stubout_loopingcall_delay(self.stubs) -- cgit From eecb6ce2acee168713177459942e405b099fb25a Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 19:47:12 -0700 Subject: driver: added vsa_id parameter for SN call --- nova/volume/driver.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 7a02a7c14..35e3ea8d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -850,23 +850,25 @@ class ZadaraBEDriver(ISCSIDriver): # Set the qos-str to default type sas qosstr = 'SAS_1000' - LOG.debug(_("\tvolume_type_id=%s"), volume['volume_type_id']) - volume_type = volume_types.get_volume_type(None, volume['volume_type_id']) - - LOG.debug(_("\tvolume_type=%s"), volume_type) - if volume_type is not None: qosstr = 
volume_type['extra_specs']['drive_type'] + \ ("_%s" % volume_type['extra_specs']['drive_size']) + vsa_id = None + for i in volume.get('volume_metadata'): + if i['key'] == 'to_vsa_id': + vsa_id = i['value'] + break + try: self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'create_qospart', '--qos', qosstr, '--pname', volume['name'], '--psize', sizestr, + '--vsaid', vsa_id, run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: -- cgit From 5dc7956eed749c33b6cfaaaf122e829feec62ea9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 26 Aug 2011 09:54:53 -0400 Subject: Update compute API and manager so that the image_ref is set before spawning the rebuilt instance. Fixes issue where rebuild didn't actually change the image_id. --- nova/compute/api.py | 3 +-- nova/compute/manager.py | 14 +++----------- nova/tests/integrated/test_servers.py | 3 ++- 3 files changed, 6 insertions(+), 14 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 60a13631a..3b4bde8ea 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1035,7 +1035,7 @@ class API(base.Base): files_to_inject = files_to_inject or [] self._check_injected_file_quota(context, files_to_inject) - values = {} + values = {"image_ref": image_href} if metadata is not None: self._check_metadata_properties_quota(context, metadata) values['metadata'] = metadata @@ -1045,7 +1045,6 @@ class API(base.Base): rebuild_params = { "new_pass": admin_password, - "image_ref": image_href, "injected_files": files_to_inject, } diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ade15e310..6fcb3786c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -201,11 +201,6 @@ class ComputeManager(manager.SchedulerDependentManager): data = {'launched_at': launched_at or utils.utcnow()} self.db.instance_update(context, instance_id, data) - def _update_image_ref(self, context, instance_id, image_ref): - """Update the image_id for the given instance.""" - data = {'image_ref': image_ref} - self.db.instance_update(context, instance_id, data) - def get_console_topic(self, context, **kwargs): """Retrieves the console host for a project on this host. 
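The BE driver change above derives the qos string from the volume type's extra_specs and now also passes the owning VSA id, taken from volume metadata, to zadara_sncfg via --vsaid. A minimal sketch of both derivations, keeping the 'SAS_1000' fallback for volumes with no usable type (sample data illustrative):

    def build_qos_and_vsa(volume, volume_type):
        qosstr = 'SAS_1000'
        if volume_type is not None:
            specs = volume_type['extra_specs']
            qosstr = '%s_%s' % (specs['drive_type'], specs['drive_size'])

        vsa_id = None
        for item in volume.get('volume_metadata', []):
            if item['key'] == 'to_vsa_id':
                vsa_id = item['value']
                break
        return qosstr, vsa_id

    vol = {'volume_metadata': [{'key': 'to_vsa_id', 'value': '12'}]}
    vtype = {'extra_specs': {'drive_type': 'SATA', 'drive_size': '500'}}
    assert build_qos_and_vsa(vol, vtype) == ('SATA_500', '12')
    assert build_qos_and_vsa({'volume_metadata': []}, None) == ('SAS_1000', None)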
@@ -526,7 +521,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) - :param image_ref: Image identifier (href or integer) + :param injected_files: Files to inject :param new_pass: password to set on rebuilt instance """ context = context.elevated() @@ -539,8 +534,6 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self._get_instance_nw_info(context, instance_ref) self.driver.destroy(instance_ref, network_info) - image_ref = kwargs.get('image_ref') - instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, instance_ref) @@ -552,11 +545,10 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.spawn(context, instance_ref, network_info, bd_mapping) - self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) - usage_info = utils.usage_from_instance(instance_ref, - image_ref=image_ref) + usage_info = utils.usage_from_instance(instance_ref) + notifier.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier.INFO, diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index c2f800689..b9382038a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -193,7 +193,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # rebuild the server with metadata post = {} post['rebuild'] = { - "imageRef": "https://localhost/v1.1/32278/images/2", + "imageRef": "https://localhost/v1.1/32278/images/3", "name": "blah", } @@ -205,6 +205,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertEqual(created_server_id, found_server['id']) self.assertEqual({}, found_server.get('metadata')) self.assertEqual('blah', found_server.get('name')) + self.assertEqual('3', found_server.get('image')['id']) # Cleanup self._delete_server(created_server_id) -- cgit From 96a1b218d1d1d24853df3eceff11ba7676cd48ae Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 26 Aug 2011 11:14:44 -0700 Subject: added debug prints for scheduler --- nova/scheduler/vsa.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index ad5ebc2dc..6962dd86b 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -272,7 +272,7 @@ class VsaScheduler(simple.SimpleScheduler): self._consume_resource(qos_cap, vol['size'], -1) def schedule_create_volumes(self, context, request_spec, - availability_zone, *_args, **_kwargs): + availability_zone=None, *_args, **_kwargs): """Picks hosts for hosting multiple volumes.""" num_volumes = request_spec.get('num_volumes') @@ -285,6 +285,8 @@ class VsaScheduler(simple.SimpleScheduler): host = self._check_host_enforcement(context, availability_zone) try: + self._print_capabilities_info() + self._assign_hosts_to_volumes(context, volume_params, host) for vol in volume_params: @@ -324,6 +326,8 @@ class VsaScheduler(simple.SimpleScheduler): return super(VsaScheduler, self).schedule_create_volume(context, volume_id, *_args, **_kwargs) + self._print_capabilities_info() + drive_type = { 'name': volume_type['extra_specs'].get('drive_name'), 'type': volume_type['extra_specs'].get('drive_type'), @@ -398,6 +402,26 @@ class VsaScheduler(simple.SimpleScheduler): 
self._consume_partition(qos_values, GB_TO_BYTES(size), direction) return + def _print_capabilities_info(self): + host_list = self._get_service_states().iteritems() + for host, host_dict in host_list: + for service_name, service_dict in host_dict.iteritems(): + if service_name != "volume": + continue + + LOG.info(_("Host %s:"), host) + + gos_info = service_dict.get('drive_qos_info', {}) + for qosgrp, qos_values in gos_info.iteritems(): + total = qos_values['TotalDrives'] + used = qos_values['FullDrive']['NumOccupiedDrives'] + free = qos_values['FullDrive']['NumFreeDrives'] + avail = BYTES_TO_GB(qos_values['AvailableCapacity']) + + LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\ + "used %(used)2s, free %(free)2s. Available "\ + "capacity %(avail)-5s"), locals()) + class VsaSchedulerLeastUsedHost(VsaScheduler): """ -- cgit From 6f467a94e3f7bdab41ebdcb7b987ca5544bfe321 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 26 Aug 2011 13:55:43 -0700 Subject: removed create_volumes, added log & doc comment about experimental code --- nova/volume/manager.py | 4 ---- nova/vsa/api.py | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 63656d485..caa5298d4 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -93,10 +93,6 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volumes(self, context, request_spec, availability_zone): - LOG.info(_("create_volumes called with req=%(request_spec)s, "\ - "availability_zone=%(availability_zone)s"), locals()) - def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() diff --git a/nova/vsa/api.py b/nova/vsa/api.py index b279255d7..18cf13705 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -17,6 +17,10 @@ """ Handles all requests relating to Virtual Storage Arrays (VSAs). + +Experimental code. Requires special VSA image. 
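_print_capabilities_info above walks the reported volume-service capabilities and logs one line per QoS group. A standalone sketch of that summary over the drive_qos_info layout the scheduler reads (TotalDrives, FullDrive counters, AvailableCapacity in bytes); host iteration and logging are stripped down:

    def summarize_drive_qos(drive_qos_info):
        lines = []
        for qosgrp, qos in drive_qos_info.items():
            lines.append('%s: total %d, used %d, free %d, available %d GB' % (
                qosgrp,
                qos['TotalDrives'],
                qos['FullDrive']['NumOccupiedDrives'],
                qos['FullDrive']['NumFreeDrives'],
                qos['AvailableCapacity'] // (1024 ** 3)))
        return lines

    info = {'SATA_500': {'TotalDrives': 10,
                         'FullDrive': {'NumOccupiedDrives': 3,
                                       'NumFreeDrives': 7},
                         'AvailableCapacity': 7 * 500 * 1024 ** 3}}
    print(summarize_drive_qos(info)[0])
    # SATA_500: total 10, used 3, free 7, available 3500 GB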
+For assistance and guidelines pls contact + Zadara Storage Inc & Openstack community """ import sys @@ -142,6 +146,8 @@ class API(base.Base): For shared storage disks split into partitions """ + LOG.info(_("*** Experimental VSA code ***")) + if vc_count > FLAGS.max_vcs_in_vsa: LOG.warning(_("Requested number of VCs (%d) is too high."\ " Setting to default"), vc_count) -- cgit From 8bd8103c86fc021ff86b923883b66371052b3f93 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 17:32:44 -0500 Subject: doubles quotes to single --- nova/network/manager.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 404a3180e..b4605eea5 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -484,17 +484,17 @@ class NetworkManager(manager.SchedulerDependentManager): # TODO(tr3buchet) eventually "enabled" should be determined def ip_dict(ip): return { - "ip": ip, - "netmask": network["netmask"], - "enabled": "1"} + 'ip': ip, + 'netmask': network['netmask'], + 'enabled': '1'} def ip6_dict(): return { - "ip": ipv6.to_global(network['cidr_v6'], + 'ip': ipv6.to_global(network['cidr_v6'], vif['address'], network['project_id']), - "netmask": network['netmask_v6'], - "enabled": "1"} + 'netmask': network['netmask_v6'], + 'enabled': '1'} network_dict = { 'bridge': network['bridge'], 'id': network['id'], -- cgit From 75c7c841379341c63598850e4676f2146d63334a Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 28 Aug 2011 16:17:17 +0530 Subject: Bug #835964: pep8 violations in IPv6 code Fix pep8 violations. --- nova/ipv6/account_identifier.py | 3 ++- nova/tests/test_ipv6.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 27bb01988..8a08510ac 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id): except TypeError: raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) except NameError: - raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id) + raise TypeError(_('Bad project_id for to_global_ipv6: %s') % + project_id) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 04c1b5598..e1ba4aafb 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase): def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, - bad_prefix, + bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') -- cgit
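The network manager hunk near the end only changes quoting, but the dictionaries it builds describe how each fixed IP is handed to the guest. An illustrative reconstruction with ipv6.to_global stubbed out, since its real implementation lives outside this patch:

    def to_global_stub(prefix, mac, project_id):
        # stand-in for nova.ipv6.to_global(); returns a fixed address here
        return '2001:db8::216:3eff:fe33:4455'

    def build_ip_info(network, fixed_ip, vif_address, project_id):
        v4 = {'ip': fixed_ip, 'netmask': network['netmask'], 'enabled': '1'}
        v6 = {'ip': to_global_stub(network['cidr_v6'], vif_address,
                                   project_id),
              'netmask': network['netmask_v6'], 'enabled': '1'}
        return v4, v6

    net = {'netmask': '255.255.255.0', 'cidr_v6': '2001:db8::/64',
           'netmask_v6': '64'}
    print(build_ip_info(net, '10.0.0.5', '02:16:3e:33:44:55', 'proj-1'))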