author    Brian Lamar <brian.lamar@rackspace.com>  2011-08-29 08:34:40 -0400
committer Brian Lamar <brian.lamar@rackspace.com>  2011-08-29 08:34:40 -0400
commit    4ef1d00d355dd21f23b78aebe6dc4feae667072c (patch)
tree      a6099593258fc2b4e43541e7e71a3d9e779d79b3
parent    b846d22937ac62549832e16105ed06a21a3e34d0 (diff)
parent    49af6fa8e07b566237e6b80244ffe117568957d5 (diff)
Merged trunk.
-rwxr-xr-xbin/nova-manage478
-rwxr-xr-xbin/nova-vsa49
-rw-r--r--nova/api/openstack/contrib/virtual_storage_arrays.py606
-rw-r--r--nova/db/api.py36
-rw-r--r--nova/db/sqlalchemy/api.py102
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py75
-rw-r--r--nova/db/sqlalchemy/migration.py1
-rw-r--r--nova/db/sqlalchemy/models.py35
-rw-r--r--nova/exception.py12
-rw-r--r--nova/flags.py12
-rw-r--r--nova/ipv6/account_identifier.py3
-rw-r--r--nova/network/manager.py12
-rw-r--r--nova/quota.py5
-rw-r--r--nova/scheduler/vsa.py535
-rw-r--r--nova/test.py18
-rw-r--r--nova/tests/api/openstack/contrib/test_vsa.py450
-rw-r--r--nova/tests/api/openstack/test_extensions.py1
-rw-r--r--nova/tests/scheduler/test_vsa_scheduler.py641
-rw-r--r--nova/tests/test_ipv6.py2
-rw-r--r--nova/tests/test_vsa.py182
-rw-r--r--nova/tests/test_vsa_volumes.py136
-rw-r--r--nova/virt/libvirt.xml.template4
-rw-r--r--nova/virt/libvirt/connection.py5
-rw-r--r--nova/volume/api.py18
-rw-r--r--nova/volume/driver.py272
-rw-r--r--nova/volume/manager.py78
-rw-r--r--nova/volume/volume_types.py43
-rw-r--r--nova/vsa/__init__.py18
-rw-r--r--nova/vsa/api.py411
-rw-r--r--nova/vsa/connection.py25
-rw-r--r--nova/vsa/fake.py22
-rw-r--r--nova/vsa/manager.py179
-rw-r--r--nova/vsa/utils.py80
-rwxr-xr-xrun_tests.sh18
34 files changed, 4535 insertions, 29 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 890cde0b8..c9cf4266d 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -53,6 +53,7 @@
CLI interface for nova management.
"""
+import ast
import gettext
import glob
import json
@@ -85,11 +86,13 @@ from nova import quota
from nova import rpc
from nova import utils
from nova import version
+from nova import vsa
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types
from nova.db import migration
+from nova.volume import volume_types
FLAGS = flags.FLAGS
flags.DECLARE('fixed_range', 'nova.network.manager')
@@ -1097,6 +1100,477 @@ class VersionCommands(object):
self.list()
+class VsaCommands(object):
+ """Methods for dealing with VSAs"""
+
+ def __init__(self, *args, **kwargs):
+ self.manager = manager.AuthManager()
+ self.vsa_api = vsa.API()
+ self.context = context.get_admin_context()
+
+ self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\
+ "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\
+ "%(az)-10s %(time)-10s"
+ self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\
+ "%(stat)-10s %(att)-20s %(time)s"
+ self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\
+ "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s"
+ self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\
+ "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\
+ "%(stat)-10s %(host)-15s %(time)s"
+
+ def _print_vsa_header(self):
+ print self._format_str_vsa %\
+ dict(id=_('ID'),
+ vsa_id=_('vsa_id'),
+ name=_('displayName'),
+ type=_('vc_type'),
+ vcs=_('vc_cnt'),
+ drives=_('drive_cnt'),
+ stat=_('status'),
+ az=_('AZ'),
+ time=_('createTime'))
+
+ def _print_vsa(self, vsa):
+ print self._format_str_vsa %\
+ dict(id=vsa['id'],
+ vsa_id=vsa['name'],
+ name=vsa['display_name'],
+ type=vsa['vsa_instance_type'].get('name', None),
+ vcs=vsa['vc_count'],
+ drives=vsa['vol_count'],
+ stat=vsa['status'],
+ az=vsa['availability_zone'],
+ time=str(vsa['created_at']))
+
+ def _print_volume_header(self):
+ print _(' === Volumes ===')
+ print self._format_str_volume %\
+ dict(id=_('ID'),
+ name=_('name'),
+ size=_('size'),
+ stat=_('status'),
+ att=_('attachment'),
+ time=_('createTime'))
+
+ def _print_volume(self, vol):
+ print self._format_str_volume %\
+ dict(id=vol['id'],
+ name=vol['display_name'] or vol['name'],
+ size=vol['size'],
+ stat=vol['status'],
+ att=vol['attach_status'],
+ time=str(vol['created_at']))
+
+ def _print_drive_header(self):
+ print _(' === Drives ===')
+ print self._format_str_drive %\
+ dict(id=_('ID'),
+ name=_('name'),
+ size=_('size'),
+ stat=_('status'),
+ host=_('host'),
+ type=_('type'),
+ tname=_('typeName'),
+ time=_('createTime'))
+
+ def _print_drive(self, drive):
+ if drive['volume_type_id'] is not None and drive.get('volume_type'):
+ drive_type_name = drive['volume_type'].get('name')
+ else:
+ drive_type_name = ''
+
+ print self._format_str_drive %\
+ dict(id=drive['id'],
+ name=drive['display_name'],
+ size=drive['size'],
+ stat=drive['status'],
+ host=drive['host'],
+ type=drive['volume_type_id'],
+ tname=drive_type_name,
+ time=str(drive['created_at']))
+
+ def _print_instance_header(self):
+ print _(' === Instances ===')
+ print self._format_str_instance %\
+ dict(id=_('ID'),
+ name=_('name'),
+ dname=_('disp_name'),
+ image=_('image'),
+ type=_('type'),
+ fl_ip=_('floating_IP'),
+ fx_ip=_('fixed_IP'),
+ stat=_('status'),
+ host=_('host'),
+ time=_('createTime'))
+
+ def _print_instance(self, vc):
+
+ fixed_addr = None
+ floating_addr = None
+ if vc['fixed_ips']:
+ fixed = vc['fixed_ips'][0]
+ fixed_addr = fixed['address']
+ if fixed['floating_ips']:
+ floating_addr = fixed['floating_ips'][0]['address']
+ floating_addr = floating_addr or fixed_addr
+
+ print self._format_str_instance %\
+ dict(id=vc['id'],
+ name=ec2utils.id_to_ec2_id(vc['id']),
+ dname=vc['display_name'],
+ image=('ami-%08x' % int(vc['image_ref'])),
+ type=vc['instance_type']['name'],
+ fl_ip=floating_addr,
+ fx_ip=fixed_addr,
+ stat=vc['state_description'],
+ host=vc['host'],
+ time=str(vc['created_at']))
+
+ def _list(self, context, vsas, print_drives=False,
+ print_volumes=False, print_instances=False):
+ if vsas:
+ self._print_vsa_header()
+
+ for vsa in vsas:
+ self._print_vsa(vsa)
+ vsa_id = vsa.get('id')
+
+ if print_instances:
+ instances = self.vsa_api.get_all_vsa_instances(context, vsa_id)
+ if instances:
+ print
+ self._print_instance_header()
+ for instance in instances:
+ self._print_instance(instance)
+ print
+
+ if print_drives:
+ drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
+ if drives:
+ self._print_drive_header()
+ for drive in drives:
+ self._print_drive(drive)
+ print
+
+ if print_volumes:
+ volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id)
+ if volumes:
+ self._print_volume_header()
+ for volume in volumes:
+ self._print_volume(volume)
+ print
+
+ @args('--storage', dest='storage',
+ metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]",
+ help='Initial storage allocation for VSA')
+ @args('--name', dest='name', metavar="<name>", help='VSA name')
+ @args('--description', dest='description', metavar="<description>",
+ help='VSA description')
+ @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
+ @args('--instance_type', dest='instance_type_name', metavar="<name>",
+ help='Instance type name')
+ @args('--image', dest='image_name', metavar="<name>", help='Image name')
+ @args('--shared', dest='shared', action="store_true", default=False,
+ help='Use shared drives')
+ @args('--az', dest='az', metavar="<zone:host>", help='Availability zone')
+ @args('--user', dest="user_id", metavar='<User name>',
+ help='User name')
+ @args('--project', dest="project_id", metavar='<Project name>',
+ help='Project name')
+ def create(self, storage='[]', name=None, description=None, vc_count=1,
+ instance_type_name=None, image_name=None, shared=None,
+ az=None, user_id=None, project_id=None):
+ """Create a VSA."""
+
+ if project_id is None:
+ try:
+ project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1]
+ except Exception as exc:
+ print _("Failed to retrieve project id: %(exc)s") % locals()
+ raise
+
+ if user_id is None:
+ try:
+ project = self.manager.get_project(project_id)
+ user_id = project.project_manager_id
+ except Exception as exc:
+ print _("Failed to retrieve user info: %(exc)s") % locals()
+ raise
+
+ is_admin = self.manager.is_admin(user_id)
+ ctxt = context.RequestContext(user_id, project_id, is_admin)
+ if not is_admin and \
+ not self.manager.is_project_member(user_id, project_id):
+ msg = _("%(user_id)s must be an admin or a "
+ "member of %(project_id)s")
+ LOG.warn(msg % locals())
+ raise ValueError(msg % locals())
+
+ # Sanity check for storage string
+ storage_list = []
+ if storage is not None:
+ try:
+ storage_list = ast.literal_eval(storage)
+ except (SyntaxError, ValueError):
+ print _("Invalid string format %s") % storage
+ raise
+
+ for node in storage_list:
+ if ('drive_name' not in node) or ('num_drives' not in node):
+ print _("Invalid string format for element %s. "
+ "Expecting keys 'drive_name' & 'num_drives'") % str(node)
+ raise KeyError
+
+ if instance_type_name == '':
+ instance_type_name = None
+ instance_type = instance_types.get_instance_type_by_name(
+ instance_type_name)
+
+ if image_name == '':
+ image_name = None
+
+ if shared in [None, False, "--full_drives"]:
+ shared = False
+ elif shared in [True, "--shared"]:
+ shared = True
+ else:
+ raise ValueError(_('Shared parameter should be set either to '
+ '--shared or --full_drives'))
+
+ values = {
+ 'display_name': name,
+ 'display_description': description,
+ 'vc_count': int(vc_count),
+ 'instance_type': instance_type,
+ 'image_name': image_name,
+ 'availability_zone': az,
+ 'storage': storage_list,
+ 'shared': shared,
+ }
+
+ result = self.vsa_api.create(ctxt, **values)
+ self._list(ctxt, [result])
+
+ @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
+ @args('--name', dest='name', metavar="<name>", help='VSA name')
+ @args('--description', dest='description', metavar="<description>",
+ help='VSA description')
+ @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
+ def update(self, vsa_id, name=None, description=None, vc_count=None):
+ """Updates the name/description of a VSA and the number of VCs."""
+
+ values = {}
+ if name is not None:
+ values['display_name'] = name
+ if description is not None:
+ values['display_description'] = description
+ if vc_count is not None:
+ values['vc_count'] = int(vc_count)
+
+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
+ result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values)
+ self._list(self.context, [result])
+
+ @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
+ def delete(self, vsa_id):
+ """Delete a VSA."""
+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
+ self.vsa_api.delete(self.context, vsa_id)
+
+ @args('--id', dest='vsa_id', metavar="<vsa_id>",
+ help='VSA ID (optional)')
+ @args('--all', dest='all', action="store_true", default=False,
+ help='Show all available details')
+ @args('--drives', dest='drives', action="store_true",
+ help='Include drive-level details')
+ @args('--volumes', dest='volumes', action="store_true",
+ help='Include volume-level details')
+ @args('--instances', dest='instances', action="store_true",
+ help='Include instance-level details')
+ def list(self, vsa_id=None, all=False,
+ drives=False, volumes=False, instances=False):
+ """Describe all available VSAs (or a particular one)."""
+
+ vsas = []
+ if vsa_id is not None:
+ internal_id = ec2utils.ec2_id_to_id(vsa_id)
+ vsa = self.vsa_api.get(self.context, internal_id)
+ vsas.append(vsa)
+ else:
+ vsas = self.vsa_api.get_all(self.context)
+
+ if all:
+ drives = volumes = instances = True
+
+ self._list(self.context, vsas, drives, volumes, instances)
+
+ def update_capabilities(self):
+ """Forces capability updates on all nova-volume nodes."""
+
+ rpc.fanout_cast(context.get_admin_context(),
+ FLAGS.volume_topic,
+ {"method": "notification",
+ "args": {"event": "startup"}})
+
+
+class VsaDriveTypeCommands(object):
+ """Methods for dealing with VSA drive types"""
+
+ def __init__(self, *args, **kwargs):
+ super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
+ self.context = context.get_admin_context()
+ self._drive_type_template = '%s_%sGB_%sRPM'
+
+ def _list(self, drives):
+ format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
+ if len(drives):
+ print format_str %\
+ (_('ID'),
+ _('name'),
+ _('type'),
+ _('size_gb'),
+ _('rpm'),
+ _('capabilities'),
+ _('visible'),
+ _('createTime'))
+
+ for name, vol_type in drives.iteritems():
+ drive = vol_type.get('extra_specs')
+ print format_str %\
+ (str(vol_type['id']),
+ drive['drive_name'],
+ drive['drive_type'],
+ drive['drive_size'],
+ drive['drive_rpm'],
+ drive.get('capabilities', ''),
+ str(drive.get('visible', '')),
+ str(vol_type['created_at']))
+
+ @args('--type', dest='type', metavar="<type>",
+ help='Drive type (SATA, SAS, SSD, etc.)')
+ @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
+ @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
+ @args('--capabilities', dest='capabilities', default=None,
+ metavar="<string>", help='Different capabilities')
+ @args('--hide', dest='hide', action="store_true", default=False,
+ help='Create drive as hidden')
+ @args('--name', dest='name', metavar="<name>", help='Drive name')
+ def create(self, type, size_gb, rpm, capabilities=None,
+ hide=False, name=None):
+ """Create drive type."""
+
+ hide = True if hide in [True, "True", "--hide", "hide"] else False
+
+ if name is None:
+ name = self._drive_type_template % (type, size_gb, rpm)
+
+ extra_specs = {'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': type,
+ 'drive_size': size_gb,
+ 'drive_rpm': rpm,
+ 'visible': True,
+ }
+ if hide:
+ extra_specs['visible'] = False
+
+ if capabilities is not None and capabilities != '':
+ extra_specs['capabilities'] = capabilities
+
+ volume_types.create(self.context, name, extra_specs)
+ result = volume_types.get_volume_type_by_name(self.context, name)
+ self._list({name: result})
+
+ @args('--name', dest='name', metavar="<name>", help='Drive name')
+ @args('--purge', action="store_true", dest='purge', default=False,
+ help='purge record from database')
+ def delete(self, name, purge):
+ """Marks volume (drive) types as deleted."""
+ try:
+ if purge:
+ volume_types.purge(self.context, name)
+ verb = "purged"
+ else:
+ volume_types.destroy(self.context, name)
+ verb = "deleted"
+ except exception.ApiError:
+ print "Valid volume type name is required"
+ sys.exit(1)
+ except exception.DBError, e:
+ print "DB Error: %s" % e
+ sys.exit(2)
+ except:
+ sys.exit(3)
+ else:
+ print "%s %s" % (name, verb)
+
+ @args('--all', dest='all', action="store_true", default=False,
+ help='Show all drives (including invisible)')
+ @args('--name', dest='name', metavar="<name>",
+ help='Show only specified drive')
+ def list(self, all=False, name=None):
+ """Describe all available VSA drive types (or a particular one)."""
+
+ all = True if all in [True, "True", "--all"] else False
+
+ search_opts = {'extra_specs': {'type': 'vsa_drive'}}
+ if name is not None:
+ search_opts['extra_specs']['name'] = name
+
+ if not all:
+ search_opts['extra_specs']['visible'] = '1'
+
+ drives = volume_types.get_all_types(self.context,
+ search_opts=search_opts)
+ self._list(drives)
+
+ @args('--name', dest='name', metavar="<name>", help='Drive name')
+ @args('--type', dest='type', metavar="<type>",
+ help='Drive type (SATA, SAS, SSD, etc.)')
+ @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
+ @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
+ @args('--capabilities', dest='capabilities', default=None,
+ metavar="<string>", help='Different capabilities')
+ @args('--visible', dest='visible',
+ metavar="<show|hide>", help='Show or hide drive')
+ def update(self, name, type=None, size_gb=None, rpm=None,
+ capabilities=None, visible=None):
+ """Update drive type."""
+
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ extra_specs = {'type': 'vsa_drive'}
+
+ if type:
+ extra_specs['drive_type'] = type
+
+ if size_gb:
+ extra_specs['drive_size'] = size_gb
+
+ if rpm:
+ extra_specs['drive_rpm'] = rpm
+
+ if capabilities:
+ extra_specs['capabilities'] = capabilities
+
+ if visible is not None:
+ if visible in ["show", True, "True"]:
+ extra_specs['visible'] = True
+ elif visible in ["hide", False, "False"]:
+ extra_specs['visible'] = False
+ else:
+ raise ValueError(_('visible parameter should be set to '\
+ 'show or hide'))
+
+ db.api.volume_type_extra_specs_update_or_create(self.context,
+ volume_type['id'],
+ extra_specs)
+ result = volume_types.get_volume_type_by_name(self.context, name)
+ self._list({name: result})
+
+
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@@ -1483,6 +1957,7 @@ CATEGORIES = [
('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
+ ('drive', VsaDriveTypeCommands),
('fixed', FixedIpCommands),
('flavor', InstanceTypeCommands),
('floating', FloatingIpCommands),
@@ -1498,7 +1973,8 @@ CATEGORIES = [
('version', VersionCommands),
('vm', VmCommands),
('volume', VolumeCommands),
- ('vpn', VpnCommands)]
+ ('vpn', VpnCommands),
+ ('vsa', VsaCommands)]
def lazy_match(name, key_value_tuples):
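
Note on the --storage argument above: it is a Python-literal list parsed with ast.literal_eval, and the sanity check only requires 'drive_name' and 'num_drives' in each element ('size' is optional). A minimal standalone sketch of that parsing and validation, using a hypothetical drive name built from the '%s_%sGB_%sRPM' template:

    import ast

    storage = "[{'drive_name': 'SATA_500GB_7200RPM', 'num_drives': 3}]"
    storage_list = ast.literal_eval(storage)
    for node in storage_list:
        if 'drive_name' not in node or 'num_drives' not in node:
            raise KeyError("each element needs 'drive_name' and 'num_drives'")
    print(storage_list)
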
diff --git a/bin/nova-vsa b/bin/nova-vsa
new file mode 100755
index 000000000..2d6eee2c0
--- /dev/null
+++ b/bin/nova-vsa
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova VSA."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+from nova import flags
+from nova import log as logging
+from nova import service
+from nova import utils
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ flags.FLAGS(sys.argv)
+ logging.setup()
+ utils.monkey_patch()
+ server = service.Service.create(binary='nova-vsa')
+ service.serve(server)
+ service.wait()
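
Like the other nova-* starter scripts, bin/nova-vsa calls eventlet.monkey_patch() before any other imports, so blocking stdlib calls become cooperative. A small self-contained illustration of that pattern (not nova code):

    import eventlet
    eventlet.monkey_patch()  # patch stdlib before anything else imports it

    import time

    def worker(n):
        time.sleep(0.1)  # cooperative sleep after monkey-patching
        print('worker %d done' % n)

    pool = eventlet.GreenPool()
    for i in range(3):
        pool.spawn(worker, i)
    pool.waitall()
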
diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py
new file mode 100644
index 000000000..e09736a28
--- /dev/null
+++ b/nova/api/openstack/contrib/virtual_storage_arrays.py
@@ -0,0 +1,606 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The virtual storage array extension."""
+
+
+from webob import exc
+
+from nova import vsa
+from nova import volume
+from nova import compute
+from nova import network
+from nova import db
+from nova import quota
+from nova import exception
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+from nova.api.openstack import servers
+from nova.api.openstack.contrib import volumes
+from nova.compute import instance_types
+
+from nova import flags
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger("nova.api.vsa")
+
+
+def _vsa_view(context, vsa, details=False, instances=None):
+ """Map keys for vsa summary/detailed view."""
+ d = {}
+
+ d['id'] = vsa.get('id')
+ d['name'] = vsa.get('name')
+ d['displayName'] = vsa.get('display_name')
+ d['displayDescription'] = vsa.get('display_description')
+
+ d['createTime'] = vsa.get('created_at')
+ d['status'] = vsa.get('status')
+
+ if 'vsa_instance_type' in vsa:
+ d['vcType'] = vsa['vsa_instance_type'].get('name', None)
+ else:
+ d['vcType'] = vsa['instance_type_id']
+
+ d['vcCount'] = vsa.get('vc_count')
+ d['driveCount'] = vsa.get('vol_count')
+
+ d['ipAddress'] = None
+ for instance in instances:
+ fixed_addr = None
+ floating_addr = None
+ if instance['fixed_ips']:
+ fixed = instance['fixed_ips'][0]
+ fixed_addr = fixed['address']
+ if fixed['floating_ips']:
+ floating_addr = fixed['floating_ips'][0]['address']
+
+ if floating_addr:
+ d['ipAddress'] = floating_addr
+ break
+ else:
+ d['ipAddress'] = d['ipAddress'] or fixed_addr
+
+ return d
+
+
+class VsaController(object):
+ """The Virtual Storage Array API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vsa": [
+ "id",
+ "name",
+ "displayName",
+ "displayDescription",
+ "createTime",
+ "status",
+ "vcType",
+ "vcCount",
+ "driveCount",
+ "ipAddress",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ self.compute_api = compute.API()
+ self.network_api = network.API()
+ super(VsaController, self).__init__()
+
+ def _get_instances_by_vsa_id(self, context, id):
+ return self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(id))})
+
+ def _items(self, req, details):
+ """Return summary or detailed list of VSAs."""
+ context = req.environ['nova.context']
+ vsas = self.vsa_api.get_all(context)
+ limited_list = common.limited(vsas, req)
+
+ vsa_list = []
+ for vsa in limited_list:
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ vsa_list.append(_vsa_view(context, vsa, details, instances))
+ return {'vsaSet': vsa_list}
+
+ def index(self, req):
+ """Return a short list of VSAs."""
+ return self._items(req, details=False)
+
+ def detail(self, req):
+ """Return a detailed list of VSAs."""
+ return self._items(req, details=True)
+
+ def show(self, req, id):
+ """Return data about the given VSA."""
+ context = req.environ['nova.context']
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ return {'vsa': _vsa_view(context, vsa, True, instances)}
+
+ def create(self, req, body):
+ """Create a new VSA."""
+ context = req.environ['nova.context']
+
+ if not body or 'vsa' not in body:
+ LOG.debug(_("No body provided"), context=context)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vsa = body['vsa']
+
+ display_name = vsa.get('displayName')
+ vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
+ try:
+ instance_type = instance_types.get_instance_type_by_name(vc_type)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
+ locals(), context=context)
+
+ args = dict(display_name=display_name,
+ display_description=vsa.get('displayDescription'),
+ instance_type=instance_type,
+ storage=vsa.get('storage'),
+ shared=vsa.get('shared'),
+ availability_zone=vsa.get('placement', {}).\
+ get('AvailabilityZone'))
+
+ vsa = self.vsa_api.create(context, **args)
+
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ return {'vsa': _vsa_view(context, vsa, True, instances)}
+
+ def delete(self, req, id):
+ """Delete a VSA."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete VSA with id: %s"), id, context=context)
+
+ try:
+ self.vsa_api.delete(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def associate_address(self, req, id, body):
+ """ /zadr-vsa/{vsa_id}/associate_address
+ Automatically or manually associate an IP address with the VSA.
+ """
+ context = req.environ['nova.context']
+
+ if body is None:
+ ip = 'auto'
+ else:
+ ip = body.get('ipAddress', 'auto')
+
+ LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
+ locals(), context=context)
+
+ try:
+ instances = self._get_instances_by_vsa_id(context, id)
+ if instances is None or len(instances) == 0:
+ return faults.Fault(exc.HTTPNotFound())
+
+ for instance in instances:
+ self.network_api.allocate_for_instance(context, instance,
+ vpn=False)
+ # Placeholder
+ return
+
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def disassociate_address(self, req, id, body):
+ """ /zadr-vsa/{vsa_id}/disassociate_address
+ Automatically or manually disassociate an IP address from the VSA.
+ """
+ context = req.environ['nova.context']
+
+ if body is None:
+ ip = 'auto'
+ else:
+ ip = body.get('ipAddress', 'auto')
+
+ LOG.audit(_("Disassociate address from VSA %(id)s"),
+ locals(), context=context)
+ # Placeholder
+
+
+class VsaVolumeDriveController(volumes.VolumeController):
+ """The base class for VSA volumes & drives.
+
+ A child resource of the VSA object. Allows operations on the
+ volumes and drives created from/for a particular VSA.
+
+ """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "volume": [
+ "id",
+ "name",
+ "status",
+ "size",
+ "availabilityZone",
+ "createdAt",
+ "displayName",
+ "displayDescription",
+ "vsaId",
+ ]}}}
+
+ def __init__(self):
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+ super(VsaVolumeDriveController, self).__init__()
+
+ def _translation(self, context, vol, vsa_id, details):
+ if details:
+ translation = volumes._translate_volume_detail_view
+ else:
+ translation = volumes._translate_volume_summary_view
+
+ d = translation(context, vol)
+ d['vsaId'] = vsa_id
+ d['name'] = vol['name']
+ return d
+
+ def _check_volume_ownership(self, context, vsa_id, id):
+ obj = self.object
+ try:
+ volume_ref = self.volume_api.get(context, volume_id=id)
+ except exception.NotFound:
+ LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
+ raise
+
+ own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref,
+ self.direction)
+ if own_vsa_id != vsa_id:
+ LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\
+ " and not to VSA %(vsa_id)s."), locals())
+ raise exception.Invalid()
+
+ def _items(self, req, vsa_id, details):
+ """Return summary or detailed list of volumes for a particular VSA."""
+ context = req.environ['nova.context']
+
+ vols = self.volume_api.get_all(context,
+ search_opts={'metadata': {self.direction: str(vsa_id)}})
+ limited_list = common.limited(vols, req)
+
+ res = [self._translation(context, vol, vsa_id, details) \
+ for vol in limited_list]
+
+ return {self.objects: res}
+
+ def index(self, req, vsa_id):
+ """Return a short list of volumes created from a particular VSA."""
+ LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=False)
+
+ def detail(self, req, vsa_id):
+ """Return a detailed list of volumes created from a particular VSA."""
+ LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=True)
+
+ def create(self, req, vsa_id, body):
+ """Create a new volume from VSA."""
+ LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())
+ context = req.environ['nova.context']
+
+ if not body:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol = body[self.object]
+ size = vol['size']
+ LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
+ locals(), context=context)
+ try:
+ # create is supported for volumes only (drives created through VSA)
+ volume_type = self.vsa_api.get_vsa_volume_type(context)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ new_volume = self.volume_api.create(context,
+ size,
+ None,
+ vol.get('displayName'),
+ vol.get('displayDescription'),
+ volume_type=volume_type,
+ metadata=dict(from_vsa_id=str(vsa_id)))
+
+ return {self.object: self._translation(context, new_volume,
+ vsa_id, True)}
+
+ def update(self, req, vsa_id, id, body):
+ """Update a volume."""
+ context = req.environ['nova.context']
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ vol = body[self.object]
+ updatable_fields = [('displayName', 'display_name'),
+ ('displayDescription', 'display_description'),
+ ('status', 'status'),
+ ('providerLocation', 'provider_location'),
+ ('providerAuth', 'provider_auth')]
+ changes = {}
+ for key, val in updatable_fields:
+ if key in vol:
+ changes[val] = vol[key]
+
+ obj = self.object
+ LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
+ locals(), context=context)
+
+ try:
+ self.volume_api.update(context, volume_id=id, fields=changes)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ def delete(self, req, vsa_id, id):
+ """Delete a volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).delete(req, id)
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).show(req, id)
+
+
+class VsaVolumeController(VsaVolumeDriveController):
+ """The VSA volume API controller for the OpenStack API.
+
+ A child resource of the VSA object. Allows operations on volumes
+ created by a particular VSA.
+
+ """
+
+ def __init__(self):
+ self.direction = 'from_vsa_id'
+ self.objects = 'volumes'
+ self.object = 'volume'
+ super(VsaVolumeController, self).__init__()
+
+
+class VsaDriveController(VsaVolumeDriveController):
+ """The VSA Drive API controller for the OpenStack API.
+
+ A child resource of the VSA object. Allows operations on drives
+ created for a particular VSA.
+
+ """
+
+ def __init__(self):
+ self.direction = 'to_vsa_id'
+ self.objects = 'drives'
+ self.object = 'drive'
+ super(VsaDriveController, self).__init__()
+
+ def create(self, req, vsa_id, body):
+ """Create a new drive for VSA. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update a drive. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a drive. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVPoolController(object):
+ """The vPool VSA API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vpool": [
+ "id",
+ "vsaId",
+ "name",
+ "displayName",
+ "displayDescription",
+ "driveCount",
+ "driveIds",
+ "protection",
+ "stripeSize",
+ "stripeWidth",
+ "createTime",
+ "status",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ super(VsaVPoolController, self).__init__()
+
+ def index(self, req, vsa_id):
+ """Return a short list of vpools created from a particular VSA."""
+ return {'vpools': []}
+
+ def create(self, req, vsa_id, body):
+ """Create a new vPool for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update vPool parameters."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVCController(servers.ControllerV11):
+ """The VSA Virtual Controller API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ self.compute_api = compute.API()
+ self.vsa_id = None # VP-TODO: temporary ugly hack
+ super(VsaVCController, self).__init__()
+
+ def _get_servers(self, req, is_detail):
+ """Returns a list of servers, taking into account any search
+ options specified.
+ """
+
+ if self.vsa_id is None:
+ return super(VsaVCController, self)._get_servers(req, is_detail)
+
+ context = req.environ['nova.context']
+
+ search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))}
+ instance_list = self.compute_api.get_all(
+ context, search_opts=search_opts)
+
+ limited_list = self._limit_items(instance_list, req)
+ servers = [self._build_view(req, inst, is_detail)['server']
+ for inst in limited_list]
+ return dict(servers=servers)
+
+ def index(self, req, vsa_id):
+ """Return the list of instances for a particular VSA."""
+
+ LOG.audit(_("Index instances for VSA %s"), vsa_id)
+
+ self.vsa_id = vsa_id # VP-TODO: temporary ugly hack
+ result = super(VsaVCController, self).detail(req)
+ self.vsa_id = None
+ return result
+
+ def create(self, req, vsa_id, body):
+ """Create a new instance for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update VSA instance."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete VSA instance."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given instance."""
+ return super(VsaVCController, self).show(req, id)
+
+
+class Virtual_storage_arrays(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "VSAs"
+
+ def get_alias(self):
+ return "zadr-vsa"
+
+ def get_description(self):
+ return "Virtual Storage Arrays support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/vsa/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-25T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'zadr-vsa',
+ VsaController(),
+ collection_actions={'detail': 'GET'},
+ member_actions={'add_capacity': 'POST',
+ 'remove_capacity': 'POST',
+ 'associate_address': 'POST',
+ 'disassociate_address': 'POST'})
+ resources.append(res)
+
+ res = extensions.ResourceExtension('volumes',
+ VsaVolumeController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('drives',
+ VsaDriveController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('vpools',
+ VsaVPoolController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('instances',
+ VsaVCController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ return resources
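
With the extension loaded, creating a VSA is a POST to the zadr-vsa collection. A hypothetical request body matching the keys VsaController.create() reads; all values are illustrative, and vcType must name an existing instance type:

    import json

    body = {'vsa': {
        'displayName': 'vsa-test',
        'displayDescription': 'demo array',
        'vcType': 'm1.small',
        'storage': [{'drive_name': 'SATA_500GB_7200RPM', 'num_drives': 3}],
        'shared': False,
        'placement': {'AvailabilityZone': 'nova'},
    }}
    print(json.dumps(body, indent=2))
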
diff --git a/nova/db/api.py b/nova/db/api.py
index 3bb9b4970..a2e581fe9 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x',
'Template string to be used to generate volume names')
flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
'Template string to be used to generate snapshot names')
-
+flags.DEFINE_string('vsa_name_template', 'vsa-%08x',
+ 'Template string to be used to generate VSA names')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
sqlalchemy='nova.db.sqlalchemy.api')
@@ -1512,3 +1513,36 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
key/value pairs specified in the extra specs dict argument"""
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs)
+
+
+####################
+
+
+def vsa_create(context, values):
+ """Creates Virtual Storage Array record."""
+ return IMPL.vsa_create(context, values)
+
+
+def vsa_update(context, vsa_id, values):
+ """Updates Virtual Storage Array record."""
+ return IMPL.vsa_update(context, vsa_id, values)
+
+
+def vsa_destroy(context, vsa_id):
+ """Deletes Virtual Storage Array record."""
+ return IMPL.vsa_destroy(context, vsa_id)
+
+
+def vsa_get(context, vsa_id):
+ """Get Virtual Storage Array record by ID."""
+ return IMPL.vsa_get(context, vsa_id)
+
+
+def vsa_get_all(context):
+ """Get all Virtual Storage Array records."""
+ return IMPL.vsa_get_all(context)
+
+
+def vsa_get_all_by_project(context, project_id):
+ """Get all Virtual Storage Array records by project ID."""
+ return IMPL.vsa_get_all_by_project(context, project_id)
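
The new vsa_name_template follows the instance/volume/snapshot templates: the VSA's name property (see the models.py hunk below) renders the integer id through it. For example:

    vsa_name_template = 'vsa-%08x'
    print(vsa_name_template % 42)  # -> vsa-0000002a
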
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7b78e286d..24e1772f6 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -3825,3 +3825,105 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
+
+
+####################
+
+
+@require_admin_context
+def vsa_create(context, values):
+ """Creates Virtual Storage Array record."""
+ try:
+ vsa_ref = models.VirtualStorageArray()
+ vsa_ref.update(values)
+ vsa_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_update(context, vsa_id, values):
+ """Updates Virtual Storage Array record."""
+ session = get_session()
+ with session.begin():
+ vsa_ref = vsa_get(context, vsa_id, session=session)
+ vsa_ref.update(values)
+ vsa_ref.save(session=session)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_destroy(context, vsa_id):
+ """Deletes Virtual Storage Array record."""
+ session = get_session()
+ with session.begin():
+ session.query(models.VirtualStorageArray).\
+ filter_by(id=vsa_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def vsa_get(context, vsa_id, session=None):
+ """Get Virtual Storage Array record by ID."""
+ if not session:
+ session = get_session()
+ result = None
+
+ if is_admin_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.VirtualStorageArrayNotFound(id=vsa_id)
+
+ return result
+
+
+@require_admin_context
+def vsa_get_all(context):
+ """Get all Virtual Storage Array records."""
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def vsa_get_all_by_project(context, project_id):
+ """Get all Virtual Storage Array records by project ID."""
+ authorize_project_context(context, project_id)
+
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+####################
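
vsa_destroy above soft-deletes the row; assigning literal_column('updated_at') to updated_at appears intended to keep the timestamp untouched by setting the column to itself in the generated UPDATE. A sketch of the idea:

    from sqlalchemy import literal_column

    values = {'deleted': True,
              'updated_at': literal_column('updated_at')}
    # renders roughly as: UPDATE ... SET deleted=1, updated_at=updated_at
    print(values)
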
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
new file mode 100644
index 000000000..844643704
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Text, Boolean, ForeignKey
+
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+
+virtual_storage_arrays = Table('virtual_storage_arrays', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_type_id', Integer(), nullable=False),
+ Column('image_ref',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vc_count', Integer(), nullable=False),
+ Column('vol_count', Integer(), nullable=False),
+ Column('status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ virtual_storage_arrays.create()
+ except Exception:
+ logging.info(repr(virtual_storage_arrays))
+ logging.exception('Exception while creating table')
+ raise
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ virtual_storage_arrays.drop()
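
Normally `nova-manage db sync` applies this migration; a hedged sketch of driving it directly with sqlalchemy-migrate (the sqlite URL is an assumption for a local test database):

    from migrate.versioning import api as versioning_api

    SQL_URL = 'sqlite:///nova.sqlite'          # assumed local test DB
    REPO = 'nova/db/sqlalchemy/migrate_repo'
    versioning_api.upgrade(SQL_URL, REPO, 43)  # up to 043_add_vsa_data
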
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 765deb479..bb05986c9 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -64,6 +64,7 @@ def db_version():
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
+ 'virtual_storage_arrays',
'volumes', 'volume_metadata',
'volume_types', 'volume_type_extra_specs'):
assert table in meta.tables
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 1533cfb2c..854034f12 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -240,6 +240,32 @@ class Instance(BASE, NovaBase):
access_ip_v6 = Column(String(255))
+class VirtualStorageArray(BASE, NovaBase):
+ """
+ Represents a virtual storage array supplying block storage to instances.
+ """
+ __tablename__ = 'virtual_storage_arrays'
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return FLAGS.vsa_name_template % self.id
+
+ # User editable field for display in user-facing UIs
+ display_name = Column(String(255))
+ display_description = Column(String(255))
+
+ project_id = Column(String(255))
+ availability_zone = Column(String(255))
+
+ instance_type_id = Column(Integer, ForeignKey('instance_types.id'))
+ image_ref = Column(String(255))
+ vc_count = Column(Integer, default=0) # number of requested VC instances
+ vol_count = Column(Integer, default=0) # total number of BE volumes
+ status = Column(String(255))
+
+
class InstanceActions(BASE, NovaBase):
"""Represents a guest VM's actions and results"""
__tablename__ = "instance_actions"
@@ -269,6 +295,12 @@ class InstanceTypes(BASE, NovaBase):
primaryjoin='and_(Instance.instance_type_id == '
'InstanceTypes.id)')
+ vsas = relationship(VirtualStorageArray,
+ backref=backref('vsa_instance_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(VirtualStorageArray.instance_type_id'
+ ' == InstanceTypes.id)')
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
@@ -838,7 +870,8 @@ def register_models():
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs,
- AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
+ AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration,
+ VirtualStorageArray)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
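
The backref with uselist=False is what lets callers elsewhere in this patch read vsa['vsa_instance_type'] as a scalar. A self-contained sketch of the same relationship pattern, with simplified columns:

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import backref, relationship, sessionmaker

    Base = declarative_base()

    class InstanceTypes(Base):
        __tablename__ = 'instance_types'
        id = Column(Integer, primary_key=True)
        name = Column(String(255))

    class VirtualStorageArray(Base):
        __tablename__ = 'virtual_storage_arrays'
        id = Column(Integer, primary_key=True)
        instance_type_id = Column(Integer, ForeignKey('instance_types.id'))

    # uselist=False makes vsa.vsa_instance_type a scalar, not a list
    InstanceTypes.vsas = relationship(
        VirtualStorageArray,
        backref=backref('vsa_instance_type', uselist=False))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(InstanceTypes(id=1, name='m1.small'))
    session.add(VirtualStorageArray(id=1, instance_type_id=1))
    session.commit()
    print(session.query(VirtualStorageArray).one().vsa_instance_type.name)
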
diff --git a/nova/exception.py b/nova/exception.py
index 111dcfd6d..a53891955 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -783,6 +783,18 @@ class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+class VSANovaAccessParamNotFound(Invalid):
+ message = _("Nova access parameters were not specified.")
+
+
+class VirtualStorageArrayNotFound(NotFound):
+ message = _("Virtual Storage Array %(id)d could not be found.")
+
+
+class VirtualStorageArrayNotFoundByName(NotFound):
+ message = _("Virtual Storage Array %(name)s could not be found.")
+
+
class CannotResizeToSameSize(NovaException):
message = _("When resizing, instances must change size!")
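
The two new NotFound classes rely on NovaException-style %-formatting of the message from keyword arguments; a tiny standalone sketch of that mechanism (simplified, not the actual NovaException):

    class VirtualStorageArrayNotFound(Exception):
        message = "Virtual Storage Array %(id)d could not be found."

        def __init__(self, **kwargs):
            Exception.__init__(self, self.message % kwargs)

    try:
        raise VirtualStorageArrayNotFound(id=123)
    except VirtualStorageArrayNotFound as e:
        print(e)  # Virtual Storage Array 123 could not be found.
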
diff --git a/nova/flags.py b/nova/flags.py
index 95000df1b..a5951ebc8 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url',
in the form "http://127.0.0.1:8000"')
DEFINE_string('ajax_console_proxy_port',
8000, 'port that ajax_console_proxy binds')
+DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -371,6 +372,17 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
+DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
+ 'Manager for vsa')
+DEFINE_string('vc_image_name', 'vc_image',
+ 'name of the VC image that exists in the Glance DB')
+# VSA constants and enums
+DEFINE_string('default_vsa_instance_type', 'm1.small',
+ 'default instance type for VSA instances')
+DEFINE_integer('max_vcs_in_vsa', 32,
+ 'maximum VCs in a VSA')
+DEFINE_integer('vsa_part_size_gb', 100,
+ 'default partition size for shared capacity')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
index 27bb01988..8a08510ac 100644
--- a/nova/ipv6/account_identifier.py
+++ b/nova/ipv6/account_identifier.py
@@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id):
except TypeError:
raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
except NameError:
- raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id)
+ raise TypeError(_('Bad project_id for to_global_ipv6: %s') %
+ project_id)
def to_mac(ipv6_address):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 404a3180e..b4605eea5 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -484,17 +484,17 @@ class NetworkManager(manager.SchedulerDependentManager):
# TODO(tr3buchet) eventually "enabled" should be determined
def ip_dict(ip):
return {
- "ip": ip,
- "netmask": network["netmask"],
- "enabled": "1"}
+ 'ip': ip,
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
def ip6_dict():
return {
- "ip": ipv6.to_global(network['cidr_v6'],
+ 'ip': ipv6.to_global(network['cidr_v6'],
vif['address'],
network['project_id']),
- "netmask": network['netmask_v6'],
- "enabled": "1"}
+ 'netmask': network['netmask_v6'],
+ 'enabled': '1'}
network_dict = {
'bridge': network['bridge'],
'id': network['id'],
diff --git a/nova/quota.py b/nova/quota.py
index 48e598659..771477747 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -116,8 +116,9 @@ def allowed_volumes(context, requested_volumes, size):
allowed_gigabytes = _get_request_allotment(requested_gigabytes,
used_gigabytes,
quota['gigabytes'])
- allowed_volumes = min(allowed_volumes,
- int(allowed_gigabytes // size))
+ if size != 0:
+ allowed_volumes = min(allowed_volumes,
+ int(allowed_gigabytes // size))
return min(requested_volumes, allowed_volumes)
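
The size != 0 guard matters because VSA quota checks can pass size 0, which previously hit the integer division. A standalone illustration of the changed arithmetic (the helper name is illustrative):

    def allowed(requested_volumes, allowed_volumes, allowed_gigabytes, size):
        if size != 0:
            allowed_volumes = min(allowed_volumes,
                                  int(allowed_gigabytes // size))
        return min(requested_volumes, allowed_volumes)

    print(allowed(3, 10, 100, 20))  # -> 3
    print(allowed(3, 10, 100, 0))   # size 0 no longer divides -> 3
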
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
new file mode 100644
index 000000000..6962dd86b
--- /dev/null
+++ b/nova/scheduler/vsa.py
@@ -0,0 +1,535 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VSA Simple Scheduler
+"""
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+from nova.scheduler import driver
+from nova.scheduler import simple
+from nova.vsa.api import VsaState
+from nova.volume import volume_types
+
+LOG = logging.getLogger('nova.scheduler.vsa')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
+ 'The percentage range for capacity comparison')
+flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
+ 'The number of unique hosts per storage allocation')
+flags.DEFINE_boolean('vsa_select_unique_drives', True,
+ 'If True, place drives for one allocation on distinct hosts')
+
+
+def BYTES_TO_GB(bytes):
+ return bytes >> 30
+
+
+def GB_TO_BYTES(gb):
+ return gb << 30
+
+
+class VsaScheduler(simple.SimpleScheduler):
+ """Implements Scheduler for volume placement."""
+
+ def __init__(self, *args, **kwargs):
+ super(VsaScheduler, self).__init__(*args, **kwargs)
+ self._notify_all_volume_hosts("startup")
+
+ def _notify_all_volume_hosts(self, event):
+ rpc.fanout_cast(context.get_admin_context(),
+ FLAGS.volume_topic,
+ {"method": "notification",
+ "args": {"event": event}})
+
+ def _qosgrp_match(self, drive_type, qos_values):
+
+ def _compare_names(str1, str2):
+ return str1.lower() == str2.lower()
+
+ def _compare_sizes_approxim(cap_capacity, size):
+ cap_capacity = BYTES_TO_GB(int(cap_capacity))
+ size = int(size)
+ size_perc = size * \
+ FLAGS.drive_type_approx_capacity_percent / 100
+
+ return cap_capacity >= size - size_perc and \
+ cap_capacity <= size + size_perc
+
+ # Add more entries for additional comparisons
+ compare_list = [{'cap1': 'DriveType',
+ 'cap2': 'type',
+ 'cmp_func': _compare_names},
+ {'cap1': 'DriveCapacity',
+ 'cap2': 'size',
+ 'cmp_func': _compare_sizes_approxim}]
+
+ for cap in compare_list:
+ if cap['cap1'] in qos_values.keys() and \
+ cap['cap2'] in drive_type.keys() and \
+ cap['cmp_func'] is not None and \
+ cap['cmp_func'](qos_values[cap['cap1']],
+ drive_type[cap['cap2']]):
+ pass
+ else:
+ return False
+ return True
+
+ def _get_service_states(self):
+ return self.zone_manager.service_states
+
+ def _filter_hosts(self, topic, request_spec, host_list=None):
+
+ LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())
+
+ drive_type = request_spec['drive_type']
+ LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])
+
+ if host_list is None:
+ host_list = self._get_service_states().iteritems()
+
+ filtered_hosts = [] # returns list of (hostname, capability_dict)
+ for host, host_dict in host_list:
+ for service_name, service_dict in host_dict.iteritems():
+ if service_name != topic:
+ continue
+
+ qos_info = service_dict.get('drive_qos_info', {})
+ for qosgrp, qos_values in qos_info.iteritems():
+ if self._qosgrp_match(drive_type, qos_values):
+ if qos_values['AvailableCapacity'] > 0:
+ filtered_hosts.append((host, qos_info))
+ else:
+ LOG.debug(_("Host %s has no free capacity. Skip"),
+ host)
+ break
+
+ host_names = [item[0] for item in filtered_hosts]
+ LOG.debug(_("Filter hosts: %s"), host_names)
+ return filtered_hosts
+
+ def _allowed_to_use_host(self, host, selected_hosts, unique):
+ return not unique or \
+ host not in [item[0] for item in selected_hosts]
+
+ def _add_hostcap_to_list(self, selected_hosts, host, cap):
+ if host not in [item[0] for item in selected_hosts]:
+ selected_hosts.append((host, cap))
+
+ def host_selection_algorithm(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ """Must override this method for VSA scheduler to work."""
+ raise NotImplementedError(_("Must implement host selection mechanism"))
+
+ def _select_hosts(self, request_spec, all_hosts, selected_hosts=None):
+
+ if selected_hosts is None:
+ selected_hosts = []
+
+ host = None
+ if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc:
+ # try to select from already selected hosts only
+ LOG.debug(_("Maximum number of hosts selected (%d)"),
+ len(selected_hosts))
+ unique = False
+ (host, qos_cap) = self.host_selection_algorithm(request_spec,
+ selected_hosts,
+ selected_hosts,
+ unique)
+
+ LOG.debug(_("Reusing already selected host %(host)s"), locals())
+ else:
+ unique = FLAGS.vsa_select_unique_drives
+
+ if host is None:
+ # if we've not tried yet (# of sel hosts < max) - unique=True
+ # or failed to select from selected_hosts - unique=False
+ # select from all hosts
+ (host, qos_cap) = self.host_selection_algorithm(request_spec,
+ all_hosts,
+ selected_hosts,
+ unique)
+ if host is None:
+ raise driver.WillNotSchedule(_("No available hosts"))
+
+ return (host, qos_cap)
+
+ def _provision_volume(self, context, vol, vsa_id, availability_zone):
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ now = utils.utcnow()
+ options = {
+ 'size': vol['size'],
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'snapshot_id': None,
+ 'availability_zone': availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': vol['name'],
+ 'display_description': vol['description'],
+ 'volume_type_id': vol['volume_type_id'],
+ 'metadata': dict(to_vsa_id=vsa_id),
+ 'host': vol['host'],
+ 'scheduled_at': now
+ }
+
+ size = vol['size']
+ host = vol['host']
+ name = vol['name']
+ LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
+ "host %(host)s"), locals())
+
+ volume_ref = db.volume_create(context, options)
+ rpc.cast(context,
+ db.queue_get_for(context, "volume", vol['host']),
+ {"method": "create_volume",
+ "args": {"volume_id": volume_ref['id'],
+ "snapshot_id": None}})
+
+ def _check_host_enforcement(self, context, availability_zone):
+ if (availability_zone
+ and ':' in availability_zone
+ and context.is_admin):
+ zone, _x, host = availability_zone.partition(':')
+ service = db.service_get_by_args(context.elevated(), host,
+ 'nova-volume')
+ if not self.service_is_up(service):
+ raise driver.WillNotSchedule(_("Host %s not available") % host)
+
+ return host
+ else:
+ return None
+
+ def _assign_hosts_to_volumes(self, context, volume_params, forced_host):
+
+ prev_volume_type_id = None
+ request_spec = {}
+ selected_hosts = []
+
+ LOG.debug(_("volume_params %(volume_params)s") % locals())
+
+ i = 1
+ for vol in volume_params:
+ name = vol['name']
+ LOG.debug(_("%(i)d: Volume %(name)s"), locals())
+ i += 1
+
+ if forced_host:
+ vol['host'] = forced_host
+ vol['capabilities'] = None
+ continue
+
+ volume_type_id = vol['volume_type_id']
+ request_spec['size'] = vol['size']
+
+ if prev_volume_type_id is None or\
+ prev_volume_type_id != volume_type_id:
+ # generate list of hosts for this drive type
+
+ volume_type = volume_types.get_volume_type(context,
+ volume_type_id)
+ drive_type = {
+ 'name': volume_type['extra_specs'].get('drive_name'),
+ 'type': volume_type['extra_specs'].get('drive_type'),
+ 'size': int(volume_type['extra_specs'].get('drive_size')),
+ 'rpm': volume_type['extra_specs'].get('drive_rpm'),
+ }
+ request_spec['drive_type'] = drive_type
+
+ all_hosts = self._filter_hosts("volume", request_spec)
+ prev_volume_type_id = volume_type_id
+
+ (host, qos_cap) = self._select_hosts(request_spec,
+ all_hosts, selected_hosts)
+ vol['host'] = host
+ vol['capabilities'] = qos_cap
+ self._consume_resource(qos_cap, vol['size'], -1)
+
+ def schedule_create_volumes(self, context, request_spec,
+ availability_zone=None, *_args, **_kwargs):
+ """Picks hosts for hosting multiple volumes."""
+
+ num_volumes = request_spec.get('num_volumes')
+ LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
+ locals())
+
+ vsa_id = request_spec.get('vsa_id')
+ volume_params = request_spec.get('volumes')
+
+ host = self._check_host_enforcement(context, availability_zone)
+
+ try:
+ self._print_capabilities_info()
+
+ self._assign_hosts_to_volumes(context, volume_params, host)
+
+ for vol in volume_params:
+ self._provision_volume(context, vol, vsa_id, availability_zone)
+ except:
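+ # Roll back on any failure: mark the VSA as FAILED and return
+ # the capacity already reserved for volumes that were assigned
+ # a host, then re-raise.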
+ if vsa_id:
+ db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))
+
+ for vol in volume_params:
+ if 'capabilities' in vol:
+ self._consume_resource(vol['capabilities'],
+ vol['size'], 1)
+ raise
+
+ return None
+
+ def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
+ """Picks the best host based on requested drive type capability."""
+ volume_ref = db.volume_get(context, volume_id)
+
+ host = self._check_host_enforcement(context,
+ volume_ref['availability_zone'])
+ if host:
+ now = utils.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ return host
+
+ volume_type_id = volume_ref['volume_type_id']
+ if volume_type_id:
+ volume_type = volume_types.get_volume_type(context, volume_type_id)
+
+ if volume_type_id is None or\
+ volume_types.is_vsa_volume(volume_type_id, volume_type):
+
+ LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
+ return super(VsaScheduler, self).schedule_create_volume(context,
+ volume_id, *_args, **_kwargs)
+
+ self._print_capabilities_info()
+
+ drive_type = {
+ 'name': volume_type['extra_specs'].get('drive_name'),
+ 'type': volume_type['extra_specs'].get('drive_type'),
+ 'size': int(volume_type['extra_specs'].get('drive_size')),
+ 'rpm': volume_type['extra_specs'].get('drive_rpm'),
+ }
+
+ LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
+ "%(drive_type)s"), locals())
+
+ request_spec = {'size': volume_ref['size'],
+ 'drive_type': drive_type}
+ hosts = self._filter_hosts("volume", request_spec)
+
+ try:
+ (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
+ except:
+ if volume_ref['to_vsa_id']:
+ db.vsa_update(context, volume_ref['to_vsa_id'],
+ dict(status=VsaState.FAILED))
+ raise
+
+ if host:
+ now = utils.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ self._consume_resource(qos_cap, volume_ref['size'], -1)
+ return host
+
+ def _consume_full_drive(self, qos_values, direction):
+ qos_values['FullDrive']['NumFreeDrives'] += direction
+ qos_values['FullDrive']['NumOccupiedDrives'] -= direction
+
+ def _consume_partition(self, qos_values, size, direction):
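+ # direction is -1 to consume a partition and +1 to release it.
+ # Illustrative example (numbers not from the original change):
+ # with DriveCapacity of 100GB, PartitionSize 0 and a 50GB request,
+ # part_per_drive is 2; consuming with no free partitions first
+ # claims a full drive, adds its 2 partitions, then takes one.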
+
+ if qos_values['PartitionDrive']['PartitionSize'] != 0:
+ partition_size = qos_values['PartitionDrive']['PartitionSize']
+ else:
+ partition_size = size
+ part_per_drive = qos_values['DriveCapacity'] / partition_size
+
+ if direction == -1 and \
+ qos_values['PartitionDrive']['NumFreePartitions'] == 0:
+
+ self._consume_full_drive(qos_values, direction)
+ qos_values['PartitionDrive']['NumFreePartitions'] += \
+ part_per_drive
+
+ qos_values['PartitionDrive']['NumFreePartitions'] += direction
+ qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction
+
+ if direction == 1 and \
+ qos_values['PartitionDrive']['NumFreePartitions'] >= \
+ part_per_drive:
+
+ self._consume_full_drive(qos_values, direction)
+ qos_values['PartitionDrive']['NumFreePartitions'] -= \
+ part_per_drive
+
+ def _consume_resource(self, qos_values, size, direction):
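+ """Adjust cached host capability counters. direction=-1 consumes
+ capacity, direction=1 returns it (used when scheduling fails)."""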
+ if qos_values is None:
+ LOG.debug(_("No capability selected for volume of size %(size)s"),
+ locals())
+ return
+
+ if size == 0: # full drive match
+ qos_values['AvailableCapacity'] += direction * \
+ qos_values['DriveCapacity']
+ self._consume_full_drive(qos_values, direction)
+ else:
+ qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size)
+ self._consume_partition(qos_values, GB_TO_BYTES(size), direction)
+ return
+
+ def _print_capabilities_info(self):
+ host_list = self._get_service_states().iteritems()
+ for host, host_dict in host_list:
+ for service_name, service_dict in host_dict.iteritems():
+ if service_name != "volume":
+ continue
+
+ LOG.info(_("Host %s:"), host)
+
+ qos_info = service_dict.get('drive_qos_info', {})
+ for qosgrp, qos_values in qos_info.iteritems():
+ total = qos_values['TotalDrives']
+ used = qos_values['FullDrive']['NumOccupiedDrives']
+ free = qos_values['FullDrive']['NumFreeDrives']
+ avail = BYTES_TO_GB(qos_values['AvailableCapacity'])
+
+ LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\
+ "used %(used)2s, free %(free)2s. Available "\
+ "capacity %(avail)-5s"), locals())
+
+
+class VsaSchedulerLeastUsedHost(VsaScheduler):
+ """
+ Implements a VSA scheduler that selects the host with the least
+ used capacity of a particular drive type.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs)
+
+ def host_selection_algorithm(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ size = request_spec['size']
+ drive_type = request_spec['drive_type']
+ best_host = None
+ best_qoscap = None
+ best_cap = None
+ min_used = 0
+
+ for (host, capabilities) in all_hosts:
+
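+ # Used capacity is accumulated across all qos groups of the host,
+ # so the least-used ranking reflects total host utilization, not
+ # just utilization of the matched drive type.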
+ has_enough_capacity = False
+ used_capacity = 0
+ for qosgrp, qos_values in capabilities.iteritems():
+
+ used_capacity = used_capacity + qos_values['TotalCapacity'] \
+ - qos_values['AvailableCapacity']
+
+ if self._qosgrp_match(drive_type, qos_values):
+ # we found required qosgroup
+
+ if size == 0: # full drive match
+ if qos_values['FullDrive']['NumFreeDrives'] > 0:
+ has_enough_capacity = True
+ matched_qos = qos_values
+ else:
+ break
+ else:
+ if qos_values['AvailableCapacity'] >= size and \
+ (qos_values['PartitionDrive'][
+ 'NumFreePartitions'] > 0 or \
+ qos_values['FullDrive']['NumFreeDrives'] > 0):
+ has_enough_capacity = True
+ matched_qos = qos_values
+ else:
+ break
+
+ if has_enough_capacity and \
+ self._allowed_to_use_host(host,
+ selected_hosts,
+ unique) and \
+ (best_host is None or used_capacity < min_used):
+
+ min_used = used_capacity
+ best_host = host
+ best_qoscap = matched_qos
+ best_cap = capabilities
+
+ if best_host:
+ self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
+ min_used = BYTES_TO_GB(min_used)
+ LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\
+ "(used capacity %(min_used)s)"), locals())
+ return (best_host, best_qoscap)
+
+
+class VsaSchedulerMostAvailCapacity(VsaScheduler):
+ """
+ Implements a VSA scheduler that selects the host with the most
+ available capacity of a particular drive type.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs)
+
+ def host_selection_algorithm(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ size = request_spec['size']
+ drive_type = request_spec['drive_type']
+ best_host = None
+ best_qoscap = None
+ best_cap = None
+ max_avail = 0
+
+ for (host, capabilities) in all_hosts:
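+ # Only the first matching qos group per host is considered; the
+ # 'break' below moves on to the next host once it is evaluated.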
+ for qosgrp, qos_values in capabilities.iteritems():
+ if self._qosgrp_match(drive_type, qos_values):
+ # we found required qosgroup
+
+ if size == 0: # full drive match
+ available = qos_values['FullDrive']['NumFreeDrives']
+ else:
+ available = qos_values['AvailableCapacity']
+
+ if available > max_avail and \
+ self._allowed_to_use_host(host,
+ selected_hosts,
+ unique):
+ max_avail = available
+ best_host = host
+ best_qoscap = qos_values
+ best_cap = capabilities
+ break # go to the next host
+
+ if best_host:
+ self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
+ type_str = "drives" if size == 0 else "bytes"
+ LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "\
+ "(available %(max_avail)s %(type_str)s)"), locals())
+
+ return (best_host, best_qoscap)
diff --git a/nova/test.py b/nova/test.py
index 88f1489e8..d759aef60 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -277,3 +277,21 @@ class TestCase(unittest.TestCase):
continue
else:
self.assertEqual(sub_value, super_value)
+
+ def assertIn(self, a, b, *args, **kwargs):
+ """Python < v2.7 compatibility. Assert 'a' in 'b'"""
+ try:
+ f = super(TestCase, self).assertIn
+ except AttributeError:
+ self.assertTrue(a in b, *args, **kwargs)
+ else:
+ f(a, b, *args, **kwargs)
+
+ def assertNotIn(self, a, b, *args, **kwargs):
+ """Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
+ try:
+ f = super(TestCase, self).assertNotIn
+ except AttributeError:
+ self.assertFalse(a in b, *args, **kwargs)
+ else:
+ f(a, b, *args, **kwargs)
diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py
new file mode 100644
index 000000000..311b6cb8d
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_vsa.py
@@ -0,0 +1,450 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import volume
+from nova import vsa
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.api.openstack.vsa')
+
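+# The stubs below record the parameters they receive in this
+# module-level dict so tests can verify what the API layer passed.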
+last_param = {}
+
+
+def _get_default_vsa_param():
+ return {
+ 'display_name': 'Test_VSA_name',
+ 'display_description': 'Test_VSA_description',
+ 'vc_count': 1,
+ 'instance_type': 'm1.small',
+ 'instance_type_id': 5,
+ 'image_name': None,
+ 'availability_zone': None,
+ 'storage': [],
+ 'shared': False
+ }
+
+
+def stub_vsa_create(self, context, **param):
+ global last_param
+ LOG.debug(_("_create: param=%s"), param)
+ param['id'] = 123
+ param['name'] = 'Test name'
+ param['instance_type_id'] = 5
+ last_param = param
+ return param
+
+
+def stub_vsa_delete(self, context, vsa_id):
+ global last_param
+ last_param = dict(vsa_id=vsa_id)
+
+ LOG.debug(_("_delete: %s"), locals())
+ if vsa_id != '123':
+ raise exception.NotFound
+
+
+def stub_vsa_get(self, context, vsa_id):
+ global last_param
+ last_param = dict(vsa_id=vsa_id)
+
+ LOG.debug(_("_get: %s"), locals())
+ if vsa_id != '123':
+ raise exception.NotFound
+
+ param = _get_default_vsa_param()
+ param['id'] = vsa_id
+ return param
+
+
+def stub_vsa_get_all(self, context):
+ LOG.debug(_("_get_all: %s"), locals())
+ param = _get_default_vsa_param()
+ param['id'] = 123
+ return [param]
+
+
+class VSAApiTest(test.TestCase):
+ def setUp(self):
+ super(VSAApiTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(vsa.api.API, "create", stub_vsa_create)
+ self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete)
+ self.stubs.Set(vsa.api.API, "get", stub_vsa_get)
+ self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all)
+
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSAApiTest, self).tearDown()
+
+ def test_vsa_create(self):
+ global last_param
+ last_param = {}
+
+ vsa = {"displayName": "VSA Test Name",
+ "displayDescription": "VSA Test Desc"}
+ body = dict(vsa=vsa)
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ # Compare if parameters were correctly passed to stub
+ self.assertEqual(last_param['display_name'], "VSA Test Name")
+ self.assertEqual(last_param['display_description'], "VSA Test Desc")
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue('vsa' in resp_dict)
+ self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName'])
+ self.assertEqual(resp_dict['vsa']['displayDescription'],
+ vsa['displayDescription'])
+
+ def test_vsa_create_no_body(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'POST'
+ req.body = json.dumps({})
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 422)
+
+ def test_vsa_delete(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 123
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_delete_invalid_id(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 234
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_show(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 123
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue('vsa' in resp_dict)
+ self.assertEqual(resp_dict['vsa']['id'], str(vsa_id))
+
+ def test_vsa_show_invalid_id(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 234
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_index(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+
+ self.assertTrue('vsaSet' in resp_dict)
+ resp_vsas = resp_dict['vsaSet']
+ self.assertEqual(len(resp_vsas), 1)
+
+ resp_vsa = resp_vsas.pop()
+ self.assertEqual(resp_vsa['id'], 123)
+
+ def test_vsa_detail(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/detail')
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+
+ self.assertTrue('vsaSet' in resp_dict)
+ resp_vsas = resp_dict['vsaSet']
+ self.assertEqual(len(resp_vsas), 1)
+
+ resp_vsa = resp_vsas.pop()
+ self.assertEqual(resp_vsa['id'], 123)
+
+
+def _get_default_volume_param():
+ return {
+ 'id': 123,
+ 'status': 'available',
+ 'size': 100,
+ 'availability_zone': 'nova',
+ 'created_at': None,
+ 'attach_status': 'detached',
+ 'name': 'vol name',
+ 'display_name': 'Default vol name',
+ 'display_description': 'Default vol description',
+ 'volume_type_id': 1,
+ 'volume_metadata': [],
+ }
+
+
+def stub_get_vsa_volume_type(self, context):
+ return {'id': 1,
+ 'name': 'VSA volume type',
+ 'extra_specs': {'type': 'vsa_volume'}}
+
+
+def stub_volume_create(self, context, size, snapshot_id, name, description,
+ **param):
+ LOG.debug(_("_create: param=%s"), size)
+ vol = _get_default_volume_param()
+ vol['size'] = size
+ vol['display_name'] = name
+ vol['display_description'] = description
+ return vol
+
+
+def stub_volume_update(self, context, **param):
+ LOG.debug(_("_volume_update: param=%s"), param)
+
+
+def stub_volume_delete(self, context, **param):
+ LOG.debug(_("_volume_delete: param=%s"), param)
+
+
+def stub_volume_get(self, context, volume_id):
+ LOG.debug(_("_volume_get: volume_id=%s"), volume_id)
+ vol = _get_default_volume_param()
+ vol['id'] = volume_id
+ meta = {'key': 'from_vsa_id', 'value': '123'}
+ if volume_id == '345':
+ meta = {'key': 'to_vsa_id', 'value': '123'}
+ vol['volume_metadata'].append(meta)
+ return vol
+
+
+def stub_volume_get_notfound(self, context, volume_id):
+ raise exception.NotFound
+
+
+def stub_volume_get_all(self, context, search_opts):
+ vol = stub_volume_get(self, context, '123')
+ vol['metadata'] = search_opts['metadata']
+ return [vol]
+
+
+def return_vsa(context, vsa_id):
+ return {'id': vsa_id}
+
+
+class VSAVolumeApiTest(test.TestCase):
+
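+ # The same tests are reused for the 'drives' sub-resource via the
+ # VSADriveApiTest subclass below, which only swaps test_obj and
+ # test_objs.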
+ def setUp(self, test_obj=None, test_objs=None):
+ super(VSAVolumeApiTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(nova.db.api, 'vsa_get', return_vsa)
+ self.stubs.Set(vsa.api.API, "get_vsa_volume_type",
+ stub_get_vsa_volume_type)
+
+ self.stubs.Set(volume.api.API, "update", stub_volume_update)
+ self.stubs.Set(volume.api.API, "delete", stub_volume_delete)
+ self.stubs.Set(volume.api.API, "get", stub_volume_get)
+ self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all)
+
+ self.context = context.get_admin_context()
+ self.test_obj = test_obj if test_obj else "volume"
+ self.test_objs = test_objs if test_objs else "volumes"
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSAVolumeApiTest, self).tearDown()
+
+ def test_vsa_volume_create(self):
+ self.stubs.Set(volume.api.API, "create", stub_volume_create)
+
+ vol = {"size": 100,
+ "displayName": "VSA Volume Test Name",
+ "displayDescription": "VSA Volume Test Desc"}
+ body = {self.test_obj: vol}
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(fakes.wsgi_app())
+
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue(self.test_obj in resp_dict)
+ self.assertEqual(resp_dict[self.test_obj]['size'],
+ vol['size'])
+ self.assertEqual(resp_dict[self.test_obj]['displayName'],
+ vol['displayName'])
+ self.assertEqual(resp_dict[self.test_obj]['displayDescription'],
+ vol['displayDescription'])
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_create_no_body(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ req.method = 'POST'
+ req.body = json.dumps({})
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 422)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_index(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_detail(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \
+ self.test_objs)
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_show(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_show_no_vsa_assignment(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
+ (self.test_objs))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_show_no_volume(self):
+ self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)
+
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
+ (self.test_objs))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+
+ def test_vsa_volume_update(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ update = {"status": "available",
+ "displayName": "Test Display name"}
+ body = {self.test_obj: update}
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 202)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 202)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete_no_vsa_assignment(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
+ (self.test_objs))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete_no_volume(self):
+ self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)
+
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
+ (self.test_objs))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 404)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+
+class VSADriveApiTest(VSAVolumeApiTest):
+ def setUp(self):
+ super(VSADriveApiTest, self).setUp(test_obj="drive",
+ test_objs="drives")
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSADriveApiTest, self).tearDown()
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index c78588d65..05267d8fb 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -95,6 +95,7 @@ class ExtensionControllerTest(test.TestCase):
"Quotas",
"Rescue",
"SecurityGroups",
+ "VSAs",
"VirtualInterfaces",
"Volumes",
"VolumeTypes",
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
new file mode 100644
index 000000000..37964f00d
--- /dev/null
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -0,0 +1,641 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+import nova
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.volume import volume_types
+
+from nova.scheduler import vsa as vsa_sched
+from nova.scheduler import driver
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.scheduler.vsa')
+
+scheduled_volumes = []
+scheduled_volume = {}
+global_volume = {}
+
+
+class FakeVsaLeastUsedScheduler(
+ vsa_sched.VsaSchedulerLeastUsedHost):
+ # No need to stub anything at the moment
+ pass
+
+
+class FakeVsaMostAvailCapacityScheduler(
+ vsa_sched.VsaSchedulerMostAvailCapacity):
+ # No need to stub anything at the moment
+ pass
+
+
+class VsaSchedulerTestCase(test.TestCase):
+
+ def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
+ volume_params = []
+ for i in range(num_vols):
+
+ name = 'name_' + str(i)
+ try:
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ except exception.ApiError:
+ # type is already created
+ pass
+
+ volume_type = volume_types.get_volume_type_by_name(self.context,
+ name)
+ volume = {'size': size,
+ 'snapshot_id': None,
+ 'name': 'vol_' + str(i),
+ 'description': None,
+ 'volume_type_id': volume_type['id']}
+ volume_params.append(volume)
+
+ return {'num_volumes': len(volume_params),
+ 'vsa_id': 123,
+ 'volumes': volume_params}
+
+ def _generate_default_service_states(self):
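+ # Host i advertises 2 * (init_num_drives + i) drives per type,
+ # with i of them already occupied - later hosts have more total
+ # capacity but are also more heavily used.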
+ service_states = {}
+ for i in range(self.host_num):
+ host = {}
+ hostname = 'host_' + str(i)
+ if hostname in self.exclude_host_list:
+ continue
+
+ host['volume'] = {'timestamp': utils.utcnow(),
+ 'drive_qos_info': {}}
+
+ for j in range(self.drive_type_start_ix,
+ self.drive_type_start_ix + self.drive_type_num):
+ dtype = {}
+ dtype['Name'] = 'name_' + str(j)
+ dtype['DriveType'] = 'type_' + str(j)
+ dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
+ dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
+ dtype['TotalCapacity'] = dtype['TotalDrives'] * \
+ dtype['DriveCapacity']
+ dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
+ dtype['DriveCapacity']
+ dtype['DriveRpm'] = 7200
+ dtype['DifCapable'] = 0
+ dtype['SedCapable'] = 0
+ dtype['PartitionDrive'] = {
+ 'PartitionSize': 0,
+ 'NumOccupiedPartitions': 0,
+ 'NumFreePartitions': 0}
+ dtype['FullDrive'] = {
+ 'NumFreeDrives': dtype['TotalDrives'] - i,
+ 'NumOccupiedDrives': i}
+ host['volume']['drive_qos_info'][dtype['Name']] = dtype
+
+ service_states[hostname] = host
+
+ return service_states
+
+ def _print_service_states(self):
+ for host, host_val in self.service_states.iteritems():
+ LOG.info(_("Host %s"), host)
+ total_used = 0
+ total_available = 0
+ qos = host_val['volume']['drive_qos_info']
+
+ for k, d in qos.iteritems():
+ LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
+ "size %3d, total %4d, used %4d, avail %d",
+ k, d['DriveType'],
+ d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
+ vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity']),
+ vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
+
+ total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity'])
+ total_available += vsa_sched.BYTES_TO_GB(
+ d['AvailableCapacity'])
+ LOG.info("Host %s: used %d, avail %d",
+ host, total_used, total_available)
+
+ def _set_service_states(self, host_num,
+ drive_type_start_ix, drive_type_num,
+ init_num_drives=10,
+ exclude_host_list=None):
+ self.host_num = host_num
+ self.drive_type_start_ix = drive_type_start_ix
+ self.drive_type_num = drive_type_num
+ self.exclude_host_list = exclude_host_list or []
+ self.init_num_drives = init_num_drives
+ self.service_states = self._generate_default_service_states()
+
+ def _get_service_states(self):
+ return self.service_states
+
+ def _fake_get_service_states(self):
+ return self._get_service_states()
+
+ def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
+ global scheduled_volumes
+ scheduled_volumes.append(dict(vol=vol,
+ vsa_id=vsa_id,
+ az=availability_zone))
+ name = vol['name']
+ host = vol['host']
+ LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
+ locals())
+ LOG.debug(_("\t vol=%(vol)s"), locals())
+
+ def _fake_vsa_update(self, context, vsa_id, values):
+ LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
+ "values=%(values)s"), locals())
+
+ def _fake_volume_create(self, context, options):
+ LOG.debug(_("Test: Volume create: %s"), options)
+ options['id'] = 123
+ global global_volume
+ global_volume = options
+ return options
+
+ def _fake_volume_get(self, context, volume_id):
+ LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
+ global global_volume
+ global_volume['id'] = volume_id
+ global_volume['availability_zone'] = None
+ return global_volume
+
+ def _fake_volume_update(self, context, volume_id, values):
+ LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
+ "values=%(values)s"), locals())
+ global scheduled_volume
+ scheduled_volume = {'id': volume_id, 'host': values['host']}
+
+ def _fake_service_get_by_args(self, context, host, binary):
+ return "service"
+
+ def _fake_service_is_up_True(self, service):
+ return True
+
+ def _fake_service_is_up_False(self, service):
+ return False
+
+ def setUp(self, sched_class=None):
+ super(VsaSchedulerTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.context = context.get_admin_context()
+
+ if sched_class is None:
+ self.sched = FakeVsaLeastUsedScheduler()
+ else:
+ self.sched = sched_class
+
+ self.host_num = 10
+ self.drive_type_num = 5
+
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(self.sched,
+ '_provision_volume', self._fake_provision_volume)
+ self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
+
+ self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
+ self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+
+ self.created_types_lst = []
+
+ def tearDown(self):
+ for name in self.created_types_lst:
+ volume_types.purge(self.context, name)
+
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCase, self).tearDown()
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_0', 'host_2', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_no_drive_type(self):
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_no_enough_drives(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=3,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=0)
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ # check that everything was returned back
+ cur = self._get_service_states()
+ for k, v in prev.iteritems():
+ self.assertEqual(prev[k]['volume']['drive_qos_info'],
+ cur[k]['volume']['drive_qos_info'])
+
+ def test_vsa_sched_wrong_topic(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ states = self._get_service_states()
+ new_states = {}
+ new_states['host_0'] = {'compute': states['host_0']['volume']}
+ self.service_states = new_states
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_provision_volume(self):
+ global global_volume
+ global_volume = {}
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(request_spec['volumes'][0]['name'],
+ global_volume['display_name'])
+
+ def test_vsa_sched_no_free_drives(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ cur = self._get_service_states()
+ cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)
+
+ new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+ self._print_service_states()
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ new_request,
+ availability_zone=None)
+
+ def test_vsa_sched_forced_host(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10)
+
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.assertRaises(exception.HostBinaryNotFound,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_False)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
+
+ def test_vsa_sched_create_single_volume_az(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ def _fake_volume_get_az(context, volume_id):
+ LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
+ return {'id': volume_id, 'availability_zone': 'nova:host_3'}
+
+ self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_3')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_3')
+
+ def test_vsa_sched_create_single_non_vsa_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ global global_volume
+ global_volume = {}
+ global_volume['volume_type_id'] = None
+
+ self.assertRaises(driver.NoValidHost,
+ self.sched.schedule_create_volume,
+ self.context,
+ 123,
+ availability_zone=None)
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_2')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_2')
+
+
+class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
+
+ def setUp(self):
+ super(VsaSchedulerTestCaseMostAvail, self).setUp(
+ FakeVsaMostAvailCapacityScheduler())
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_9')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_9')
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self._print_service_states()
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')
+
+ cur = self._get_service_states()
+ for host in ['host_9', 'host_8', 'host_7']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py
index 04c1b5598..e1ba4aafb 100644
--- a/nova/tests/test_ipv6.py
+++ b/nova/tests/test_ipv6.py
@@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase):
def test_to_global_with_bad_prefix(self):
bad_prefix = '82'
self.assertRaises(TypeError, ipv6.to_global,
- bad_prefix,
+ bad_prefix,
'2001:db8::216:3eff:fe33:4455',
'test')
diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py
new file mode 100644
index 000000000..3d2d2de13
--- /dev/null
+++ b/nova/tests/test_vsa.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import stubout
+
+from xml.etree import ElementTree
+from xml.etree.ElementTree import Element, SubElement
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import vsa
+from nova import volume
+from nova.volume import volume_types
+from nova.vsa import utils as vsa_utils
+
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa')
+
+
+class VsaTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+
+ FLAGS.quota_volumes = 100
+ FLAGS.quota_gigabytes = 10000
+
+ self.context = context.get_admin_context()
+
+ volume_types.create(self.context,
+ 'SATA_500_7200',
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': 'SATA_500_7200',
+ 'drive_type': 'SATA',
+ 'drive_size': '500',
+ 'drive_rpm': '7200'})
+
+ def fake_show_by_name(meh, context, name):
+ if name == 'wrong_image_name':
+ LOG.debug(_("Test: Emulate wrong VSA name. Raise"))
+ raise exception.ImageNotFound
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaTestCase, self).tearDown()
+
+ def test_vsa_create_delete_defaults(self):
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['display_name'], param['display_name'])
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_delete_check_in_db(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ vsa_ref = self.vsa_api.create(self.context)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1) + 1)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+ vsa_list3 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list3), len(vsa_list2) - 1)
+
+ def test_vsa_create_delete_high_vc_count(self):
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_wrong_image_name(self):
+ param = {'image_name': 'wrong_image_name'}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_db_error(self):
+
+ def fake_vsa_create(context, options):
+ LOG.debug(_("Test: Emulate DB error. Raise"))
+ raise exception.Error
+
+ self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create)
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context)
+
+ def test_vsa_create_wrong_storage_params(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ param = {'storage': [{'stub': 1}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1))
+
+ param = {'storage': [{'drive_name': 'wrong name'}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_with_storage(self, multi_vol_creation=True):
+ """Test creation of VSA with BE storage"""
+
+ FLAGS.vsa_multi_vol_creation = multi_vol_creation
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 3)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}],
+ 'shared': True}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 15)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_with_storage_single_volumes(self):
+ self.test_vsa_create_with_storage(multi_vol_creation=False)
+
+ def test_vsa_update(self):
+ vsa_ref = self.vsa_api.create(self.context)
+
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+
+ param = {'vc_count': 2}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], 2)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_generate_user_data(self):
+
+ FLAGS.vsa_multi_vol_creation = False
+ param = {'display_name': 'VSA name test',
+ 'display_description': 'VSA desc test',
+ 'vc_count': 2,
+ 'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ volumes = self.vsa_api.get_all_vsa_drives(self.context,
+ vsa_ref['id'])
+
+ user_data = vsa_utils.generate_user_data(vsa_ref, volumes)
+ user_data = base64.b64decode(user_data)
+
+ LOG.debug(_("Test: user_data = %s"), user_data)
+
+ elem = ElementTree.fromstring(user_data)
+ self.assertEqual(elem.findtext('name'),
+ param['display_name'])
+ self.assertEqual(elem.findtext('description'),
+ param['display_description'])
+ self.assertEqual(elem.findtext('vc_count'),
+ str(param['vc_count']))
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
new file mode 100644
index 000000000..b7cd4e840
--- /dev/null
+++ b/nova/tests/test_vsa_volumes.py
@@ -0,0 +1,136 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+from nova import exception
+from nova import flags
+from nova import vsa
+from nova import volume
+from nova import db
+from nova import context
+from nova import test
+from nova import log as logging
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa.volumes')
+
+
+class VsaVolumesTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaVolumesTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+ self.context = context.get_admin_context()
+
+ self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context)
+
+ def fake_show_by_name(meh, context, name):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.vsa_id = vsa_ref['id']
+
+ def tearDown(self):
+ if self.vsa_id:
+ self.vsa_api.delete(self.context, self.vsa_id)
+ self.stubs.UnsetAll()
+ super(VsaVolumesTestCase, self).tearDown()
+
+ def _default_volume_param(self):
+ return {
+ 'size': 1,
+ 'snapshot_id': None,
+ 'name': 'Test volume name',
+ 'description': 'Test volume desc name',
+ 'volume_type': self.default_vol_type,
+ 'metadata': {'from_vsa_id': self.vsa_id}
+ }
+
+ def _get_all_volumes_by_vsa(self):
+ return self.volume_api.get_all(self.context,
+ search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}})
+
+ def test_vsa_volume_create_delete(self):
+ """ Check if volume properly created and deleted. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(1, len(vols2))
+ volume_ref = vols2[0]
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'available'})
+ self.volume_api.delete(self.context, volume_ref['id'])
+
+ vols3 = self._get_all_volumes_by_vsa()
+ self.assertEqual(1, len(vols3))
+ volume_ref = vols3[0]
+ self.assertEqual(volume_ref['status'],
+ 'deleting')
+
+ def test_vsa_volume_delete_nonavail_volume(self):
+ """ Check volume deleton in different states. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'in-use'})
+ self.assertRaises(exception.ApiError,
+ self.volume_api.delete,
+ self.context, volume_ref['id'])
+
+ def test_vsa_volume_delete_vsa_with_volumes(self):
+ """ Check volume deleton in different states. """
+
+ vols1 = self._get_all_volumes_by_vsa()
+ for i in range(3):
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1) + 3, len(vols2))
+
+ self.vsa_api.delete(self.context, self.vsa_id)
+
+ vols3 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1), len(vols3))
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 6a02cfa24..d3aeadda4 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -135,7 +135,9 @@
<interface type='bridge'>
<source bridge='${nic.bridge_name}'/>
<mac address='${nic.mac_address}'/>
- <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+#if $getVar('use_virtio_for_bridges', True)
+ <model type='virtio'/>
+#end if
<filterref filter="nova-instance-${name}-${nic.id}">
<parameter name="IP" value="${nic.ip_address}" />
<parameter name="DHCPSERVER" value="${nic.dhcp_server}" />
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 4388291db..363a20ed0 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -135,6 +135,9 @@ flags.DEFINE_string('default_local_format',
None,
'The default format a local_volume will be formatted with '
'on creation.')
+flags.DEFINE_bool('libvirt_use_virtio_for_bridges',
+ False,
+ 'Use virtio for bridge interfaces')
def get_connection(read_only):
@@ -1083,6 +1086,8 @@ class LibvirtConnection(driver.ComputeDriver):
'ebs_root': ebs_root,
'local_device': local_device,
'volumes': block_device_mapping,
+ 'use_virtio_for_bridges':
+ FLAGS.libvirt_use_virtio_for_bridges,
'ephemerals': ephemerals}
root_device_name = driver.block_device_info_get_root(block_device_info)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 195ab24aa..d9c082514 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -132,7 +132,7 @@ class API(base.Base):
for i in volume.get('volume_metadata'):
volume_metadata[i['key']] = i['value']
- for k, v in searchdict:
+ for k, v in searchdict.iteritems():
if k not in volume_metadata.keys()\
or volume_metadata[k] != v:
return False
@@ -141,6 +141,7 @@ class API(base.Base):
# search_option to filter_name mapping.
filter_mapping = {'metadata': _check_metadata_match}
+ result = []
for volume in volumes:
# go over all filters in the list
for opt, values in search_opts.iteritems():
@@ -150,10 +151,10 @@ class API(base.Base):
# no such filter - ignore it, go to next filter
continue
else:
- if filter_func(volume, values) == False:
- # if one of conditions didn't match - remove
- volumes.remove(volume)
+ if filter_func(volume, values):
+ result.append(volume)
break
+ volumes = result
return volumes
def get_snapshot(self, context, snapshot_id):
@@ -255,3 +256,12 @@ class API(base.Base):
self.db.volume_metadata_update(context, volume_id, _metadata, True)
return _metadata
+
+ def get_volume_metadata_value(self, volume, key):
+ """Get value of particular metadata key."""
+ metadata = volume.get('volume_metadata')
+ if metadata:
+ for i in volume['volume_metadata']:
+ if i['key'] == key:
+ return i['value']
+ return None
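
The rewritten filter loop above replaces a pattern that called volumes.remove() while iterating over the same list; mutating a list during iteration makes the iterator skip the element that follows each removal. Collecting matches into a separate result list avoids this. A standalone illustration of the pitfall and the fix:

    def broken_filter(items, keep):
        # removing during iteration skips the element after each removal
        for item in items:
            if not keep(item):
                items.remove(item)
        return items

    def fixed_filter(items, keep):
        # same pattern as the new code above: collect, don't remove
        return [item for item in items if keep(item)]

    nums = [1, 3, 2, 4]
    is_even = lambda n: n % 2 == 0
    print(broken_filter(list(nums), is_even))  # [3, 2, 4] - 3 wrongly kept
    print(fixed_filter(list(nums), is_even))   # [2, 4]
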
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 06e3d7afd..35e3ea8d0 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -22,11 +22,13 @@ Drivers for volumes.
import time
import os
+from xml.etree import ElementTree
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from nova.volume import volume_types
LOG = logging.getLogger("nova.volume.driver")
@@ -212,6 +214,11 @@ class VolumeDriver(object):
"""Make sure volume is exported."""
raise NotImplementedError()
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+ return None
+
class AOEDriver(VolumeDriver):
"""Implements AOE specific volume commands."""
@@ -802,3 +809,268 @@ class LoggingVolumeDriver(VolumeDriver):
if match:
matches.append(entry)
return matches
+
+
+class ZadaraBEDriver(ISCSIDriver):
+ """Performs actions to configure Zadara BE module."""
+
+ def _is_vsa_volume(self, volume):
+ return volume_types.is_vsa_volume(volume['volume_type_id'])
+
+ def _is_vsa_drive(self, volume):
+ return volume_types.is_vsa_drive(volume['volume_type_id'])
+
+ def _not_vsa_volume_or_drive(self, volume):
+ """Returns True if volume is not VSA BE volume."""
+ if not volume_types.is_vsa_object(volume['volume_type_id']):
+ LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name'])
+ return True
+ else:
+ return False
+
+ def check_for_setup_error(self):
+ """No setup necessary for Zadara BE."""
+ pass
+
+ """ Volume Driver methods """
+ def create_volume(self, volume):
+ """Creates BE volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_volume(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s creation - do nothing"),
+ volume['name'])
+ return
+
+ if int(volume['size']) == 0:
+ sizestr = '0' # indicates full-partition
+ else:
+ sizestr = '%s' % (int(volume['size']) << 30) # size in bytes
+
+ # Set the qos-str to default type sas
+ qosstr = 'SAS_1000'
+ volume_type = volume_types.get_volume_type(None,
+ volume['volume_type_id'])
+ if volume_type is not None:
+ qosstr = volume_type['extra_specs']['drive_type'] + \
+ ("_%s" % volume_type['extra_specs']['drive_size'])
+
+ vsa_id = None
+ for i in volume.get('volume_metadata'):
+ if i['key'] == 'to_vsa_id':
+ vsa_id = i['value']
+ break
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'create_qospart',
+ '--qos', qosstr,
+ '--pname', volume['name'],
+ '--psize', sizestr,
+ '--vsaid', vsa_id,
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE create_volume for %s failed"), volume['name'])
+ raise
+
+ LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
+
+ def delete_volume(self, volume):
+ """Deletes BE volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).delete_volume(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'delete_partition',
+ '--pname', volume['name'],
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
+ return
+
+ LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name'])
+
+ def local_path(self, volume):
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).local_path(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s local path call - call discover"),
+ volume['name'])
+ return super(ZadaraBEDriver, self).discover_volume(None, volume)
+
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """ensure BE export for a volume"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).ensure_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping ensure_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ ret = self._common_be_export(context, volume, iscsi_target)
+ except exception.ProcessExecutionError:
+ return
+ return ret
+
+ def create_export(self, context, volume):
+ """create BE export for a volume"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s create export - do nothing"),
+ volume['name'])
+ return
+
+ self._ensure_iscsi_targets(context, volume['host'])
+ iscsi_target = self.db.volume_allocate_iscsi_target(context,
+ volume['id'],
+ volume['host'])
+ try:
+ ret = self._common_be_export(context, volume, iscsi_target)
+ except Exception:
+ raise exception.ProcessExecutionError()
+ return ret
+
+ def remove_export(self, context, volume):
+ """Removes BE export for a volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).remove_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping remove_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'remove_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
+ return
+
+ def create_snapshot(self, snapshot):
+ """Nothing required for snapshot"""
+ if self._not_vsa_volume_or_drive(snapshot):
+ return super(ZadaraBEDriver, self).create_snapshot(snapshot)
+
+ pass
+
+ def delete_snapshot(self, snapshot):
+ """Nothing required to delete a snapshot"""
+ if self._not_vsa_volume_or_drive(snapshot):
+ return super(ZadaraBEDriver, self).delete_snapshot(snapshot)
+
+ pass
+
+ """ Internal BE Volume methods """
+ def _common_be_export(self, context, volume, iscsi_target):
+ """
+ Common logic that asks zadara_sncfg to setup iSCSI target/lun for
+ this volume
+ """
+ (out, err) = self._sync_exec(
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'create_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ run_as_root=True,
+ check_exit_code=0)
+
+ result_xml = ElementTree.fromstring(out)
+ response_node = result_xml.find("Sn")
+ if response_node is None:
+ msg = "Malformed response from zadara_sncfg"
+ raise exception.Error(msg)
+
+ sn_ip = response_node.findtext("SnIp")
+ sn_iqn = response_node.findtext("IqnName")
+ iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ sn_iqn))
+ return model_update
+
+ def _get_qosgroup_summary(self):
+ """gets the list of qosgroups from Zadara BE"""
+ try:
+ (out, err) = self._sync_exec(
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'get_qosgroups_xml',
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("Failed to retrieve QoS info"))
+ return {}
+
+ qos_groups = {}
+ result_xml = ElementTree.fromstring(out)
+ for element in result_xml.findall('QosGroup'):
+ qos_group = {}
+ # get the name of the group.
+ # If we cannot find it, forget this element
+ group_name = element.findtext("Name")
+ if not group_name:
+ continue
+
+ # loop through all child nodes & fill up attributes of this group
+ for child in element.getchildren():
+ # two types of elements - property of qos-group & sub property
+ # classify them accordingly
+ if child.text:
+ qos_group[child.tag] = int(child.text) \
+ if child.text.isdigit() else child.text
+ else:
+ subelement = {}
+ for subchild in child.getchildren():
+ subelement[subchild.tag] = int(subchild.text) \
+ if subchild.text.isdigit() else subchild.text
+ qos_group[child.tag] = subelement
+
+ # Now add this group to the master qos_groups
+ qos_groups[group_name] = qos_group
+
+ return qos_groups
+
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+
+ drive_info = self._get_qosgroup_summary()
+ return {'drive_qos_info': drive_info}
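
A standalone sketch of the XML walk in _get_qosgroup_summary() above, run against a made-up zadara_sncfg response; the QosGroup/Name element names and the one-level nesting are assumptions taken from the parsing code, not a documented output format:

    from xml.etree import ElementTree

    SAMPLE = ("<Response>"
              "<QosGroup>"
              "<Name>SAS_1000</Name>"
              "<AvailableCapacity>500</AvailableCapacity>"
              "<DriveInfo><Type>SAS</Type><Size>1000</Size></DriveInfo>"
              "</QosGroup>"
              "</Response>")

    def parse_qosgroups(xml_text):
        groups = {}
        for element in ElementTree.fromstring(xml_text).findall('QosGroup'):
            name = element.findtext('Name')
            if not name:
                continue                    # unnamed group - skip it
            group = {}
            for child in element:
                if child.text and child.text.strip():
                    # leaf property of the qos-group
                    group[child.tag] = (int(child.text)
                                        if child.text.isdigit()
                                        else child.text)
                else:
                    # container element - collect its sub-properties
                    group[child.tag] = dict(
                        (sub.tag, int(sub.text)
                         if sub.text and sub.text.isdigit() else sub.text)
                        for sub in child)
            groups[name] = group
        return groups

    print(parse_qosgroups(SAMPLE))
    # {'SAS_1000': {'Name': 'SAS_1000', 'AvailableCapacity': 500,
    #               'DriveInfo': {'Type': 'SAS', 'Size': 1000}}}
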
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 798bd379a..caa5298d4 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -48,7 +48,9 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
+from nova import rpc
from nova import utils
+from nova.volume import volume_types
LOG = logging.getLogger('nova.volume.manager')
@@ -60,6 +62,8 @@ flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
'Driver to use for volume creation')
flags.DEFINE_boolean('use_local_volumes', True,
'if True, will not discover local volumes')
+flags.DEFINE_boolean('volume_force_update_capabilities', False,
+ 'if True will force update capabilities on each check')
class VolumeManager(manager.SchedulerDependentManager):
@@ -74,6 +78,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
+ self._last_volume_stats = []
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -123,6 +128,7 @@ class VolumeManager(manager.SchedulerDependentManager):
except Exception:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
+ self._notify_vsa(context, volume_ref, 'error')
raise
now = utils.utcnow()
@@ -130,8 +136,29 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref['id'], {'status': 'available',
'launched_at': now})
LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
+ self._notify_vsa(context, volume_ref, 'available')
+ self._reset_stats()
return volume_id
+ def _notify_vsa(self, context, volume_ref, status):
+ if volume_ref['volume_type_id'] is None:
+ return
+
+ if volume_types.is_vsa_drive(volume_ref['volume_type_id']):
+ vsa_id = None
+ for i in volume_ref.get('volume_metadata'):
+ if i['key'] == 'to_vsa_id':
+ vsa_id = int(i['value'])
+ break
+
+ if vsa_id:
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "vsa_volume_created",
+ "args": {"vol_id": volume_ref['id'],
+ "vsa_id": vsa_id,
+ "status": status}})
+
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()
@@ -141,6 +168,7 @@ class VolumeManager(manager.SchedulerDependentManager):
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
+ self._reset_stats()
try:
LOG.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
@@ -231,3 +259,53 @@ class VolumeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
for volume in instance_ref['volumes']:
self.driver.check_for_export(context, volume['id'])
+
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+
+ error_list = []
+ try:
+ self._report_driver_status()
+ except Exception as ex:
+ LOG.warning(_("Error during report_driver_status(): %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ super(VolumeManager, self).periodic_tasks(context)
+
+ return error_list
+
+ def _volume_stats_changed(self, stat1, stat2):
+ if FLAGS.volume_force_update_capabilities:
+ return True
+ if len(stat1) != len(stat2):
+ return True
+ for (k, v) in stat1.iteritems():
+ if k not in stat2 or stat2[k] != v:
+ return True
+ return False
+
+ def _report_driver_status(self):
+ volume_stats = self.driver.get_volume_stats(refresh=True)
+ if volume_stats:
+ LOG.info(_("Checking volume capabilities"))
+
+ if self._volume_stats_changed(self._last_volume_stats,
+ volume_stats):
+ LOG.info(_("New capabilities found: %s"), volume_stats)
+ self._last_volume_stats = volume_stats
+
+ # This will grab info about the host and queue it
+ # to be sent to the Schedulers.
+ self.update_service_capabilities(self._last_volume_stats)
+ else:
+ # avoid repeating fanouts
+ self.update_service_capabilities(None)
+
+ def _reset_stats(self):
+ LOG.info(_("Clear capabilities"))
+ self._last_volume_stats = []
+
+ def notification(self, context, event):
+ LOG.info(_("Notification {%s} received"), event)
+ self._reset_stats()
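
_report_driver_status() above only fans capabilities out to the schedulers when they differ from the previous report; identical stats are collapsed into update_service_capabilities(None). A compact sketch of that publish-only-on-change behaviour (Reporter is a stand-in, not a nova class):

    def stats_changed(old, new, force=False):
        # mirrors _volume_stats_changed() above
        if force:
            return True
        if len(old) != len(new):
            return True
        return any(k not in new or new[k] != v for k, v in old.items())

    class Reporter(object):
        def __init__(self):
            self.last = {}
            self.sent = []

        def report(self, stats):
            if stats_changed(self.last, stats):
                self.last = stats
                self.sent.append(stats)   # new capabilities - fan out
            else:
                self.sent.append(None)    # unchanged - suppress fan-out

    r = Reporter()
    r.report({'drive_qos_info': {'SAS_1000': 1}})
    r.report({'drive_qos_info': {'SAS_1000': 1}})
    print(r.sent)   # [{'drive_qos_info': {'SAS_1000': 1}}, None]
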
diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py
index 9b02d4ccc..ffa9e6e02 100644
--- a/nova/volume/volume_types.py
+++ b/nova/volume/volume_types.py
@@ -100,20 +100,22 @@ def get_all_types(context, inactive=0, search_opts={}):
continue
else:
if filter_func(type_args, values):
- # if one of conditions didn't match - remove
result[type_name] = type_args
break
vol_types = result
return vol_types
-def get_volume_type(context, id):
+def get_volume_type(ctxt, id):
"""Retrieves single volume type by id."""
if id is None:
raise exception.InvalidVolumeType(volume_type=id)
+ if ctxt is None:
+ ctxt = context.get_admin_context()
+
try:
- return db.volume_type_get(context, id)
+ return db.volume_type_get(ctxt, id)
except exception.DBError:
raise exception.ApiError(_("Unknown volume type: %s") % id)
@@ -127,3 +129,38 @@ def get_volume_type_by_name(context, name):
return db.volume_type_get_by_name(context, name)
except exception.DBError:
raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def is_key_value_present(volume_type_id, key, value, volume_type=None):
+ if volume_type_id is None:
+ return False
+
+ if volume_type is None:
+ volume_type = get_volume_type(context.get_admin_context(),
+ volume_type_id)
+ if volume_type.get('extra_specs') is None or\
+ volume_type['extra_specs'].get(key) != value:
+ return False
+ else:
+ return True
+
+
+def is_vsa_drive(volume_type_id, volume_type=None):
+ return is_key_value_present(volume_type_id,
+ 'type', 'vsa_drive', volume_type)
+
+
+def is_vsa_volume(volume_type_id, volume_type=None):
+ return is_key_value_present(volume_type_id,
+ 'type', 'vsa_volume', volume_type)
+
+
+def is_vsa_object(volume_type_id):
+ if volume_type_id is None:
+ return False
+
+ volume_type = get_volume_type(context.get_admin_context(),
+ volume_type_id)
+
+ return is_vsa_drive(volume_type_id, volume_type) or\
+ is_vsa_volume(volume_type_id, volume_type)
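
The helpers above classify a volume type purely by the 'type' key of its extra_specs. A minimal illustration of the predicate with the DB lookup stubbed out (the three sample dicts are invented, but mimic the extra_specs shape the code expects):

    def is_key_value_present(volume_type, key, value):
        specs = volume_type.get('extra_specs')
        return bool(specs) and specs.get(key) == value

    be_drive = {'extra_specs': {'type': 'vsa_drive',
                                'drive_type': 'SAS',
                                'drive_size': '1000'}}
    fe_volume = {'extra_specs': {'type': 'vsa_volume'}}
    plain = {'extra_specs': {}}

    print(is_key_value_present(be_drive, 'type', 'vsa_drive'))    # True
    print(is_key_value_present(fe_volume, 'type', 'vsa_volume'))  # True
    print(is_key_value_present(plain, 'type', 'vsa_drive'))       # False
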
diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py
new file mode 100644
index 000000000..09162e006
--- /dev/null
+++ b/nova/vsa/__init__.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.vsa.api import API
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
new file mode 100644
index 000000000..18cf13705
--- /dev/null
+++ b/nova/vsa/api.py
@@ -0,0 +1,411 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to Virtual Storage Arrays (VSAs).
+
+Experimental code. Requires special VSA image.
+For assistance and guidelines please contact
+ Zadara Storage Inc. and the OpenStack community
+"""
+
+import sys
+
+from nova import compute
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import volume
+from nova.compute import instance_types
+from nova.db import base
+from nova.volume import volume_types
+
+
+class VsaState:
+ CREATING = 'creating' # VSA creating (not ready yet)
+ LAUNCHING = 'launching' # Launching VCs (all BE volumes were created)
+ CREATED = 'created' # VSA fully created and ready for use
+ PARTIAL = 'partial' # Some BE drives were allocated
+ FAILED = 'failed' # Some BE storage allocations failed
+ DELETING = 'deleting' # VSA started the deletion procedure
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_ec2_access_key', None,
+ 'EC2 access key used by VSA for accessing nova')
+flags.DEFINE_string('vsa_ec2_user_id', None,
+ 'User ID used by VSA for accessing nova')
+flags.DEFINE_boolean('vsa_multi_vol_creation', True,
+ 'Ask scheduler to create multiple volumes in one call')
+flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type',
+ 'Name of volume type associated with FE VSA volumes')
+
+LOG = logging.getLogger('nova.vsa')
+
+
+class API(base.Base):
+ """API for interacting with the VSA manager."""
+
+ def __init__(self, compute_api=None, volume_api=None, **kwargs):
+ self.compute_api = compute_api or compute.API()
+ self.volume_api = volume_api or volume.API()
+ super(API, self).__init__(**kwargs)
+
+ def _check_volume_type_correctness(self, vol_type):
+ if vol_type.get('extra_specs') is None or\
+ vol_type['extra_specs'].get('type') != 'vsa_drive' or\
+ vol_type['extra_specs'].get('drive_type') is None or\
+ vol_type['extra_specs'].get('drive_size') is None:
+
+ raise exception.ApiError(_("Invalid drive type %s")
+ % vol_type['name'])
+
+ def _get_default_vsa_instance_type(self):
+ return instance_types.get_instance_type_by_name(
+ FLAGS.default_vsa_instance_type)
+
+ def _check_storage_parameters(self, context, vsa_name, storage,
+ shared, first_index=0):
+ """
+ Translates a storage array of disks into a list of volume parameters
+ :param storage: List of dictionaries with the following keys:
+ drive_name, num_drives, size
+ :param shared: Specifies if storage is dedicated or shared.
+ For shared storage, disks are split into partitions
+ """
+ volume_params = []
+ for node in storage:
+
+ name = node.get('drive_name', None)
+ num_disks = node.get('num_drives', 1)
+
+ if name is None:
+ raise exception.ApiError(_("No drive_name param found in %s")
+ % node)
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except exception.NotFound:
+ raise exception.ApiError(_("Invalid drive type name %s")
+ % name)
+
+ self._check_volume_type_correctness(vol_type)
+
+ # if size field present - override disk size specified in DB
+ size = int(node.get('size',
+ vol_type['extra_specs'].get('drive_size')))
+
+ if shared:
+ part_size = FLAGS.vsa_part_size_gb
+ total_capacity = num_disks * size
+ num_volumes = total_capacity / part_size
+ size = part_size
+ else:
+ num_volumes = num_disks
+ size = 0 # special handling for full drives
+
+ for i in range(num_volumes):
+ volume_name = "drive-%03d" % first_index
+ first_index += 1
+ volume_desc = 'BE volume for VSA %s type %s' % \
+ (vsa_name, name)
+ volume = {
+ 'size': size,
+ 'name': volume_name,
+ 'description': volume_desc,
+ 'volume_type_id': vol_type['id'],
+ }
+ volume_params.append(volume)
+
+ return volume_params
+
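
In the shared case above the total requested capacity is carved into fixed-size partitions of FLAGS.vsa_part_size_gb each, while dedicated storage turns every disk into one full-drive volume (size 0). A worked example of that arithmetic, with the partition size assumed to be 100 GB and the per-type default size passed in explicitly:

    def expand_storage(storage, shared, part_size_gb=100):
        volumes = []
        index = 0
        for node in storage:
            size = int(node['size'])
            num_disks = node.get('num_drives', 1)
            if shared:
                # carve total capacity into fixed-size partitions
                num_volumes = (num_disks * size) // part_size_gb
                size = part_size_gb
            else:
                num_volumes = num_disks
                size = 0                  # full-drive volume
            for _ in range(num_volumes):
                volumes.append(('drive-%03d' % index, size))
                index += 1
        return volumes

    spec = [{'drive_name': 'SAS_500', 'num_drives': 2, 'size': 500}]
    # shared: two 500 GB drives become ten 100 GB partitions
    print(expand_storage(spec, shared=True))
    # dedicated: the same drives become two full-drive volumes
    print(expand_storage(spec, shared=False))
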
+ def create(self, context, display_name='', display_description='',
+ vc_count=1, instance_type=None, image_name=None,
+ availability_zone=None, storage=[], shared=None):
+ """
+ Provisions a VSA instance with its corresponding compute instances
+ and associated volumes
+ :param storage: List of dictionaries with the following keys:
+ drive_name, num_drives, size
+ :param shared: Specifies if storage is dedicated or shared.
+ For shared storage, disks are split into partitions
+ """
+
+ LOG.info(_("*** Experimental VSA code ***"))
+
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if instance_type is None:
+ instance_type = self._get_default_vsa_instance_type()
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if storage is None:
+ storage = []
+
+ if shared in (None, False, 'False'):
+ shared = False
+ else:
+ shared = True
+
+ # check if image is ready before starting any work
+ if image_name is None:
+ image_name = FLAGS.vc_image_name
+ try:
+ image_service = self.compute_api.image_service
+ vc_image = image_service.show_by_name(context, image_name)
+ vc_image_href = vc_image['id']
+ except exception.ImageNotFound:
+ raise exception.ApiError(_("Failed to find configured image %s")
+ % image_name)
+
+ options = {
+ 'display_name': display_name,
+ 'display_description': display_description,
+ 'project_id': context.project_id,
+ 'availability_zone': availability_zone,
+ 'instance_type_id': instance_type['id'],
+ 'image_ref': vc_image_href,
+ 'vc_count': vc_count,
+ 'status': VsaState.CREATING,
+ }
+ LOG.info(_("Creating VSA: %s") % options)
+
+ # create DB entry for VSA instance
+ try:
+ vsa_ref = self.db.vsa_create(context, options)
+ except exception.Error:
+ raise exception.ApiError(unicode(sys.exc_info()[1]))
+ vsa_id = vsa_ref['id']
+ vsa_name = vsa_ref['name']
+
+ # check storage parameters
+ try:
+ volume_params = self._check_storage_parameters(context, vsa_name,
+ storage, shared)
+ except exception.ApiError:
+ self.db.vsa_destroy(context, vsa_id)
+ raise exception.ApiError(_("Error in storage parameters: %s")
+ % storage)
+
+ # after creating DB entry, re-check and set some defaults
+ updates = {}
+ if (not hasattr(vsa_ref, 'display_name') or
+ vsa_ref.display_name is None or
+ vsa_ref.display_name == ''):
+ updates['display_name'] = display_name = vsa_name
+ updates['vol_count'] = len(volume_params)
+ vsa_ref = self.update(context, vsa_id, **updates)
+
+ # create volumes
+ if FLAGS.vsa_multi_vol_creation:
+ if len(volume_params) > 0:
+ request_spec = {
+ 'num_volumes': len(volume_params),
+ 'vsa_id': str(vsa_id),
+ 'volumes': volume_params,
+ }
+
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volumes",
+ "args": {"topic": FLAGS.volume_topic,
+ "request_spec": request_spec,
+ "availability_zone": availability_zone}})
+ else:
+ # create BE volumes one-by-one
+ for vol in volume_params:
+ try:
+ vol_name = vol['name']
+ vol_size = vol['size']
+ vol_type_id = vol['volume_type_id']
+ LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\
+ "volume %(vol_name)s, %(vol_size)d GB, "\
+ "type %(vol_type_id)s"), locals())
+
+ vol_type = volume_types.get_volume_type(context,
+ vol['volume_type_id'])
+
+ vol_ref = self.volume_api.create(context,
+ vol_size,
+ None,
+ vol_name,
+ vol['description'],
+ volume_type=vol_type,
+ metadata=dict(to_vsa_id=str(vsa_id)),
+ availability_zone=availability_zone)
+ except Exception:
+ self.update_vsa_status(context, vsa_id,
+ status=VsaState.PARTIAL)
+ raise
+
+ if len(volume_params) == 0:
+ # No BE volumes - ask VSA manager to start VCs
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "create_vsa",
+ "args": {"vsa_id": str(vsa_id)}})
+
+ return vsa_ref
+
+ def update_vsa_status(self, context, vsa_id, status):
+ updates = dict(status=status)
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"),
+ locals())
+ return self.update(context, vsa_id, **updates)
+
+ def update(self, context, vsa_id, **kwargs):
+ """Updates the VSA instance in the datastore.
+
+ :param context: The security context
+ :param vsa_id: ID of the VSA instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :returns: None
+ """
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())
+
+ updatable_fields = ['status', 'vc_count', 'vol_count',
+ 'display_name', 'display_description']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+
+ vc_count = kwargs.get('vc_count', None)
+ if vc_count is not None:
+ # VP-TODO: This request may want to update number of VCs
+ # Get number of current VCs and add/delete VCs appropriately
+ vsa = self.get(context, vsa_id)
+ vc_count = int(vc_count)
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if vsa['vc_count'] != vc_count:
+ self.update_num_vcs(context, vsa, vc_count)
+ changes['vc_count'] = vc_count
+
+ return self.db.vsa_update(context, vsa_id, changes)
+
+ def update_num_vcs(self, context, vsa, vc_count):
+ vsa_name = vsa['name']
+ old_vc_count = int(vsa['vc_count'])
+ if vc_count > old_vc_count:
+ add_cnt = vc_count - old_vc_count
+ LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."),
+ locals())
+ # VP-TODO: actual code for adding new VCs
+
+ elif vc_count < old_vc_count:
+ del_cnt = old_vc_count - vc_count
+ LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."),
+ locals())
+ # VP-TODO: actual code for deleting extra VCs
+
+ def _force_volume_delete(self, ctxt, volume):
+ """Delete a volume, bypassing the check that it must be available."""
+ host = volume['host']
+ if not host:
+ # Deleting volume from database and skipping rpc.
+ self.db.volume_destroy(ctxt, volume['id'])
+ return
+
+ rpc.cast(ctxt,
+ self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume['id']}})
+
+ def delete_vsa_volumes(self, context, vsa_id, direction,
+ force_delete=True):
+ if direction == "FE":
+ volumes = self.get_all_vsa_volumes(context, vsa_id)
+ else:
+ volumes = self.get_all_vsa_drives(context, vsa_id)
+
+ for volume in volumes:
+ try:
+ vol_name = volume['name']
+ LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\
+ "volume %(vol_name)s"), locals())
+ self.volume_api.delete(context, volume['id'])
+ except exception.ApiError:
+ LOG.info(_("Unable to delete volume %s"), volume['name'])
+ if force_delete:
+ LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "\
+ "%(direction)s volume %(vol_name)s"), locals())
+ self._force_volume_delete(context, volume)
+
+ def delete(self, context, vsa_id):
+ """Terminate a VSA instance."""
+ LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)
+
+ # Delete all FrontEnd and BackEnd volumes
+ self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True)
+ self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True)
+
+ # Delete all VC instances
+ instances = self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(vsa_id))})
+ for instance in instances:
+ name = instance['name']
+ LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"),
+ locals())
+ self.compute_api.delete(context, instance['id'])
+
+ # Delete VSA instance
+ self.db.vsa_destroy(context, vsa_id)
+
+ def get(self, context, vsa_id):
+ rv = self.db.vsa_get(context, vsa_id)
+ return rv
+
+ def get_all(self, context):
+ if context.is_admin:
+ return self.db.vsa_get_all(context)
+ return self.db.vsa_get_all_by_project(context, context.project_id)
+
+ def get_vsa_volume_type(self, context):
+ name = FLAGS.vsa_volume_type_name
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except exception.NotFound:
+ volume_types.create(context, name,
+ extra_specs=dict(type='vsa_volume'))
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+
+ return vol_type
+
+ def get_all_vsa_instances(self, context, vsa_id):
+ return self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(vsa_id))})
+
+ def get_all_vsa_volumes(self, context, vsa_id):
+ return self.volume_api.get_all(context,
+ search_opts={'metadata': dict(from_vsa_id=str(vsa_id))})
+
+ def get_all_vsa_drives(self, context, vsa_id):
+ return self.volume_api.get_all(context,
+ search_opts={'metadata': dict(to_vsa_id=str(vsa_id))})
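
The three get_all_* helpers above associate objects with a VSA purely through metadata: VC instances carry vsa_id, FE volumes carry from_vsa_id, and BE drives carry to_vsa_id. A sketch of the metadata filtering those search_opts request, with plain dicts standing in for DB records:

    def volumes_for_vsa(volumes, vsa_id, direction='BE'):
        key = 'to_vsa_id' if direction == 'BE' else 'from_vsa_id'
        return [v for v in volumes
                if v.get('metadata', {}).get(key) == str(vsa_id)]

    volumes = [
        {'name': 'drive-000', 'metadata': {'to_vsa_id': '7'}},   # BE drive
        {'name': 'vol-a', 'metadata': {'from_vsa_id': '7'}},     # FE volume
        {'name': 'vol-b', 'metadata': {}},                       # unrelated
    ]
    print([v['name'] for v in volumes_for_vsa(volumes, 7, 'BE')])  # drive-000
    print([v['name'] for v in volumes_for_vsa(volumes, 7, 'FE')])  # vol-a
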
diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py
new file mode 100644
index 000000000..8ac8a1dd5
--- /dev/null
+++ b/nova/vsa/connection.py
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Abstraction of the underlying connection to VC."""
+
+from nova.vsa import fake
+
+
+def get_connection():
+ # Return an object that is able to talk to VCs
+ return fake.FakeVcConnection()
diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py
new file mode 100644
index 000000000..d4248ca01
--- /dev/null
+++ b/nova/vsa/fake.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class FakeVcConnection(object):
+
+ def init_host(self, host):
+ pass
diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py
new file mode 100644
index 000000000..d4c414106
--- /dev/null
+++ b/nova/vsa/manager.py
@@ -0,0 +1,179 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all processes relating to Virtual Storage Arrays (VSA).
+
+**Related Flags**
+
+"""
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import volume
+from nova import vsa
+from nova import utils
+from nova.compute import instance_types
+from nova.vsa import utils as vsa_utils
+from nova.vsa.api import VsaState
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection',
+ 'Driver to use for controlling VSAs')
+
+LOG = logging.getLogger('nova.vsa.manager')
+
+
+class VsaManager(manager.SchedulerDependentManager):
+ """Manages Virtual Storage Arrays (VSAs)."""
+
+ def __init__(self, vsa_driver=None, *args, **kwargs):
+ if not vsa_driver:
+ vsa_driver = FLAGS.vsa_driver
+ self.driver = utils.import_object(vsa_driver)
+ self.compute_manager = utils.import_object(FLAGS.compute_manager)
+
+ self.compute_api = compute.API()
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+
+ if FLAGS.vsa_ec2_user_id is None or \
+ FLAGS.vsa_ec2_access_key is None:
+ raise exception.VSANovaAccessParamNotFound()
+
+ super(VsaManager, self).__init__(*args, **kwargs)
+
+ def init_host(self):
+ self.driver.init_host(host=self.host)
+ super(VsaManager, self).init_host()
+
+ @exception.wrap_exception()
+ def create_vsa(self, context, vsa_id):
+ """Called by API if there were no BE volumes assigned"""
+ LOG.debug(_("Create call received for VSA %s"), vsa_id)
+
+ vsa_id = int(vsa_id) # just in case
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ return self._start_vcs(context, vsa)
+
+ @exception.wrap_exception()
+ def vsa_volume_created(self, context, vol_id, vsa_id, status):
+ """Callback for volume creations"""
+ LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "\
+ "Status %(status)s"), locals())
+ vsa_id = int(vsa_id) # just in case
+
+ # Get all volumes for this VSA
+ # check if any of them still in creating phase
+ drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
+ for drive in drives:
+ if drive['status'] == 'creating':
+ vol_name = drive['name']
+ vol_disp_name = drive['display_name']
+ LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\
+ "in creating phase - wait"), locals())
+ return
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ if len(drives) != vsa['vol_count']:
+ cvol_real = len(drives)
+ cvol_exp = vsa['vol_count']
+ LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\
+ "(%(cvol_real)d of %(cvol_exp)d)"), locals())
+ return
+
+ # all volumes created (successfully or not)
+ return self._start_vcs(context, vsa, drives)
+
+ def _start_vcs(self, context, vsa, drives=[]):
+ """Start VCs for VSA """
+
+ vsa_id = vsa['id']
+ if vsa['status'] == VsaState.CREATING:
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.LAUNCHING)
+ else:
+ return
+
+ # In a separate loop, go over all volumes and mark them as "attached"
+ has_failed_volumes = False
+ for drive in drives:
+ vol_name = drive['name']
+ vol_disp_name = drive['display_name']
+ status = drive['status']
+ LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\
+ "(%(vol_disp_name)s) is in %(status)s state"),
+ locals())
+ if status == 'available':
+ try:
+ # self.volume_api.update(context, volume['id'],
+ # dict(attach_status="attached"))
+ pass
+ except Exception as ex:
+ msg = _("Failed to update attach status for volume "
+ "%(vol_name)s. %(ex)s") % locals()
+ LOG.exception(msg)
+ else:
+ has_failed_volumes = True
+
+ if has_failed_volumes:
+ LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
+ self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.FAILED)
+ return
+
+ # create user-data record for VC
+ storage_data = vsa_utils.generate_user_data(vsa, drives)
+
+ instance_type = instance_types.get_instance_type(
+ vsa['instance_type_id'])
+
+ # now start the VC instance
+
+ vc_count = vsa['vc_count']
+ LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
+ locals())
+ vc_instances = self.compute_api.create(context,
+ instance_type, # vsa['vsa_instance_type'],
+ vsa['image_ref'],
+ min_count=1,
+ max_count=vc_count,
+ display_name='vc-' + vsa['display_name'],
+ display_description='VC for VSA ' + vsa['display_name'],
+ availability_zone=vsa['availability_zone'],
+ user_data=storage_data,
+ metadata=dict(vsa_id=str(vsa_id)))
+
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.CREATED)
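
vsa_volume_created() above fires once per drive, but _start_vcs() only proceeds when no drive is still 'creating', the drive count matches vol_count, and every drive ended up 'available'; any failed drive tears the whole VSA down. A compact sketch of that gate as a pure function:

    def vsa_launch_decision(drives, expected_count):
        # Returns 'wait', 'failed' or 'launch' for a list of drive dicts.
        if any(d['status'] == 'creating' for d in drives):
            return 'wait'      # another callback will arrive later
        if len(drives) != expected_count:
            return 'wait'      # not all volumes have been created yet
        if any(d['status'] != 'available' for d in drives):
            return 'failed'    # delete BE volumes, mark VSA as FAILED
        return 'launch'        # start the VC instances

    drives = [{'status': 'available'}, {'status': 'creating'}]
    print(vsa_launch_decision(drives, 2))   # wait
    drives[1]['status'] = 'error'
    print(vsa_launch_decision(drives, 2))   # failed
    drives[1]['status'] = 'available'
    print(vsa_launch_decision(drives, 2))   # launch
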
diff --git a/nova/vsa/utils.py b/nova/vsa/utils.py
new file mode 100644
index 000000000..1de341ac5
--- /dev/null
+++ b/nova/vsa/utils.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+from xml.etree import ElementTree
+
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+
+def generate_user_data(vsa, volumes):
+ SubElement = ElementTree.SubElement
+
+ e_vsa = ElementTree.Element("vsa")
+
+ e_vsa_detail = SubElement(e_vsa, "id")
+ e_vsa_detail.text = str(vsa['id'])
+ e_vsa_detail = SubElement(e_vsa, "name")
+ e_vsa_detail.text = vsa['display_name']
+ e_vsa_detail = SubElement(e_vsa, "description")
+ e_vsa_detail.text = vsa['display_description']
+ e_vsa_detail = SubElement(e_vsa, "vc_count")
+ e_vsa_detail.text = str(vsa['vc_count'])
+
+ e_vsa_detail = SubElement(e_vsa, "auth_user")
+ e_vsa_detail.text = FLAGS.vsa_ec2_user_id
+ e_vsa_detail = SubElement(e_vsa, "auth_access_key")
+ e_vsa_detail.text = FLAGS.vsa_ec2_access_key
+
+ e_volumes = SubElement(e_vsa, "volumes")
+ for volume in volumes:
+
+ loc = volume['provider_location']
+ if loc is None:
+ ip = ''
+ iscsi_iqn = ''
+ iscsi_portal = ''
+ else:
+ (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
+ (ip, iscsi_portal) = iscsi_target.split(":", 1)
+
+ e_vol = SubElement(e_volumes, "volume")
+ e_vol_detail = SubElement(e_vol, "id")
+ e_vol_detail.text = str(volume['id'])
+ e_vol_detail = SubElement(e_vol, "name")
+ e_vol_detail.text = volume['name']
+ e_vol_detail = SubElement(e_vol, "display_name")
+ e_vol_detail.text = volume['display_name']
+ e_vol_detail = SubElement(e_vol, "size_gb")
+ e_vol_detail.text = str(volume['size'])
+ e_vol_detail = SubElement(e_vol, "status")
+ e_vol_detail.text = volume['status']
+ e_vol_detail = SubElement(e_vol, "ip")
+ e_vol_detail.text = ip
+ e_vol_detail = SubElement(e_vol, "iscsi_iqn")
+ e_vol_detail.text = iscsi_iqn
+ e_vol_detail = SubElement(e_vol, "iscsi_portal")
+ e_vol_detail.text = iscsi_portal
+ e_vol_detail = SubElement(e_vol, "lun")
+ e_vol_detail.text = '0'
+ e_vol_detail = SubElement(e_vol, "sn_host")
+ e_vol_detail.text = volume['host']
+
+ _xml = ElementTree.tostring(e_vsa)
+ return base64.b64encode(_xml)
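
The guest side can recover the same structure by reversing the two steps above: base64-decode the user data, then walk the <vsa> document. A minimal round-trip sketch that extracts only a few of the fields generate_user_data() emits:

    import base64
    from xml.etree import ElementTree

    def read_user_data(encoded):
        root = ElementTree.fromstring(base64.b64decode(encoded))
        vols = [{'name': v.findtext('name'),
                 'ip': v.findtext('ip'),
                 'iscsi_iqn': v.findtext('iscsi_iqn')}
                for v in root.find('volumes')]
        return root.findtext('name'), vols

    encoded = base64.b64encode(
        b"<vsa><name>vsa-1</name><volumes>"
        b"<volume><name>drive-000</name><ip>10.0.0.5</ip>"
        b"<iscsi_iqn>iqn.2011-01.test:drive-000</iscsi_iqn></volume>"
        b"</volumes></vsa>")
    print(read_user_data(encoded))
    # ('vsa-1', [{'name': 'drive-000', 'ip': '10.0.0.5',
    #             'iscsi_iqn': 'iqn.2011-01.test:drive-000'}])
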
diff --git a/run_tests.sh b/run_tests.sh
index 871332b4a..c1fda4cf9 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -1,5 +1,7 @@
#!/bin/bash
+set -eu
+
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Nova's test suite(s)"
@@ -24,13 +26,13 @@ function usage {
function process_option {
case "$1" in
-h|--help) usage;;
- -V|--virtual-env) let always_venv=1; let never_venv=0;;
- -N|--no-virtual-env) let always_venv=0; let never_venv=1;;
- -r|--recreate-db) let recreate_db=1;;
- -n|--no-recreate-db) let recreate_db=0;;
- -f|--force) let force=1;;
- -p|--pep8) let just_pep8=1;;
- -c|--coverage) let coverage=1;;
+ -V|--virtual-env) always_venv=1; never_venv=0;;
+ -N|--no-virtual-env) always_venv=0; never_venv=1;;
+ -r|--recreate-db) recreate_db=1;;
+ -n|--no-recreate-db) recreate_db=0;;
+ -f|--force) force=1;;
+ -p|--pep8) just_pep8=1;;
+ -c|--coverage) coverage=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
@@ -130,7 +132,7 @@ if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
-run_tests || exit
+run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to