summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorvladimir.p <vladimir@zadarastorage.com>2011-07-15 17:56:27 -0700
committervladimir.p <vladimir@zadarastorage.com>2011-07-15 17:56:27 -0700
commitb814f9fef3efa1bdcb7e03a9161e08721b7bc8c4 (patch)
tree71f9fc135a3de3b4d2938a365e1a080f2d5bfb39
parent3a11738f517999ed1fd3a2c0a7ca452c7191b50f (diff)
VSA: first cut. merged with 1279
-rw-r--r--Authors1
-rw-r--r--[-rwxr-xr-x]bin/nova-api0
-rwxr-xr-xbin/nova-manage250
-rw-r--r--[-rwxr-xr-x]bin/nova-vncproxy0
-rw-r--r--[-rwxr-xr-x]contrib/nova.sh0
-rw-r--r--nova/CA/newcerts/.placeholder0
-rw-r--r--nova/CA/private/.placeholder0
-rw-r--r--nova/CA/projects/.gitignore1
-rw-r--r--nova/CA/projects/.placeholder0
-rw-r--r--nova/CA/reqs/.gitignore1
-rw-r--r--nova/CA/reqs/.placeholder0
-rw-r--r--nova/api/ec2/__init__.py4
-rw-r--r--nova/api/ec2/cloud.py164
-rw-r--r--nova/api/openstack/contrib/drive_types.py147
-rw-r--r--nova/api/openstack/contrib/virtual_storage_arrays.py454
-rw-r--r--nova/api/openstack/contrib/volumes.py14
-rw-r--r--nova/compute/api.py10
-rw-r--r--nova/db/api.py88
-rw-r--r--nova/db/sqlalchemy/api.py291
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py152
-rw-r--r--nova/db/sqlalchemy/migration.py3
-rw-r--r--nova/db/sqlalchemy/models.py95
-rw-r--r--nova/exception.py20
-rw-r--r--nova/flags.py27
-rw-r--r--nova/quota.py4
-rw-r--r--nova/scheduler/vsa.py495
-rw-r--r--nova/tests/test_libvirt.py2
-rw-r--r--nova/volume/api.py46
-rw-r--r--nova/volume/driver.py20
-rw-r--r--nova/volume/manager.py121
-rw-r--r--nova/volume/san.py323
-rw-r--r--nova/vsa/__init__.py18
-rw-r--r--nova/vsa/api.py407
-rw-r--r--nova/vsa/connection.py25
-rw-r--r--nova/vsa/fake.py22
-rw-r--r--nova/vsa/manager.py172
-rw-r--r--[-rwxr-xr-x]plugins/xenserver/xenapi/etc/xapi.d/plugins/agent0
-rw-r--r--[-rwxr-xr-x]tools/clean-vlans0
-rw-r--r--[-rwxr-xr-x]tools/nova-debug0
39 files changed, 3328 insertions, 49 deletions
diff --git a/Authors b/Authors
index 8ffb7d8d4..d6dfe7615 100644
--- a/Authors
+++ b/Authors
@@ -95,6 +95,7 @@ Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Vivek Y S <vivek.ys@gmail.com>
+Vladimir Popovski <vladimir@zadarastorage.com>
William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
diff --git a/bin/nova-api b/bin/nova-api
index fe8e83366..fe8e83366 100755..100644
--- a/bin/nova-api
+++ b/bin/nova-api
diff --git a/bin/nova-manage b/bin/nova-manage
index b892d958a..4cf27ec8c 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -62,6 +62,10 @@ import sys
import time
+import tempfile
+import zipfile
+import ast
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -84,6 +88,7 @@ from nova import rpc
from nova import utils
from nova import version
from nova.api.ec2 import ec2utils
+from nova.api.ec2 import cloud
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types
@@ -870,6 +875,243 @@ class VersionCommands(object):
(version.version_string(), version.version_string_with_vcs())
+class VsaCommands(object):
+ """Methods for dealing with VSAs"""
+
+ def __init__(self, *args, **kwargs):
+ self.controller = cloud.CloudController()
+ self.manager = manager.AuthManager()
+
+ # VP-TMP Solution for APIs. Project should be provided per API call
+ #self.context = context.get_admin_context()
+
+ try:
+ project = self.manager.get_projects().pop()
+ except IndexError:
+ print (_("No projects defined"))
+ raise
+
+ self.context = context.RequestContext(user=project.project_manager,
+ project=project)
+
+ def _list(self, vsas):
+ format_str = "%-5s %-15s %-25s %-30s %-5s %-10s %-10s %-10s %10s"
+ if len(vsas):
+ print format_str %\
+ (_('ID'),
+ _('vsa_id'),
+ _('displayName'),
+ _('description'),
+ _('count'),
+ _('vc_type'),
+ _('status'),
+ _('AZ'),
+ _('createTime'))
+
+ for vsa in vsas:
+ print format_str %\
+ (vsa['vsaId'],
+ vsa['name'],
+ vsa['displayName'],
+ vsa['displayDescription'],
+ vsa['vcCount'],
+ vsa['vcType'],
+ vsa['status'],
+ vsa['availabilityZone'],
+ str(vsa['createTime']))
+
+ def create(self, storage='[]', name=None, description=None, vc_count=1,
+ instance_type_name=None, image_name=None, shared=None,
+ az=None):
+ """Create a VSA.
+ args: [storage] [name] [description] [vc_count]
+ [instance_type] [image_name] [--shared|--full_drives]
+ [availability_zone]
+
+ where <storage> is a string representing list of dictionaries
+ in the following format:
+ [{'drive_name': 'type', 'num_drives': N, 'size': M},..]
+ """
+
+ # Sanity check for storage string
+ storage_list = []
+ if storage is not None:
+ try:
+ storage_list = ast.literal_eval(storage)
+ except:
+ print _("Invalid string format %s") % storage
+ raise
+
+ for node in storage_list:
+ if ('drive_name' not in node) or ('num_drives' not in node):
+ print (_("Invalid string format for element %s. " \
+ "Expecting keys 'drive_name' & 'num_drives'"),
+ str(node))
+ raise KeyError
+
+ if instance_type_name == '':
+ instance_type_name = None
+
+ if shared is None or shared == "--full_drives":
+ shared = False
+ elif shared == "--shared":
+ shared = True
+ else:
+ raise ValueError(_('Shared parameter should be set either to "\
+ "--shared or --full_drives'))
+
+ values = {
+ 'display_name': name,
+ 'display_description': description,
+ 'vc_count': int(vc_count),
+ 'vc_type': instance_type_name,
+ 'image_name': image_name,
+ 'storage': storage_list,
+ 'shared': shared,
+ 'placement': {'AvailabilityZone': az}
+ }
+
+ result = self.controller.create_vsa(self.context, **values)
+ self._list(result['vsaSet'])
+
+ def update(self, vsa_id, name=None, description=None, vc_count=None):
+ """Updates name/description of vsa and number of VCs
+ args: vsa_id [display_name] [display_description] [vc_count]"""
+
+ values = {}
+ if name is not None:
+ values['display_name'] = name
+ if description is not None:
+ values['display_description'] = description
+ if vc_count is not None:
+ values['vc_count'] = int(vc_count)
+
+ self.controller.update_vsa(self.context, vsa_id, **values)
+
+ def delete(self, vsa_id):
+ """Delete a vsa
+ args: vsa_id"""
+
+ self.controller.delete_vsa(self.context, vsa_id)
+
+ def list(self, vsa_id=None):
+ """Describe all available VSAs (or particular one)
+ args: [vsa_id]"""
+
+ if vsa_id is not None:
+ vsa_id = [vsa_id]
+
+ result = self.controller.describe_vsas(self.context, vsa_id)
+ self._list(result['vsaSet'])
+
+
+class VsaDriveTypeCommands(object):
+ """Methods for dealing with VSA drive types"""
+
+ def __init__(self, *args, **kwargs):
+ super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
+
+ def _list(self, drives):
+ format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
+ if len(drives):
+ print format_str %\
+ (_('ID'),
+ _('name'),
+ _('type'),
+ _('size_gb'),
+ _('rpm'),
+ _('capabilities'),
+ _('visible'),
+ _('createTime'))
+
+ for drive in drives:
+ print format_str %\
+ (str(drive['id']),
+ drive['name'],
+ drive['type'],
+ str(drive['size_gb']),
+ drive['rpm'],
+ drive['capabilities'],
+ str(drive['visible']),
+ str(drive['created_at']))
+
+ def create(self, type, size_gb, rpm, capabilities='',
+ visible=None, name=None):
+ """Create drive type.
+ args: type size_gb rpm [capabilities] [--show|--hide] [custom_name]
+ """
+
+ if visible is None or visible == "--show":
+ visible = True
+ elif visible == "--hide":
+ visible = False
+ else:
+ raise ValueError(_('Visible parameter should be set to --show '\
+ 'or --hide'))
+
+ values = {
+ 'type': type,
+ 'size_gb': int(size_gb),
+ 'rpm': rpm,
+ 'capabilities': capabilities,
+ 'visible': visible,
+ 'name': name
+ }
+ result = self.controller.create_drive_type(context.get_admin_context(),
+ **values)
+ self._list(result['driveTypeSet'])
+
+ def delete(self, name):
+ """Delete drive type
+ args: name"""
+
+ self.controller.delete_drive_type(context.get_admin_context(), name)
+
+ def rename(self, name, new_name=None):
+ """Rename drive type
+ args: name [new_name]"""
+
+ self.controller.rename_drive_type(context.get_admin_context(),
+ name, new_name)
+
+ def list(self, visible=None, name=None):
+ """Describe all available VSA drive types (or particular one)
+ args: [--all] [drive_name]"""
+
+ visible = False if visible == "--all" else True
+
+ if name is not None:
+ name = [name]
+
+ result = self.controller.describe_drive_types(
+ context.get_admin_context(), name, visible)
+ self._list(result['driveTypeSet'])
+
+ def update(self, name, type=None, size_gb=None, rpm=None,
+ capabilities='', visible=None):
+ """Update drive type.
+ args: name [type] [size_gb] [rpm] [capabilities] [--show|--hide]
+ """
+
+ if visible is None or visible == "--show":
+ visible = True
+ elif visible == "--hide":
+ visible = False
+ else:
+ raise ValueError(_('Visible parameter should be set to --show '\
+ 'or --hide'))
+
+ values = {
+ 'type': type,
+ 'size_gb': size_gb,
+ 'rpm': rpm,
+ 'capabilities': capabilities,
+ 'visible': visible
+ }
+ self.controller.update_drive_type(context.get_admin_context(),
+ name, **values)
+
+
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@@ -1214,6 +1456,7 @@ CATEGORIES = [
('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
+ ('drive', VsaDriveTypeCommands),
('fixed', FixedIpCommands),
('flavor', InstanceTypeCommands),
('floating', FloatingIpCommands),
@@ -1229,7 +1472,8 @@ CATEGORIES = [
('version', VersionCommands),
('vm', VmCommands),
('volume', VolumeCommands),
- ('vpn', VpnCommands)]
+ ('vpn', VpnCommands),
+ ('vsa', VsaCommands)]
def lazy_match(name, key_value_tuples):
@@ -1295,6 +1539,10 @@ def main():
action, fn = matches[0]
# call the action with the remaining arguments
try:
+ for arg in sys.argv:
+ if arg == '-h' or arg == '--help':
+ print "%s %s: %s" % (category, action, fn.__doc__)
+ sys.exit(0)
fn(*argv)
sys.exit(0)
except TypeError:
diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy
index bdbb30a7f..bdbb30a7f 100755..100644
--- a/bin/nova-vncproxy
+++ b/bin/nova-vncproxy
diff --git a/contrib/nova.sh b/contrib/nova.sh
index eab680580..eab680580 100755..100644
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
diff --git a/nova/CA/newcerts/.placeholder b/nova/CA/newcerts/.placeholder
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/CA/newcerts/.placeholder
+++ /dev/null
diff --git a/nova/CA/private/.placeholder b/nova/CA/private/.placeholder
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/CA/private/.placeholder
+++ /dev/null
diff --git a/nova/CA/projects/.gitignore b/nova/CA/projects/.gitignore
deleted file mode 100644
index 72e8ffc0d..000000000
--- a/nova/CA/projects/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*
diff --git a/nova/CA/projects/.placeholder b/nova/CA/projects/.placeholder
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/CA/projects/.placeholder
+++ /dev/null
diff --git a/nova/CA/reqs/.gitignore b/nova/CA/reqs/.gitignore
deleted file mode 100644
index 72e8ffc0d..000000000
--- a/nova/CA/reqs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*
diff --git a/nova/CA/reqs/.placeholder b/nova/CA/reqs/.placeholder
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/CA/reqs/.placeholder
+++ /dev/null
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 890d57fe7..ec44c02ef 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -269,6 +269,10 @@ class Authorizer(wsgi.Middleware):
'DescribeImageAttribute': ['all'],
'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
'UpdateImage': ['projectmanager', 'sysadmin'],
+ 'CreateVsa': ['projectmanager', 'sysadmin'],
+ 'DeleteVsa': ['projectmanager', 'sysadmin'],
+ 'DescribeVsas': ['projectmanager', 'sysadmin'],
+ 'DescribeDriveTypes': ['projectmanager', 'sysadmin'],
},
'AdminController': {
# All actions have the same permission: ['none'] (the default)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index acfd1361c..786ceaccc 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -42,6 +42,8 @@ from nova import network
from nova import rpc
from nova import utils
from nova import volume
+from nova import vsa
+from nova.vsa import drive_types
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.image import s3
@@ -87,6 +89,7 @@ class CloudController(object):
self.compute_api = compute.API(
network_api=self.network_api,
volume_api=self.volume_api)
+ self.vsa_api = vsa.API(compute_api=self.compute_api)
self.setup()
def __str__(self):
@@ -727,12 +730,26 @@ class CloudController(object):
snapshot_id = None
LOG.audit(_("Create volume of %s GB"), size, context=context)
+ to_vsa_id = kwargs.get('to_vsa_id', None)
+ if to_vsa_id:
+ to_vsa_id = ec2utils.ec2_id_to_id(to_vsa_id)
+
+ from_vsa_id = kwargs.get('from_vsa_id', None)
+ if from_vsa_id:
+ from_vsa_id = ec2utils.ec2_id_to_id(from_vsa_id)
+
+ if to_vsa_id or from_vsa_id:
+ LOG.audit(_("Create volume of %s GB associated with VSA "\
+ "(to: %d, from: %d)"),
+ size, to_vsa_id, from_vsa_id, context=context)
+
volume = self.volume_api.create(
context,
size=size,
snapshot_id=snapshot_id,
name=kwargs.get('display_name'),
- description=kwargs.get('display_description'))
+ description=kwargs.get('display_description'),
+ to_vsa_id=to_vsa_id, from_vsa_id=from_vsa_id)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
@@ -786,6 +803,151 @@ class CloudController(object):
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
+ def _format_vsa(self, context, p_vsa):
+ vsa = {}
+ vsa['vsaId'] = p_vsa['id']
+ vsa['status'] = p_vsa['status']
+ vsa['availabilityZone'] = p_vsa['availability_zone']
+ vsa['createTime'] = p_vsa['created_at']
+ vsa['name'] = p_vsa['name']
+ vsa['displayName'] = p_vsa['display_name']
+ vsa['displayDescription'] = p_vsa['display_description']
+ vsa['vcCount'] = p_vsa['vc_count']
+ if p_vsa['vsa_instance_type']:
+ vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None)
+ else:
+ vsa['vcType'] = None
+ return vsa
+
+ def create_vsa(self, context, **kwargs):
+ display_name = kwargs.get('display_name')
+ display_description = kwargs.get('display_description')
+ vc_count = int(kwargs.get('vc_count', 1))
+ instance_type = instance_types.get_instance_type_by_name(
+ kwargs.get('vc_type', FLAGS.default_vsa_instance_type))
+ image_name = kwargs.get('image_name')
+ availability_zone = kwargs.get('placement', {}).get(
+ 'AvailabilityZone')
+ #storage = ast.literal_eval(kwargs.get('storage', '[]'))
+ storage = kwargs.get('storage', [])
+ shared = kwargs.get('shared', False)
+
+ vc_type = instance_type['name']
+ _storage = str(storage)
+ LOG.audit(_("Create VSA %(display_name)s vc_count:%(vc_count)d "\
+ "vc_type:%(vc_type)s storage:%(_storage)s"), locals())
+
+ vsa = self.vsa_api.create(context, display_name, display_description,
+ vc_count, instance_type, image_name,
+ availability_zone, storage, shared)
+ return {'vsaSet': [self._format_vsa(context, vsa)]}
+
+ def update_vsa(self, context, vsa_id, **kwargs):
+ LOG.audit(_("Update VSA %s"), vsa_id)
+ updatable_fields = ['display_name', 'display_description', 'vc_count']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+ if changes:
+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
+ self.vsa_api.update(context, vsa_id=vsa_id, **changes)
+ return True
+
+ def delete_vsa(self, context, vsa_id, **kwargs):
+ LOG.audit(_("Delete VSA %s"), vsa_id)
+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
+
+ self.vsa_api.delete(context, vsa_id)
+
+ return True
+
+ def describe_vsas(self, context, vsa_id=None, status=None,
+ availability_zone=None, **kwargs):
+# LOG.debug(_("vsa_id=%s, status=%s, az=%s"),
+# (vsa_id, status, availability_zone))
+ result = []
+ vsas = []
+ if vsa_id is not None:
+ for ec2_id in vsa_id:
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ vsa = self.vsa_api.get(context, internal_id)
+ vsas.append(vsa)
+ else:
+ vsas = self.vsa_api.get_all(context)
+
+ if status:
+ result = []
+ for vsa in vsas:
+ if vsa['status'] in status:
+ result.append(vsa)
+ vsas = result
+
+ if availability_zone:
+ result = []
+ for vsa in vsas:
+ if vsa['availability_zone'] in availability_zone:
+ result.append(vsa)
+ vsas = result
+
+ return {'vsaSet': [self._format_vsa(context, vsa) for vsa in vsas]}
+
+ def create_drive_type(self, context, **kwargs):
+ name = kwargs.get('name')
+ type = kwargs.get('type')
+ size_gb = int(kwargs.get('size_gb'))
+ rpm = kwargs.get('rpm')
+ capabilities = kwargs.get('capabilities')
+ visible = kwargs.get('visible', True)
+
+ LOG.audit(_("Create Drive Type %(name)s: %(type)s %(size_gb)d "\
+ "%(rpm)s %(capabilities)s %(visible)s"),
+ locals())
+
+ rv = drive_types.drive_type_create(context, type, size_gb, rpm,
+ capabilities, visible, name)
+ return {'driveTypeSet': [dict(rv)]}
+
+ def update_drive_type(self, context, name, **kwargs):
+ LOG.audit(_("Update Drive Type %s"), name)
+ updatable_fields = ['type',
+ 'size_gb',
+ 'rpm',
+ 'capabilities',
+ 'visible']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs and \
+ kwargs[field] is not None and \
+ kwargs[field] != '':
+ changes[field] = kwargs[field]
+ if changes:
+ drive_types.drive_type_update(context, name, **changes)
+ return True
+
+ def rename_drive_type(self, context, name, new_name):
+ drive_types.drive_type_rename(context, name, new_name)
+ return True
+
+ def delete_drive_type(self, context, name):
+ drive_types.drive_type_delete(context, name)
+ return True
+
+ def describe_drive_types(self, context, names=None, visible=True):
+
+ drives = []
+ if names is not None:
+ for name in names:
+ drive = drive_types.drive_type_get_by_name(context, name)
+ if drive['visible'] == visible:
+ drives.append(drive)
+ else:
+ drives = drive_types.drive_type_get_all(context, visible)
+
+ # VP-TODO: Change it later to EC2 compatible func (output)
+
+ return {'driveTypeSet': [dict(drive) for drive in drives]}
+
def _convert_to_set(self, lst, label):
if lst is None or lst == []:
return None
diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py
new file mode 100644
index 000000000..85b3170cb
--- /dev/null
+++ b/nova/api/openstack/contrib/drive_types.py
@@ -0,0 +1,147 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The Drive Types extension for Virtual Storage Arrays"""
+
+
+from webob import exc
+
+from nova.vsa import drive_types
+from nova import db
+from nova import quota
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
+LOG = logging.getLogger("nova.api.drive_types")
+
+
+class DriveTypeController(object):
+ """The Drive Type API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "drive_type": [
+ "id",
+ "displayName",
+ "type",
+ "size",
+ "rpm",
+ "capabilities",
+ ]}}}
+
+ def _drive_type_view(self, context, drive):
+ """Maps keys for drive types view."""
+ d = {}
+
+ d['id'] = drive['id']
+ d['displayName'] = drive['name']
+ d['type'] = drive['type']
+ d['size'] = drive['size_gb']
+ d['rpm'] = drive['rpm']
+ d['capabilities'] = drive['capabilities']
+ return d
+
+ def index(self, req):
+ """Returns a list of drive types."""
+
+ context = req.environ['nova.context']
+ drive_types = drive_types.drive_type_get_all(context)
+ limited_list = common.limited(drive_types, req)
+ res = [self._drive_type_view(context, drive) for drive in limited_list]
+ return {'drive_types': res}
+
+ def show(self, req, id):
+ """Return data about the given drive type."""
+ context = req.environ['nova.context']
+
+ try:
+ drive = drive_types.drive_type_get(context, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'drive_type': self._drive_type_view(context, drive)}
+
+ def create(self, req, body):
+ """Creates a new drive type."""
+ context = req.environ['nova.context']
+
+ if not body:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ drive = body['drive_type']
+
+ name = drive.get('displayName')
+ type = drive.get('type')
+ size = drive.get('size')
+ rpm = drive.get('rpm')
+ capabilities = drive.get('capabilities')
+
+ LOG.audit(_("Create drive type %(name)s for "\
+ "%(type)s:%(size)s:%(rpm)s"), locals(), context=context)
+
+ new_drive = drive_types.drive_type_create(context,
+ type=type,
+ size_gb=size,
+ rpm=rpm,
+ capabilities=capabilities,
+ name=name)
+
+ return {'drive_type': self._drive_type_view(context, new_drive)}
+
+ def delete(self, req, id):
+ """Deletes a drive type."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete drive type with id: %s"), id, context=context)
+
+ try:
+ drive = drive_types.drive_type_get(context, id)
+ drive_types.drive_type_delete(context, drive['name'])
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+
+class Drive_types(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "DriveTypes"
+
+ def get_alias(self):
+ return "zadr-drive_types"
+
+ def get_description(self):
+ return "Drive Types support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/drive_types/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'zadr-drive_types',
+ DriveTypeController())
+
+ resources.append(res)
+ return resources
diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py
new file mode 100644
index 000000000..eca2d68dd
--- /dev/null
+++ b/nova/api/openstack/contrib/virtual_storage_arrays.py
@@ -0,0 +1,454 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The virtul storage array extension"""
+
+
+from webob import exc
+
+from nova import vsa
+from nova import volume
+from nova import db
+from nova import quota
+from nova import exception
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+from nova.api.openstack.contrib import volumes
+from nova.compute import instance_types
+
+from nova import flags
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger("nova.api.vsa")
+
+
+class VsaController(object):
+ """The Virtual Storage Array API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vsa": [
+ "id",
+ "name",
+ "displayName",
+ "displayDescription",
+ "createTime",
+ "status",
+ "vcType",
+ "vcCount",
+ "driveCount",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ super(VsaController, self).__init__()
+
+ def _vsa_view(self, context, vsa, details=False):
+ """Map keys for vsa summary/detailed view."""
+ d = {}
+
+ d['id'] = vsa['id']
+ d['name'] = vsa['name']
+ d['displayName'] = vsa['display_name']
+ d['displayDescription'] = vsa['display_description']
+
+ d['createTime'] = vsa['created_at']
+ d['status'] = vsa['status']
+
+ if vsa['vsa_instance_type']:
+ d['vcType'] = vsa['vsa_instance_type'].get('name', None)
+ else:
+ d['vcType'] = None
+
+ d['vcCount'] = vsa['vc_count']
+ d['driveCount'] = vsa['vol_count']
+
+ return d
+
+ def _items(self, req, details):
+ """Return summary or detailed list of VSAs."""
+ context = req.environ['nova.context']
+ vsas = self.vsa_api.get_all(context)
+ limited_list = common.limited(vsas, req)
+ res = [self._vsa_view(context, vsa, details) for vsa in limited_list]
+ return {'vsaSet': res}
+
+ def index(self, req):
+ """Return a short list of VSAs."""
+ return self._items(req, details=False)
+
+ def detail(self, req):
+ """Return a detailed list of VSAs."""
+ return self._items(req, details=True)
+
+ def show(self, req, id):
+ """Return data about the given VSA."""
+ context = req.environ['nova.context']
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'vsa': self._vsa_view(context, vsa, details=True)}
+
+ def create(self, req, body):
+ """Create a new VSA."""
+ context = req.environ['nova.context']
+
+ if not body:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vsa = body['vsa']
+
+ display_name = vsa.get('displayName')
+ display_description = vsa.get('displayDescription')
+ storage = vsa.get('storage')
+ shared = vsa.get('shared')
+ vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
+ availability_zone = vsa.get('placement', {}).get('AvailabilityZone')
+
+ try:
+ instance_type = instance_types.get_instance_type_by_name(vc_type)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
+ locals(), context=context)
+
+ result = self.vsa_api.create(context,
+ display_name=display_name,
+ display_description=display_description,
+ storage=storage,
+ shared=shared,
+ instance_type=instance_type,
+ availability_zone=availability_zone)
+
+ return {'vsa': self._vsa_view(context, result, details=True)}
+
+ def delete(self, req, id):
+ """Delete a VSA."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete VSA with id: %s"), id, context=context)
+
+ try:
+ self.vsa_api.delete(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+
+class VsaVolumeDriveController(volumes.VolumeController):
+ """The base class for VSA volumes & drives.
+
+ A child resource of the VSA object. Allows operations with
+ volumes and drives created to/from particular VSA
+
+ """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "volume": [
+ "id",
+ "name",
+ "status",
+ "size",
+ "availabilityZone",
+ "createdAt",
+ "displayName",
+ "displayDescription",
+ "vsaId",
+ ]}}}
+
+ def __init__(self):
+ # self.compute_api = compute.API()
+ # self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+ super(VsaVolumeDriveController, self).__init__()
+
+ def _translation(self, context, vol, vsa_id, details):
+ if details:
+ translation = volumes.translate_volume_detail_view
+ else:
+ translation = volumes.translate_volume_summary_view
+
+ d = translation(context, vol)
+ d['vsaId'] = vol[self.direction]
+ return d
+
+ def _check_volume_ownership(self, context, vsa_id, id):
+ obj = self.object
+ try:
+ volume_ref = self.volume_api.get(context, volume_id=id)
+ except exception.NotFound:
+ LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
+ raise
+
+ own_vsa_id = volume_ref[self.direction]
+ if own_vsa_id != int(vsa_id):
+ LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\
+ " and not to VSA %(vsa_id)s."), locals())
+ raise exception.Invalid()
+
+ def _items(self, req, vsa_id, details):
+ """Return summary or detailed list of volumes for particular VSA."""
+ context = req.environ['nova.context']
+
+ vols = self.volume_api.get_all_by_vsa(context, vsa_id,
+ self.direction.split('_')[0])
+ limited_list = common.limited(vols, req)
+
+ res = [self._translation(context, vol, vsa_id, details) \
+ for vol in limited_list]
+
+ return {self.objects: res}
+
+ def index(self, req, vsa_id):
+ """Return a short list of volumes created from particular VSA."""
+ LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=False)
+
+ def detail(self, req, vsa_id):
+ """Return a detailed list of volumes created from particular VSA."""
+ LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=True)
+
+ def create(self, req, vsa_id, body):
+ """Create a new volume from VSA."""
+ LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())
+ context = req.environ['nova.context']
+
+ if not body:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol = body[self.object]
+ size = vol['size']
+ LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
+ locals(), context=context)
+
+ new_volume = self.volume_api.create(context, size, None,
+ vol.get('displayName'),
+ vol.get('displayDescription'),
+ from_vsa_id=vsa_id)
+
+ return {self.object: self._translation(context, new_volume,
+ vsa_id, True)}
+
+ def update(self, req, vsa_id, id, body):
+ """Update a volume."""
+ context = req.environ['nova.context']
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ vol = body[self.object]
+ updatable_fields = ['display_name',
+ 'display_description',
+ 'status',
+ 'provider_location',
+ 'provider_auth']
+ changes = {}
+ for field in updatable_fields:
+ if field in vol:
+ changes[field] = vol[field]
+
+ obj = self.object
+ LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
+ locals(), context=context)
+
+ try:
+ self.volume_api.update(context, volume_id=id, fields=changes)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ def delete(self, req, vsa_id, id):
+ """Delete a volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).delete(req, id)
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).show(req, id)
+
+
+class VsaVolumeController(VsaVolumeDriveController):
+ """The VSA volume API controller for the Openstack API.
+
+ A child resource of the VSA object. Allows operations with volumes created
+ by particular VSA
+
+ """
+
+ def __init__(self):
+ self.direction = 'from_vsa_id'
+ self.objects = 'volumes'
+ self.object = 'volume'
+ super(VsaVolumeController, self).__init__()
+
+
+class VsaDriveController(VsaVolumeDriveController):
+ """The VSA Drive API controller for the Openstack API.
+
+ A child resource of the VSA object. Allows operations with drives created
+ for particular VSA
+
+ """
+
+ def __init__(self):
+ self.direction = 'to_vsa_id'
+ self.objects = 'drives'
+ self.object = 'drive'
+ super(VsaDriveController, self).__init__()
+
+ def create(self, req, vsa_id, body):
+ """Create a new drive for VSA. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update a drive. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVPoolController(object):
+ """The vPool VSA API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vpool": [
+ "id",
+ "vsaId",
+ "name",
+ "displayName",
+ "displayDescription",
+ "driveCount",
+ "driveIds",
+ "protection",
+ "stripeSize",
+ "stripeWidth",
+ "createTime",
+ "status",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ super(VsaVPoolController, self).__init__()
+
+ def index(self, req, vsa_id):
+ """Return a short list of vpools created from particular VSA."""
+ return {'vpools': []}
+
+ def create(self, req, vsa_id, body):
+ """Create a new vPool for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update vPool parameters."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class Virtual_storage_arrays(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "VSAs"
+
+ def get_alias(self):
+ return "zadr-vsa"
+
+ def get_description(self):
+ return "Virtual Storage Arrays support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/vsa/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'zadr-vsa',
+ VsaController(),
+ collection_actions={'detail': 'GET'},
+ member_actions={'add_capacity': 'POST',
+ 'remove_capacity': 'POST'})
+ resources.append(res)
+
+ res = extensions.ResourceExtension('volumes',
+ VsaVolumeController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('drives',
+ VsaDriveController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('vpools',
+ VsaVPoolController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index e5e2c5b50..3c3d40c0f 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -33,17 +33,17 @@ LOG = logging.getLogger("nova.api.volumes")
FLAGS = flags.FLAGS
-def _translate_volume_detail_view(context, vol):
+def translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
- d = _translate_volume_summary_view(context, vol)
+ d = translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
-def _translate_volume_summary_view(context, vol):
+def translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
@@ -92,7 +92,7 @@ class VolumeController(object):
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
- return {'volume': _translate_volume_detail_view(context, vol)}
+ return {'volume': translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
@@ -108,11 +108,11 @@ class VolumeController(object):
def index(self, req):
"""Returns a summary list of volumes."""
- return self._items(req, entity_maker=_translate_volume_summary_view)
+ return self._items(req, entity_maker=translate_volume_summary_view)
def detail(self, req):
"""Returns a detailed list of volumes."""
- return self._items(req, entity_maker=_translate_volume_detail_view)
+ return self._items(req, entity_maker=translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
@@ -140,7 +140,7 @@ class VolumeController(object):
# Work around problem that instance is lazy-loaded...
new_volume['instance'] = None
- retval = _translate_volume_detail_view(context, new_volume)
+ retval = translate_volume_detail_view(context, new_volume)
return {'volume': retval}
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 432658bbb..a48a5bc98 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -149,7 +149,7 @@ class API(base.Base):
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None):
+ reservation_id=None, vsa_id=None):
"""Verify all the input parameters regardless of the provisioning
strategy being performed."""
@@ -241,7 +241,8 @@ class API(base.Base):
'availability_zone': availability_zone,
'os_type': os_type,
'architecture': architecture,
- 'vm_mode': vm_mode}
+ 'vm_mode': vm_mode,
+ 'vsa_id': vsa_id}
return (num_instances, base_options)
@@ -381,7 +382,8 @@ class API(base.Base):
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None, block_device_mapping=None):
+ reservation_id=None, block_device_mapping=None,
+ vsa_id=None):
"""
Provision the instances by sending off a series of single
instance requests to the Schedulers. This is fine for trival
@@ -402,7 +404,7 @@ class API(base.Base):
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password, zone_blob,
- reservation_id)
+ reservation_id, vsa_id)
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
diff --git a/nova/db/api.py b/nova/db/api.py
index b7c5700e5..9147f136b 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x',
'Template string to be used to generate instance names')
flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
'Template string to be used to generate snapshot names')
-
+flags.DEFINE_string('vsa_name_template', 'vsa-%08x',
+ 'Template string to be used to generate VSA names')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
sqlalchemy='nova.db.sqlalchemy.api')
@@ -509,6 +510,13 @@ def instance_get_all_by_project(context, project_id):
return IMPL.instance_get_all_by_project(context, project_id)
+def instance_get_all_by_project_and_vsa(context, project_id, vsa_id):
+ """Get all instance spawned by a given VSA belonging to a project."""
+ return IMPL.instance_get_all_by_project_and_vsa(context,
+ project_id,
+ vsa_id)
+
+
def instance_get_all_by_host(context, host):
"""Get all instance belonging to a host."""
return IMPL.instance_get_all_by_host(context, host)
@@ -914,6 +922,16 @@ def volume_get_all_by_project(context, project_id):
return IMPL.volume_get_all_by_project(context, project_id)
+def volume_get_all_assigned_to_vsa(context, vsa_id):
+ """Get all volumes assigned to particular VSA."""
+ return IMPL.volume_get_all_assigned_to_vsa(context, vsa_id)
+
+
+def volume_get_all_assigned_from_vsa(context, vsa_id):
+ """Get all volumes created from particular VSA."""
+ return IMPL.volume_get_all_assigned_from_vsa(context, vsa_id)
+
+
def volume_get_by_ec2_id(context, ec2_id):
"""Get a volume by ec2 id."""
return IMPL.volume_get_by_ec2_id(context, ec2_id)
@@ -1422,3 +1440,71 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id,
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs)
+
+
+####################
+
+
+def drive_type_create(context, values):
+ """Creates drive type record."""
+ return IMPL.drive_type_create(context, values)
+
+
+def drive_type_update(context, name, values):
+ """Updates drive type record."""
+ return IMPL.drive_type_update(context, name, values)
+
+
+def drive_type_destroy(context, name):
+ """Deletes drive type record."""
+ return IMPL.drive_type_destroy(context, name)
+
+
+def drive_type_get(context, drive_type_id):
+ """Get drive type record by id."""
+ return IMPL.drive_type_get(context, drive_type_id)
+
+
+def drive_type_get_by_name(context, name):
+ """Get drive type record by name."""
+ return IMPL.drive_type_get_by_name(context, name)
+
+
+def drive_type_get_all(context, visible=None):
+ """Returns all (or only visible) drive types."""
+ return IMPL.drive_type_get_all(context, visible)
+
+
+def vsa_create(context, values):
+ """Creates Virtual Storage Array record."""
+ return IMPL.vsa_create(context, values)
+
+
+def vsa_update(context, vsa_id, values):
+ """Updates Virtual Storage Array record."""
+ return IMPL.vsa_update(context, vsa_id, values)
+
+
+def vsa_destroy(context, vsa_id):
+ """Deletes Virtual Storage Array record."""
+ return IMPL.vsa_destroy(context, vsa_id)
+
+
+def vsa_get(context, vsa_id):
+ """Get Virtual Storage Array record by ID."""
+ return IMPL.vsa_get(context, vsa_id)
+
+
+def vsa_get_all(context):
+ """Get all Virtual Storage Array records."""
+ return IMPL.vsa_get_all(context)
+
+
+def vsa_get_all_by_project(context, project_id):
+ """Get all Virtual Storage Array records by project ID."""
+ return IMPL.vsa_get_all_by_project(context, project_id)
+
+
+def vsa_get_vc_ips_list(context, vsa_id):
+ """Retrieves IPs of instances associated with Virtual Storage Array."""
+ return IMPL.vsa_get_vc_ips_list(context, vsa_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a831516a8..aa5a6e052 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1218,6 +1218,35 @@ def instance_get_all_by_project(context, project_id):
@require_context
+def instance_get_all_by_project_and_vsa(context, project_id, vsa_id):
+ authorize_project_context(context, project_id)
+
+ session = get_session()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('instance_type')).\
+ filter_by(project_id=project_id).\
+ filter_by(vsa_id=vsa_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_admin_context
+def instance_get_all_by_vsa(context, vsa_id):
+ session = get_session()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('instance_type')).\
+ filter_by(vsa_id=vsa_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
def instance_get_all_by_reservation(context, reservation_id):
session = get_session()
@@ -2018,12 +2047,14 @@ def volume_get(context, volume_id, session=None):
if is_admin_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('drive_type')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('drive_type')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
@@ -2039,6 +2070,7 @@ def volume_get_all(context):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('drive_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2048,6 +2080,7 @@ def volume_get_all_by_host(context, host):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('drive_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2057,6 +2090,7 @@ def volume_get_all_by_host(context, host):
def volume_get_all_by_instance(context, instance_id):
session = get_session()
result = session.query(models.Volume).\
+ options(joinedload('drive_type')).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
@@ -2065,6 +2099,28 @@ def volume_get_all_by_instance(context, instance_id):
return result
+@require_admin_context
+def volume_get_all_assigned_to_vsa(context, vsa_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ options(joinedload('drive_type')).\
+ filter_by(to_vsa_id=vsa_id).\
+ filter_by(deleted=False).\
+ all()
+ return result
+
+
+@require_admin_context
+def volume_get_all_assigned_from_vsa(context, vsa_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ options(joinedload('drive_type')).\
+ filter_by(from_vsa_id=vsa_id).\
+ filter_by(deleted=False).\
+ all()
+ return result
+
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -2072,6 +2128,7 @@ def volume_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('drive_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2084,6 +2141,7 @@ def volume_get_instance(context, volume_id):
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload('instance')).\
+ options(joinedload('drive_type')).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
@@ -3286,3 +3344,236 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
+
+
+####################
+
+
+@require_admin_context
+def drive_type_create(context, values):
+ """
+ Creates drive type record.
+ """
+ try:
+ drive_type_ref = models.DriveTypes()
+ drive_type_ref.update(values)
+ drive_type_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return drive_type_ref
+
+
+@require_admin_context
+def drive_type_update(context, name, values):
+ """
+ Updates drive type record.
+ """
+ session = get_session()
+ with session.begin():
+ drive_type_ref = drive_type_get_by_name(context, name, session=session)
+ drive_type_ref.update(values)
+ drive_type_ref.save(session=session)
+ return drive_type_ref
+
+
+@require_admin_context
+def drive_type_destroy(context, name):
+ """
+ Deletes drive type record.
+ """
+ session = get_session()
+ drive_type_ref = session.query(models.DriveTypes).\
+ filter_by(name=name)
+ records = drive_type_ref.delete()
+ if records == 0:
+ raise exception.VirtualDiskTypeNotFoundByName(name=name)
+ else:
+ return drive_type_ref
+
+
+@require_context
+def drive_type_get(context, drive_type_id, session=None):
+ """
+ Get drive type record by id.
+ """
+ if not session:
+ session = get_session()
+
+ result = session.query(models.DriveTypes).\
+ filter_by(id=drive_type_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ if not result:
+ raise exception.VirtualDiskTypeNotFound(id=drive_type_id)
+
+ return result
+
+
+@require_context
+def drive_type_get_by_name(context, name, session=None):
+ """
+ Get drive type record by name.
+ """
+ if not session:
+ session = get_session()
+
+ result = session.query(models.DriveTypes).\
+ filter_by(name=name).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ if not result:
+ raise exception.VirtualDiskTypeNotFoundByName(name=name)
+
+ return result
+
+
+@require_context
+def drive_type_get_all(context, visible=False):
+ """
+ Returns all (or only visible) drive types.
+ """
+ session = get_session()
+ if not visible:
+ drive_types = session.query(models.DriveTypes).\
+ filter_by(deleted=can_read_deleted(context)).\
+ order_by("name").\
+ all()
+ else:
+ drive_types = session.query(models.DriveTypes).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(visible=True).\
+ order_by("name").\
+ all()
+ return drive_types
+
+
+####################
+
+
+@require_admin_context
+def vsa_create(context, values):
+ """
+ Creates Virtual Storage Array record.
+ """
+ try:
+ vsa_ref = models.VirtualStorageArray()
+ vsa_ref.update(values)
+ vsa_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_update(context, vsa_id, values):
+ """
+ Updates Virtual Storage Array record.
+ """
+ session = get_session()
+ with session.begin():
+ vsa_ref = vsa_get(context, vsa_id, session=session)
+ vsa_ref.update(values)
+ vsa_ref.save(session=session)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_destroy(context, vsa_id):
+ """
+ Deletes Virtual Storage Array record.
+ """
+ session = get_session()
+ with session.begin():
+ #vsa_ref = vsa_get(context, vsa_id, session=session)
+ #vsa_ref.delete(session=session)
+ session.query(models.VirtualStorageArray).\
+ filter_by(id=vsa_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def vsa_get(context, vsa_id, session=None):
+ """
+ Get Virtual Storage Array record by ID.
+ """
+ if not session:
+ session = get_session()
+ result = None
+
+ if is_admin_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.VirtualStorageArrayNotFound(id=vsa_id)
+
+ return result
+
+
+@require_admin_context
+def vsa_get_all(context):
+ """
+ Get all Virtual Storage Array records.
+ """
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def vsa_get_all_by_project(context, project_id):
+ """
+ Get all Virtual Storage Array records by project ID.
+ """
+ authorize_project_context(context, project_id)
+
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def vsa_get_vc_ips_list(context, vsa_id):
+ """
+ Retrieves IPs of instances associated with Virtual Storage Array.
+ """
+ result = []
+ session = get_session()
+ vc_instances = session.query(models.Instance).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('instance_type')).\
+ filter_by(vsa_id=vsa_id).\
+ filter_by(deleted=False).\
+ all()
+ for vc_instance in vc_instances:
+ if vc_instance['fixed_ips']:
+ for fixed in vc_instance['fixed_ips']:
+                # build a [fixed, floating] pair per address; the first
+                # floating IP (if any) is attached, all pairs are appended
+ ip = {}
+ ip['fixed'] = fixed['address']
+ if fixed['floating_ips']:
+ ip['floating'] = fixed['floating_ips'][0]['address']
+ result.append(ip)
+
+ return result
+
+####################
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py
new file mode 100644
index 000000000..7fc8f955c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py
@@ -0,0 +1,152 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Text, Boolean, ForeignKey
+
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of tables.
+#
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+vsa_id = Column('vsa_id', Integer(), nullable=True)
+to_vsa_id = Column('to_vsa_id', Integer(), nullable=True)
+from_vsa_id = Column('from_vsa_id', Integer(), nullable=True)
+drive_type_id = Column('drive_type_id', Integer(), nullable=True)
+
+
+# New Tables
+#
+
+virtual_storage_arrays = Table('virtual_storage_arrays', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_type_id', Integer(), nullable=False),
+ Column('image_ref',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vc_count', Integer(), nullable=False),
+ Column('vol_count', Integer(), nullable=False),
+ Column('status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+drive_types = Table('drive_types', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True),
+ Column('type',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('size_gb', Integer(), nullable=False),
+ Column('rpm',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('capabilities',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('visible', Boolean(create_constraint=True, name=None)),
+ )
+
+#vsa_disk_association = Table('vsa_disk_association', meta,
+# Column('created_at', DateTime(timezone=False)),
+# Column('updated_at', DateTime(timezone=False)),
+# Column('deleted_at', DateTime(timezone=False)),
+# Column('deleted', Boolean(create_constraint=True, name=None)),
+# Column('id', Integer(), primary_key=True, nullable=False),
+# Column('drive_type_id', Integer(), ForeignKey('drive_types.id')),
+# Column('vsa_id', Integer(), ForeignKey('virtual_storage_arrays.id')),
+# Column('disk_num', Integer(), nullable=False),
+# )
+
+#new_tables = (virtual_storage_arrays, drive_types, vsa_disk_association)
+new_tables = (virtual_storage_arrays, drive_types)
+
+#
+# Tables to alter
+#
+
+
+def upgrade(migrate_engine):
+
+ from nova import context
+ from nova import db
+ from nova import flags
+
+ FLAGS = flags.FLAGS
+
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ for table in new_tables:
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ instances.create_column(vsa_id)
+ volumes.create_column(to_vsa_id)
+ volumes.create_column(from_vsa_id)
+ volumes.create_column(drive_type_id)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances.drop_column(vsa_id)
+ volumes.drop_column(to_vsa_id)
+ volumes.drop_column(from_vsa_id)
+ volumes.drop_column(drive_type_id)
+
+ for table in new_tables:
+ table.drop()
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index d9e303599..9b64671a3 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -64,7 +64,8 @@ def db_version():
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
- 'volumes'):
+ 'volumes',
+ 'virtual_storage_arrays', 'drive_types'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index d29d3d6f1..7f2e9d39c 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -247,6 +247,43 @@ class Instance(BASE, NovaBase):
# assert(state in ['nostate', 'running', 'blocked', 'paused',
# 'shutdown', 'shutoff', 'crashed'])
+ vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'),
+ nullable=True)
+
+
+class VirtualStorageArray(BASE, NovaBase):
+ """
+ Represents a virtual storage array supplying block storage to instances.
+ """
+ __tablename__ = 'virtual_storage_arrays'
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return FLAGS.vsa_name_template % self.id
+
+ # User editable field for display in user-facing UIs
+ display_name = Column(String(255))
+ display_description = Column(String(255))
+
+ project_id = Column(String(255))
+ availability_zone = Column(String(255))
+
+ instance_type_id = Column(Integer, ForeignKey('instance_types.id'))
+ image_ref = Column(String(255))
+ vc_count = Column(Integer, default=0) # number of requested VC instances
+ vol_count = Column(Integer, default=0) # total number of BE volumes
+ status = Column(String(255))
+
+ #admin_pass = Column(String(255))
+
+ #disks = relationship(VsaDiskAssociation,
+ # backref=backref('vsa', uselist=False),
+ # foreign_keys=id,
+ # primaryjoin='and_(VsaDiskAssociation.vsa_id == '
+ # 'VirtualStorageArray.id)')
+
class InstanceActions(BASE, NovaBase):
"""Represents a guest VM's actions and results"""
@@ -277,6 +314,12 @@ class InstanceTypes(BASE, NovaBase):
primaryjoin='and_(Instance.instance_type_id == '
'InstanceTypes.id)')
+ vsas = relationship(VirtualStorageArray,
+ backref=backref('vsa_instance_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(VirtualStorageArray.instance_type_id'
+ ' == InstanceTypes.id)')
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
@@ -316,6 +359,57 @@ class Volume(BASE, NovaBase):
provider_location = Column(String(255))
provider_auth = Column(String(255))
+ to_vsa_id = Column(Integer,
+ ForeignKey('virtual_storage_arrays.id'), nullable=True)
+ from_vsa_id = Column(Integer,
+ ForeignKey('virtual_storage_arrays.id'), nullable=True)
+ drive_type_id = Column(Integer,
+ ForeignKey('drive_types.id'), nullable=True)
+
+
+class DriveTypes(BASE, NovaBase):
+ """Represents the known drive types (storage media)."""
+ __tablename__ = 'drive_types'
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ """
+ @property
+ def name(self):
+ if self.capabilities:
+ return FLAGS.drive_type_template_long % \
+ (self.type, str(self.size_gb), self.rpm, self.capabilities)
+ else:
+ return FLAGS.drive_type_template_short % \
+ (self.type, str(self.size_gb), self.rpm)
+ """
+
+ name = Column(String(255), unique=True)
+ type = Column(String(255))
+ size_gb = Column(Integer)
+ rpm = Column(String(255))
+ capabilities = Column(String(255))
+
+ visible = Column(Boolean, default=True)
+
+ volumes = relationship(Volume,
+ backref=backref('drive_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(Volume.drive_type_id == '
+ 'DriveTypes.id)')
+
+#
+#class VsaDiskAssociation(BASE, NovaBase):
+# """associates drive types with Virtual Storage Arrays."""
+# __tablename__ = 'vsa_disk_association'
+#
+# id = Column(Integer, primary_key=True, autoincrement=True)
+#
+# drive_type_id = Column(Integer, ForeignKey('drive_types.id'))
+# vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'))
+#
+# disk_num = Column(Integer, nullable=False) # number of disks
+
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
@@ -785,6 +879,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
+ VirtualStorageArray, DriveTypes,
AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
diff --git a/nova/exception.py b/nova/exception.py
index ad6c005f8..a3d1a4b3f 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -311,6 +311,10 @@ class VolumeNotFoundForInstance(VolumeNotFound):
message = _("Volume not found for instance %(instance_id)s.")
+class VolumeNotFoundForVsa(VolumeNotFound):
+ message = _("Volume not found for vsa %(vsa_id)s.")
+
+
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
@@ -682,3 +686,19 @@ class PasteConfigNotFound(NotFound):
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class VirtualStorageArrayNotFound(NotFound):
+ message = _("Virtual Storage Array %(id)d could not be found.")
+
+
+class VirtualStorageArrayNotFoundByName(NotFound):
+ message = _("Virtual Storage Array %(name)s could not be found.")
+
+
+class VirtualDiskTypeNotFound(NotFound):
+ message = _("Drive Type %(id)d could not be found.")
+
+
+class VirtualDiskTypeNotFoundByName(NotFound):
+ message = _("Drive Type %(name)s could not be found.")
diff --git a/nova/flags.py b/nova/flags.py
index 49355b436..8000eac4a 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url',
in the form "http://127.0.0.1:8000"')
DEFINE_string('ajax_console_proxy_port',
8000, 'port that ajax_console_proxy binds')
+DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -364,6 +365,32 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
+DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
+ 'Manager for vsa')
+DEFINE_string('vc_image_name', 'vc_image',
+ 'the VC image ID (for a VC image that exists in DB Glance)')
+#---------------------------------------------------------------------
+# VSA constants and enums
+DEFINE_string('default_vsa_instance_type', 'm1.small',
+ 'default instance type for VSA instances')
+DEFINE_integer('max_vcs_in_vsa', 32,
+               'maximum VCs in a VSA')
+DEFINE_integer('vsa_part_size_gb', 100,
+ 'default partition size for shared capacity')
+
+DEFINE_string('vsa_status_creating', 'creating',
+ 'VSA creating (not ready yet)')
+DEFINE_string('vsa_status_launching', 'launching',
+ 'Launching VCs (all BE volumes were created)')
+DEFINE_string('vsa_status_created', 'created',
+ 'VSA fully created and ready for use')
+DEFINE_string('vsa_status_partial', 'partial',
+ 'Some BE storage allocations failed')
+DEFINE_string('vsa_status_failed', 'failed',
+             'All BE storage allocations failed')
+DEFINE_string('vsa_status_deleting', 'deleting',
+ 'VSA started the deletion procedure')
+
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
diff --git a/nova/quota.py b/nova/quota.py
index 58766e846..46322d60c 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -24,13 +24,13 @@ from nova import flags
FLAGS = flags.FLAGS
-flags.DEFINE_integer('quota_instances', 10,
+flags.DEFINE_integer('quota_instances', 100, # 10
'number of instances allowed per project')
flags.DEFINE_integer('quota_cores', 20,
'number of instance cores allowed per project')
flags.DEFINE_integer('quota_ram', 50 * 1024,
'megabytes of instance ram allowed per project')
-flags.DEFINE_integer('quota_volumes', 10,
+flags.DEFINE_integer('quota_volumes', 100, # 10
'number of volumes allowed per project')
flags.DEFINE_integer('quota_gigabytes', 1000,
'number of volume gigabytes allowed per project')
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
new file mode 100644
index 000000000..4277c0ba8
--- /dev/null
+++ b/nova/scheduler/vsa.py
@@ -0,0 +1,495 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VSA Simple Scheduler
+"""
+
+from nova import context
+from nova import rpc
+from nova import db
+from nova import flags
+from nova import utils
+from nova.volume import api as volume_api
+from nova.scheduler import driver
+from nova.scheduler import simple
+from nova import log as logging
+
+LOG = logging.getLogger('nova.scheduler.vsa')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('gb_to_bytes_shift', 30,
+ 'Conversion shift between GB and bytes')
+flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
+ 'The percentage range for capacity comparison')
+flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
+ 'The number of unique hosts per storage allocation')
+flags.DEFINE_boolean('vsa_select_unique_drives', True,
+ 'Allow selection of same host for multiple drives')
+
+
class VsaScheduler(simple.SimpleScheduler):
    """Implements Naive Scheduler that tries to find least loaded host.

    Extends the simple scheduler with VSA-aware volume placement: hosts
    are matched against the per-drive-type QoS capability dictionaries
    that volume nodes publish through the zone manager.
    """

    def __init__(self, *args, **kwargs):
        super(VsaScheduler, self).__init__(*args, **kwargs)
        # Ask every volume host to re-publish its capabilities so the
        # scheduler starts with a fresh view of available storage.
        self._notify_all_volume_hosts("startup")
+
+ def _notify_all_volume_hosts(self, event):
+ rpc.cast(context.get_admin_context(),
+ FLAGS.volume_topic,
+ {"method": "notification",
+ "args": {"event": event}})
+
+ def _compare_names(self, str1, str2):
+ result = str1.lower() == str2.lower()
+ # LOG.debug(_("Comparing %(str1)s and %(str2)s. "\
+ # "Result %(result)s"), locals())
+ return result
+
+ def _compare_sizes_exact_match(self, cap_capacity, size_gb):
+ cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift
+ size_gb = int(size_gb)
+ result = cap_capacity == size_gb
+ # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
+ # "Result %(result)s"), locals())
+ return result
+
+ def _compare_sizes_approxim(self, cap_capacity, size_gb):
+ cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift
+ size_gb = int(size_gb)
+ size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100
+
+ result = cap_capacity >= size_gb - size_perc and \
+ cap_capacity <= size_gb + size_perc
+ # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
+ # "Result %(result)s"), locals())
+ return result
+
+ def _qosgrp_match(self, drive_type, qos_values):
+
+ # Add more entries for additional comparisons
+ compare_list = [{'cap1': 'DriveType',
+ 'cap2': 'type',
+ 'cmp_func': self._compare_names},
+ {'cap1': 'DriveCapacity',
+ 'cap2': 'size_gb',
+ 'cmp_func': self._compare_sizes_approxim}]
+
+ for cap in compare_list:
+ if cap['cap1'] in qos_values.keys() and \
+ cap['cap2'] in drive_type.keys() and \
+ cap['cmp_func'] is not None and \
+ cap['cmp_func'](qos_values[cap['cap1']],
+ drive_type[cap['cap2']]):
+ # LOG.debug(_("One of required capabilities found: %s:%s"),
+ # cap['cap1'], drive_type[cap['cap2']])
+ pass
+ else:
+ return False
+ return True
+
    def _filter_hosts(self, topic, request_spec, host_list=None):
        """Return [(host, drive_qos_info)] for hosts able to serve the
        requested drive type with free capacity.

        :param topic: service topic to match (e.g. "volume")
        :param request_spec: dict carrying a 'drive_type' entry
        :param host_list: optional iterable of (host, capabilities) pairs;
                          defaults to the zone manager's service states
        """
        drive_type = request_spec['drive_type']
        LOG.debug(_("Filter hosts for drive type %(drive_type)s") % locals())

        if host_list is None:
            host_list = self.zone_manager.service_states.iteritems()

        filtered_hosts = []  # returns list of (hostname, capability_dict)
        for host, host_dict in host_list:
            for service_name, service_dict in host_dict.iteritems():
                if service_name != topic:
                    continue

                # NOTE(review): 'gos_info' looks like a typo for
                # 'qos_info'; kept as-is (code unchanged in this pass).
                gos_info = service_dict.get('drive_qos_info', {})
                for qosgrp, qos_values in gos_info.iteritems():
                    if self._qosgrp_match(drive_type, qos_values):
                        if qos_values['AvailableCapacity'] > 0:
                            LOG.debug(_("Adding host %s to the list"), host)
                            filtered_hosts.append((host, gos_info))
                        else:
                            LOG.debug(_("Host %s has no free capacity. Skip"),
                                      host)
                        # Stop after the first matching QoS group.
                        break

        LOG.debug(_("Found hosts %(filtered_hosts)s") % locals())
        return filtered_hosts
+
+ def _allowed_to_use_host(self, host, selected_hosts, unique):
+ if unique == False or \
+ host not in [item[0] for item in selected_hosts]:
+ return True
+ else:
+ return False
+
+ def _add_hostcap_to_list(self, selected_hosts, host, cap):
+ if host not in [item[0] for item in selected_hosts]:
+ selected_hosts.append((host, cap))
+
+ def _alg_least_used_host(self, request_spec, all_hosts, selected_hosts):
+ size = request_spec['size']
+ drive_type = request_spec['drive_type']
+ best_host = None
+ best_qoscap = None
+ best_cap = None
+ min_used = 0
+
+ LOG.debug(_("Selecting best host for %(size)sGB volume of type "\
+ "%(drive_type)s from %(all_hosts)s"), locals())
+
+ for (host, capabilities) in all_hosts:
+ has_enough_capacity = False
+ used_capacity = 0
+ for qosgrp, qos_values in capabilities.iteritems():
+
+ used_capacity = used_capacity + qos_values['TotalCapacity'] \
+ - qos_values['AvailableCapacity']
+
+ if self._qosgrp_match(drive_type, qos_values):
+ # we found required qosgroup
+
+ if size == 0: # full drive match
+ if qos_values['FullDrive']['NumFreeDrives'] > 0:
+ has_enough_capacity = True
+ matched_qos = qos_values
+ else:
+ break
+ else:
+ if qos_values['AvailableCapacity'] >= size and \
+ (qos_values['PartitionDrive'][
+ 'NumFreePartitions'] > 0 or \
+ qos_values['FullDrive']['NumFreeDrives'] > 0):
+ has_enough_capacity = True
+ matched_qos = qos_values
+ else:
+ break
+
+ if has_enough_capacity and \
+ self._allowed_to_use_host(host,
+ selected_hosts,
+ unique) and \
+ (best_host is None or used_capacity < min_used):
+
+ min_used = used_capacity
+ best_host = host
+ best_qoscap = matched_qos
+ best_cap = capabilities
+
+ if best_host:
+ self._add_hostcap_to_list(selected_hosts, host, best_cap)
+ LOG.debug(_("Best host found: %(best_host)s. "\
+ "(used capacity %(min_used)s)"), locals())
+ return (best_host, best_qoscap)
+
+ def _alg_most_avail_capacity(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ size = request_spec['size']
+ drive_type = request_spec['drive_type']
+ best_host = None
+ best_qoscap = None
+ best_cap = None
+ max_avail = 0
+
+ LOG.debug(_("Selecting best host for %(size)sGB volume of type "\
+ "%(drive_type)s from %(all_hosts)s"), locals())
+
+ for (host, capabilities) in all_hosts:
+ for qosgrp, qos_values in capabilities.iteritems():
+ if self._qosgrp_match(drive_type, qos_values):
+ # we found required qosgroup
+
+ if size == 0: # full drive match
+ available = qos_values['FullDrive']['NumFreeDrives']
+ else:
+ available = qos_values['AvailableCapacity']
+
+ if available > max_avail and \
+ self._allowed_to_use_host(host,
+ selected_hosts,
+ unique):
+ max_avail = available
+ best_host = host
+ best_qoscap = qos_values
+ best_cap = capabilities
+ break # go to the next host
+
+ if best_host:
+ self._add_hostcap_to_list(selected_hosts, host, best_cap)
+ LOG.debug(_("Best host found: %(best_host)s. "\
+ "(available capacity %(max_avail)s)"), locals())
+
+ return (best_host, best_qoscap)
+
    def _select_hosts(self, request_spec, all_hosts, selected_hosts=None):
        """Pick a host for one volume, preferring host reuse once the
        per-allocation unique-host limit is reached.

        :param request_spec: dict with 'size' and 'drive_type'
        :param all_hosts: candidate (host, capabilities) pairs
        :param selected_hosts: mutable list of hosts already chosen for
                               this allocation; updated by the selection
                               algorithm
        :returns: (host, qos_capabilities)
        :raises: driver.WillNotSchedule when no host qualifies
        """
        if selected_hosts is None:
            selected_hosts = []

        host = None
        if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc:
            # Limit of distinct hosts reached - first try to reuse one of
            # the previously selected hosts (uniqueness disabled).
            LOG.debug(_("Maximum number of hosts selected (%d)"),
                      len(selected_hosts))
            unique = False
            (host, qos_cap) = self._alg_most_avail_capacity(request_spec,
                                                            selected_hosts,
                                                            selected_hosts,
                                                            unique)

            LOG.debug(_("Selected excessive host %(host)s"), locals())
        else:
            unique = FLAGS.vsa_select_unique_drives

        if host is None:
            # if we've not tried yet (# of sel hosts < max) - unique=True
            # or failed to select from selected_hosts - unique=False
            # select from all hosts
            (host, qos_cap) = self._alg_most_avail_capacity(request_spec,
                                                            all_hosts,
                                                            selected_hosts,
                                                            unique)
            LOG.debug(_("Selected host %(host)s"), locals())

        if host is None:
            raise driver.WillNotSchedule(_("No available hosts"))

        return (host, qos_cap)
+
+ def _provision_volume(self, context, vol, vsa_id, availability_zone):
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ now = utils.utcnow()
+ options = {
+ 'size': vol['size'],
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'snapshot_id': None,
+ 'availability_zone': availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': vol['name'],
+ 'display_description': vol['description'],
+ 'to_vsa_id': vsa_id,
+ 'drive_type_id': vol['drive_ref']['id'],
+ 'host': vol['host'],
+ 'scheduled_at': now
+ }
+
+ size = vol['size']
+ host = vol['host']
+ name = vol['name']
+ LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
+ "host %(host)s"), locals())
+
+ volume_ref = db.volume_create(context, options)
+ rpc.cast(context,
+ db.queue_get_for(context, "volume", vol['host']),
+ {"method": "create_volume",
+ "args": {"volume_id": volume_ref['id'],
+ "snapshot_id": None}})
+
+ def _check_host_enforcement(self, availability_zone):
+ if (availability_zone
+ and ':' in availability_zone
+ and context.is_admin):
+ zone, _x, host = availability_zone.partition(':')
+ service = db.service_get_by_args(context.elevated(), host,
+ 'nova-volume')
+ if not self.service_is_up(service):
+ raise driver.WillNotSchedule(_("Host %s not available") % host)
+
+ return host
+ else:
+ return None
+
    def _assign_hosts_to_volumes(self, context, volume_params, forced_host):
        """Annotate each volume dict in volume_params with 'host' and
        'capabilities' entries.

        When forced_host is set (admin 'zone:host' enforcement) every
        volume is pinned to that host and no capability tracking is done.
        Otherwise hosts are chosen per drive type and the selected
        capacity is provisionally consumed so subsequent selections see
        the reduced availability.
        """
        prev_drive_type_id = None
        selected_hosts = []

        LOG.debug(_("volume_params %(volume_params)s") % locals())

        for vol in volume_params:
            LOG.debug(_("Assigning host to volume %s") % vol['name'])

            if forced_host:
                vol['host'] = forced_host
                vol['capabilities'] = None
                continue

            drive_type = vol['drive_ref']
            request_spec = {'size': vol['size'],
                            'drive_type': dict(drive_type)}

            if prev_drive_type_id != drive_type['id']:
                # generate list of hosts for this drive type
                # (volumes are expected to arrive grouped by drive type,
                # so the filtered list is reused across iterations)
                all_hosts = self._filter_hosts("volume", request_spec)
                prev_drive_type_id = drive_type['id']

            (host, qos_cap) = self._select_hosts(request_spec,
                                                 all_hosts, selected_hosts)
            vol['host'] = host
            vol['capabilities'] = qos_cap
            # Reserve the capacity (-1 = consume) so the next volume sees
            # the updated availability.
            self._consume_resource(qos_cap, vol['size'], -1)

            LOG.debug(_("Assigned host %(host)s, capabilities %(qos_cap)s"),
                      locals())

        LOG.debug(_("END: volume_params %(volume_params)s") % locals())
+
+ def schedule_create_volumes(self, context, request_spec,
+ availability_zone, *_args, **_kwargs):
+ """Picks hosts for hosting multiple volumes."""
+
+ num_volumes = request_spec.get('num_volumes')
+ LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
+ locals())
+
+ LOG.debug(_("Service states BEFORE %s"),
+ self.zone_manager.service_states)
+
+ vsa_id = request_spec.get('vsa_id')
+ volume_params = request_spec.get('volumes')
+
+ host = self._check_host_enforcement(availability_zone)
+
+ try:
+ self._assign_hosts_to_volumes(context, volume_params, host)
+
+ for vol in volume_params:
+ self._provision_volume(context, vol, vsa_id, availability_zone)
+
+ LOG.debug(_("Service states AFTER %s"),
+ self.zone_manager.service_states)
+
+ except:
+ if vsa_id:
+ db.vsa_update(context, vsa_id,
+ dict(status=FLAGS.vsa_status_failed))
+
+ for vol in volume_params:
+ if 'capabilities' in vol:
+ self._consume_resource(vol['capabilities'],
+ vol['size'], 1)
+ LOG.debug(_("Service states AFTER %s"),
+ self.zone_manager.service_states)
+ raise
+
+ return None
+
+ def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
+ """Picks the best host based on requested drive type capability."""
+ volume_ref = db.volume_get(context, volume_id)
+
+ host = self._check_host_enforcement(volume_ref['availability_zone'])
+ if host:
+ now = utils.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ return host
+
+ drive_type = volume_ref['drive_type']
+ if drive_type is None:
+ LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
+ return super(VsaScheduler, self).schedule_create_volume(context,
+ volume_id, *_args, **_kwargs)
+ drive_type = dict(drive_type)
+
+ # otherwise - drive type is loaded
+ LOG.debug(_("Spawning volume %d with drive type %s"),
+ volume_ref['id'], drive_type)
+
+ LOG.debug(_("Service states BEFORE %s"),
+ self.zone_manager.service_states)
+
+ request_spec = {'size': volume_ref['size'],
+ 'drive_type': drive_type}
+ hosts = self._filter_hosts("volume", request_spec)
+
+ try:
+ (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
+ except:
+ if volume_ref['to_vsa_id']:
+ db.vsa_update(context, volume_ref['to_vsa_id'],
+ dict(status=FLAGS.vsa_status_failed))
+ raise
+ #return super(VsaScheduler, self).schedule_create_volume(context,
+ # volume_id, *_args, **_kwargs)
+
+ if host:
+ now = utils.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ self._consume_resource(qos_cap, volume_ref['size'], -1)
+
+ LOG.debug(_("Service states AFTER %s"),
+ self.zone_manager.service_states)
+ return host
+
+ def _consume_full_drive(self, qos_values, direction):
+ qos_values['FullDrive']['NumFreeDrives'] += direction
+ qos_values['FullDrive']['NumOccupiedDrives'] -= direction
+
    def _consume_partition(self, qos_values, size, direction):
        """Adjust partition counters after (de)allocating *size* bytes.

        direction is -1 when consuming capacity and +1 when releasing it.
        On consume with no free partition left, a whole free drive is
        first split into partitions; on release, a fully freed drive is
        converted back into a free drive.
        """
        if qos_values['PartitionDrive']['PartitionSize'] != 0:
            partition_size = qos_values['PartitionDrive']['PartitionSize']
        else:
            # Partition size unset - treat the requested size as the
            # partition granularity.
            partition_size = size
        part_per_drive = qos_values['DriveCapacity'] / partition_size

        if direction == -1 and \
           qos_values['PartitionDrive']['NumFreePartitions'] == 0:

            # No free partition: break a free drive into partitions.
            self._consume_full_drive(qos_values, direction)
            qos_values['PartitionDrive']['NumFreePartitions'] += \
                part_per_drive

        qos_values['PartitionDrive']['NumFreePartitions'] += direction
        qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction

        if direction == 1 and \
           qos_values['PartitionDrive']['NumFreePartitions'] >= \
           part_per_drive:

            # A drive's worth of partitions is free again: return the
            # whole drive to the free pool.
            self._consume_full_drive(qos_values, direction)
            qos_values['PartitionDrive']['NumFreePartitions'] -= \
                part_per_drive
+
+ def _consume_resource(self, qos_values, size, direction):
+ if qos_values is None:
+ LOG.debug(_("No capability selected for volume of size %(size)s"),
+ locals())
+ return
+
+ if size == 0: # full drive match
+ qos_values['AvailableCapacity'] += direction * \
+ qos_values['DriveCapacity']
+ self._consume_full_drive(qos_values, direction)
+ else:
+ qos_values['AvailableCapacity'] += direction * \
+ (size << FLAGS.gb_to_bytes_shift)
+ self._consume_partition(qos_values,
+ size << FLAGS.gb_to_bytes_shift,
+ direction)
+ return
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index f99e1713d..36e469be3 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -242,7 +242,7 @@ class LibvirtConnTestCase(test.TestCase):
return """
<domain type='kvm'>
<devices>
- <disk type='file'>
+ <drive type='file'>
<source file='filename'/>
</disk>
</devices>
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 7d27abff9..f81222017 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,7 +41,9 @@ LOG = logging.getLogger('nova.volume')
class API(base.Base):
"""API for interacting with the volume manager."""
- def create(self, context, size, snapshot_id, name, description):
+ def create(self, context, size, snapshot_id, name, description,
+ to_vsa_id=None, from_vsa_id=None, drive_type_id=None,
+ availability_zone=None):
if snapshot_id != None:
snapshot = self.get_snapshot(context, snapshot_id)
if snapshot['status'] != "available":
@@ -50,25 +52,36 @@ class API(base.Base):
if not size:
size = snapshot['volume_size']
- if quota.allowed_volumes(context, 1, size) < 1:
- pid = context.project_id
- LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
- " %(size)sG volume") % locals())
- raise quota.QuotaError(_("Volume quota exceeded. You cannot "
- "create a volume of size %sG") % size)
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if to_vsa_id is None:
+ # VP-TODO: for now don't check quotas for BE volumes
+ if quota.allowed_volumes(context, 1, size) < 1:
+ pid = context.project_id
+ LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
+ " %(size)sG volume") % locals())
+ raise quota.QuotaError(_("Volume quota exceeded. You cannot "
+ "create a volume of size %sG") % size)
options = {
'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
'snapshot_id': snapshot_id,
- 'availability_zone': FLAGS.storage_availability_zone,
+ 'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'display_name': name,
- 'display_description': description}
+ 'display_description': description,
+ 'to_vsa_id': to_vsa_id,
+ 'from_vsa_id': from_vsa_id,
+ 'drive_type_id': drive_type_id}
volume = self.db.volume_create(context, options)
+ if from_vsa_id is not None: # for FE VSA volumes do nothing
+ return volume
+
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "create_volume",
@@ -89,6 +102,12 @@ class API(base.Base):
volume = self.get(context, volume_id)
if volume['status'] != "available":
raise exception.ApiError(_("Volume status must be available"))
+
+ if volume['from_vsa_id'] is not None:
+ self.db.volume_destroy(context, volume['id'])
+ LOG.debug(_("volume %d: deleted successfully"), volume['id'])
+ return
+
now = utils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
@@ -110,6 +129,15 @@ class API(base.Base):
return self.db.volume_get_all(context)
return self.db.volume_get_all_by_project(context, context.project_id)
+ def get_all_by_vsa(self, context, vsa_id, direction):
+ if direction == "to":
+ return self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
+ elif direction == "from":
+ return self.db.volume_get_all_assigned_from_vsa(context, vsa_id)
+ else:
+ raise exception.ApiError(_("Unsupported vol assignment type %s"),
+ direction)
+
def get_snapshot(self, context, snapshot_id):
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 23e845deb..ec09325d8 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -501,7 +501,15 @@ class ISCSIDriver(VolumeDriver):
iscsi_properties = self._get_iscsi_properties(volume)
if not iscsi_properties['target_discovered']:
- self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
+ # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept
+ # multiple args for iscsi-command. Like in --op new. Hence
+ # using a local version here which does the same thing
+ (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'new',
+ '-m', 'node',
+ '-T', iscsi_properties['target_iqn'],
+ '-p', iscsi_properties['target_portal'])
+ # self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
+ # zadara-end
if iscsi_properties.get('auth_method'):
self._iscsiadm_update(iscsi_properties,
@@ -553,7 +561,15 @@ class ISCSIDriver(VolumeDriver):
iscsi_properties = self._get_iscsi_properties(volume)
self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
self._run_iscsiadm(iscsi_properties, "--logout")
- self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
+ # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept
+ # multiple args for iscsi-command. Like in --op delete. Hence
+ # using a local version here which does the same thing
+ (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'delete',
+ '-m', 'node',
+ '-T', iscsi_properties['target_iqn'],
+ '-p', iscsi_properties['target_portal'])
+ #self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
+ # zadara-end
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 798bd379a..3e2892fee 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,6 +42,7 @@ intact.
"""
+import time
from nova import context
from nova import exception
@@ -49,6 +50,7 @@ from nova import flags
from nova import log as logging
from nova import manager
from nova import utils
+from nova import rpc
LOG = logging.getLogger('nova.volume.manager')
@@ -58,22 +60,40 @@ flags.DEFINE_string('storage_availability_zone',
'availability zone of this service')
flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
'Driver to use for volume creation')
+flags.DEFINE_string('vsa_volume_driver', 'nova.volume.san.ZadaraVsaDriver',
+ 'Driver to use for FE/BE volume creation with VSA')
flags.DEFINE_boolean('use_local_volumes', True,
'if True, will not discover local volumes')
+flags.DEFINE_integer('volume_state_interval', 60,
+ 'Interval in seconds for querying volumes status')
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
- def __init__(self, volume_driver=None, *args, **kwargs):
+ def __init__(self, volume_driver=None, vsa_volume_driver=None,
+ *args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
if not volume_driver:
volume_driver = FLAGS.volume_driver
self.driver = utils.import_object(volume_driver)
+ if not vsa_volume_driver:
+ vsa_volume_driver = FLAGS.vsa_volume_driver
+ self.vsadriver = utils.import_object(vsa_volume_driver)
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
+ self.vsadriver.db = self.db
+ self._last_volume_stats = []
+ #self._last_host_check = 0
+
+ def _get_driver(self, volume_ref):
+ if volume_ref['to_vsa_id'] is None and \
+ volume_ref['from_vsa_id'] is None:
+ return self.driver
+ else:
+ return self.vsadriver
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -84,10 +104,15 @@ class VolumeManager(manager.SchedulerDependentManager):
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
if volume['status'] in ['available', 'in-use']:
- self.driver.ensure_export(ctxt, volume)
+ driver = self._get_driver(volume)
+ driver.ensure_export(ctxt, volume)
else:
LOG.info(_("volume %s: skipping export"), volume['name'])
    def create_volumes(self, context, request_spec, availability_zone):
        """Multi-volume creation entry point.

        NOTE(review): currently only logs the request; per-volume create
        casts are issued directly by the scheduler.
        """
        LOG.info(_("create_volumes called with req=%(request_spec)s, "\
                   "availability_zone=%(availability_zone)s"), locals())
+
def create_volume(self, context, volume_id, snapshot_id=None):
"""Creates and exports the volume."""
context = context.elevated()
@@ -101,28 +126,31 @@ class VolumeManager(manager.SchedulerDependentManager):
# before passing it to the driver.
volume_ref['host'] = self.host
+ driver = self._get_driver(volume_ref)
try:
vol_name = volume_ref['name']
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of"
" size %(vol_size)sG") % locals())
if snapshot_id == None:
- model_update = self.driver.create_volume(volume_ref)
+ model_update = driver.create_volume(volume_ref)
else:
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
- model_update = self.driver.create_volume_from_snapshot(
+ model_update = driver.create_volume_from_snapshot(
volume_ref,
snapshot_ref)
if model_update:
self.db.volume_update(context, volume_ref['id'], model_update)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- model_update = self.driver.create_export(context, volume_ref)
+ model_update = driver.create_export(context, volume_ref)
if model_update:
self.db.volume_update(context, volume_ref['id'], model_update)
- except Exception:
+ # except Exception:
+ except:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
+ self._notify_vsa(context, volume_ref, 'error')
raise
now = utils.utcnow()
@@ -130,8 +158,20 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref['id'], {'status': 'available',
'launched_at': now})
LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
+
+ self._notify_vsa(context, volume_ref, 'available')
+
return volume_id
+ def _notify_vsa(self, context, volume_ref, status):
+ if volume_ref['to_vsa_id'] is not None:
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "vsa_volume_created",
+ "args": {"vol_id": volume_ref['id'],
+ "vsa_id": volume_ref['to_vsa_id'],
+ "status": status}})
+
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()
@@ -141,14 +181,15 @@ class VolumeManager(manager.SchedulerDependentManager):
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
+ driver = self._get_driver(volume_ref)
try:
LOG.debug(_("volume %s: removing export"), volume_ref['name'])
- self.driver.remove_export(context, volume_ref)
+ driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['name'])
- self.driver.delete_volume(volume_ref)
+ driver.delete_volume(volume_ref)
except exception.VolumeIsBusy, e:
LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
- self.driver.ensure_export(context, volume_ref)
+ driver.ensure_export(context, volume_ref)
self.db.volume_update(context, volume_ref['id'],
{'status': 'available'})
return True
@@ -171,6 +212,7 @@ class VolumeManager(manager.SchedulerDependentManager):
try:
snap_name = snapshot_ref['name']
LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
+ # snapshot-related operations are irrelevant for vsadriver
model_update = self.driver.create_snapshot(snapshot_ref)
if model_update:
self.db.snapshot_update(context, snapshot_ref['id'],
@@ -194,6 +236,7 @@ class VolumeManager(manager.SchedulerDependentManager):
try:
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
+ # snapshot-related operations are irrelevant for vsadriver
self.driver.delete_snapshot(snapshot_ref)
except Exception:
self.db.snapshot_update(context,
@@ -211,23 +254,75 @@ class VolumeManager(manager.SchedulerDependentManager):
Returns path to device."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
+ driver = self._get_driver(volume_ref)
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
- path = self.driver.local_path(volume_ref)
+ path = driver.local_path(volume_ref)
else:
- path = self.driver.discover_volume(context, volume_ref)
+ path = driver.discover_volume(context, volume_ref)
return path
def remove_compute_volume(self, context, volume_id):
"""Remove remote volume on compute host."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
+ driver = self._get_driver(volume_ref)
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
return True
else:
- self.driver.undiscover_volume(volume_ref)
+ driver.undiscover_volume(volume_ref)
def check_for_export(self, context, instance_id):
"""Make sure whether volume is exported."""
instance_ref = self.db.instance_get(context, instance_id)
for volume in instance_ref['volumes']:
- self.driver.check_for_export(context, volume['id'])
+ driver = self._get_driver(volume)
+ driver.check_for_export(context, volume['id'])
+
    def periodic_tasks(self, context=None):
        """Tasks to be run at a periodic interval.

        Reports driver status (volume capabilities) before delegating to
        the parent implementation. Errors are collected and returned
        rather than raised so one failing task cannot stop the periodic
        loop.
        """
        error_list = []
        try:
            self._report_driver_status()
        except Exception as ex:
            LOG.warning(_("Error during report_driver_status(): %s"),
                        unicode(ex))
            error_list.append(ex)

        super(VolumeManager, self).periodic_tasks(context)

        return error_list
+
+ def _volume_stats_changed(self, stat1, stat2):
+ #LOG.info(_("stat1=%s"), stat1)
+ #LOG.info(_("stat2=%s"), stat2)
+
+ if len(stat1) != len(stat2):
+ return True
+ for (k, v) in stat1.iteritems():
+ if (k, v) not in stat2.iteritems():
+ return True
+ return False
+
+ def _report_driver_status(self):
+ #curr_time = time.time()
+ #LOG.info(_("Report Volume node status"))
+ #if curr_time - self._last_host_check > FLAGS.volume_state_interval:
+ # self._last_host_check = curr_time
+
+ LOG.info(_("Updating volume status"))
+
+ volume_stats = self.vsadriver.get_volume_stats(refresh=True)
+ if self._volume_stats_changed(self._last_volume_stats, volume_stats):
+ LOG.info(_("New capabilities found: %s"), volume_stats)
+ self._last_volume_stats = volume_stats
+
+ # This will grab info about the host and queue it
+ # to be sent to the Schedulers.
+ self.update_service_capabilities(self._last_volume_stats)
+ else:
+ self.update_service_capabilities(None)
+
    def notification(self, context, event):
        """RPC handler for scheduler notifications.

        Clears the cached volume stats so the next periodic report
        republishes the full capability set to the schedulers.
        """
        LOG.info(_("Notification {%s} received"), event)
        self._last_volume_stats = []
diff --git a/nova/volume/san.py b/nova/volume/san.py
index 9532c8116..6a962c6f2 100644
--- a/nova/volume/san.py
+++ b/nova/volume/san.py
@@ -26,6 +26,7 @@ import paramiko
from xml.etree import ElementTree
+from nova import context
from nova import exception
from nova import flags
from nova import log as logging
@@ -64,12 +65,16 @@ class SanISCSIDriver(ISCSIDriver):
# discover_volume is still OK
# undiscover_volume is still OK
- def _connect_to_ssh(self):
+ def _connect_to_ssh(self, san_ip=None):
+ if san_ip:
+ ssh_ip = san_ip
+ else:
+ ssh_ip = FLAGS.san_ip
ssh = paramiko.SSHClient()
#TODO(justinsb): We need a better SSH key policy
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if FLAGS.san_password:
- ssh.connect(FLAGS.san_ip,
+ ssh.connect(ssh_ip,
port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
password=FLAGS.san_password)
@@ -77,7 +82,7 @@ class SanISCSIDriver(ISCSIDriver):
privatekeyfile = os.path.expanduser(FLAGS.san_privatekey)
# It sucks that paramiko doesn't support DSA keys
privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- ssh.connect(FLAGS.san_ip,
+ ssh.connect(ssh_ip,
port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
pkey=privatekey)
@@ -85,9 +90,9 @@ class SanISCSIDriver(ISCSIDriver):
raise exception.Error(_("Specify san_password or san_privatekey"))
return ssh
- def _run_ssh(self, command, check_exit_code=True):
+ def _run_ssh(self, command, check_exit_code=True, san_ip=None):
#TODO(justinsb): SSH connection caching (?)
- ssh = self._connect_to_ssh()
+ ssh = self._connect_to_ssh(san_ip)
#TODO(justinsb): Reintroduce the retry hack
ret = ssh_execute(ssh, command, check_exit_code=check_exit_code)
@@ -583,3 +588,311 @@ class HpSanISCSIDriver(SanISCSIDriver):
cliq_args['volumeName'] = volume['name']
self._cliq_run_xml("unassignVolume", cliq_args)
+
+
+class ZadaraVsaDriver(SanISCSIDriver):
+ """Executes commands relating to Virtual Storage Array volumes.
+
+ There are two types of volumes. Front-end(FE) volumes and Back-end(BE)
+ volumes.
+
+ FE volumes are nova-volumes that are exported by VSA instance & can be
+ consumed by user instances. We use SSH to connect into the VSA instance
+ to execute those steps.
+
+ BE volumes are nova-volumes that are attached as back-end storage for the
+ VSA instance.
+
+ VSA instance essentially consumes the BE volumes and allows creation of FE
+ volumes over it.
+ """
+
+ """ Volume Driver methods """
+ def create_volume(self, volume):
+ """Creates FE/BE volume."""
+ if volume['to_vsa_id']:
+ self._create_be_volume(volume)
+ else:
+ self._create_fe_volume(volume)
+
+ def delete_volume(self, volume):
+ """Deletes FE/BE volume."""
+ if volume['to_vsa_id']:
+ self._delete_be_volume(volume)
+ else:
+ self._delete_fe_volume(volume)
+
+ def local_path(self, volume):
+ # TODO: Is this needed here?
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """On bootup synchronously ensures a volume export is available."""
+ if volume['to_vsa_id']:
+ return self._ensure_be_export(context, volume)
+
+ # Not required for FE volumes. VSA VM will ensure volume exposure
+ pass
+
+ def create_export(self, context, volume):
+ """For first time creates volume export."""
+ if volume['to_vsa_id']:
+ return self._create_be_export(context, volume)
+ else:
+ return self._create_fe_export(context, volume)
+
+ def remove_export(self, context, volume):
+ if volume['to_vsa_id']:
+ return self._remove_be_export(context, volume)
+ else:
+ return self._remove_fe_export(context, volume)
+
+ def check_for_setup_error(self):
+ """Returns an error if prerequisites aren't met"""
+ # skip the flags.san_ip check & do the regular check
+
+ if not (FLAGS.san_password or FLAGS.san_privatekey):
+ raise exception.Error(_("Specify san_password or san_privatekey"))
+
+ """ Internal BE Volume methods """
+ def _create_be_volume(self, volume):
+ """Creates BE volume."""
+ if int(volume['size']) == 0:
+ sizestr = '0' # indicates full-partition
+ else:
+ sizestr = '%s' % (int(volume['size']) << 30) # size in bytes
+
+ # Set the qos-str to default type sas
+ # TODO - later for this piece we will get the direct qos-group name
+ # in create_volume and hence this lookup will not be needed
+ qosstr = 'SAS_1000'
+ drive_type = volume.get('drive_type')
+ if drive_type is not None:
+ # for now just use the qos-type string from the disktypes.
+ qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb'])
+
+ self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+ 'create_qospart',
+ '--qos', qosstr,
+ '--pname', volume['name'],
+ '--psize', sizestr,
+ check_exit_code=0)
+ LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
+
+ def _delete_be_volume(self, volume):
+ try:
+ self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+ 'delete_partition',
+ '--pname', volume['name'],
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
+ return
+
+        LOG.debug(_("VSA BE delete_volume for %s succeeded"), volume['name'])
+
+ def _create_be_export(self, context, volume):
+ """create BE export for a volume"""
+ self._ensure_iscsi_targets(context, volume['host'])
+ iscsi_target = self.db.volume_allocate_iscsi_target(context,
+ volume['id'],
+ volume['host'])
+ return self._common_be_export(context, volume, iscsi_target)
+
+ def _ensure_be_export(self, context, volume):
+ """ensure BE export for a volume"""
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping ensure_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ return self._common_be_export(context, volume, iscsi_target)
+
+ def _common_be_export(self, context, volume, iscsi_target):
+ """
+ Common logic that asks zadara_sncfg to setup iSCSI target/lun for
+ this volume
+ """
+ (out, err) = self._sync_exec('sudo',
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'create_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ check_exit_code=0)
+
+ result_xml = ElementTree.fromstring(out)
+ response_node = result_xml.find("Sn")
+ if response_node is None:
+ msg = "Malformed response from zadara_sncfg"
+ raise exception.Error(msg)
+
+ sn_ip = response_node.findtext("SnIp")
+ sn_iqn = response_node.findtext("IqnName")
+ iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ sn_iqn))
+ return model_update
+
+ def _remove_be_export(self, context, volume):
+ """Removes BE export for a volume."""
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping remove_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+ 'remove_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
+ return
+
+ def _get_qosgroup_summary(self):
+ """gets the list of qosgroups from Zadara SN"""
+ (out, err) = self._sync_exec('sudo',
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'get_qosgroups_xml',
+ check_exit_code=0)
+ qos_groups = {}
+ #qos_groups = []
+ result_xml = ElementTree.fromstring(out)
+ for element in result_xml.findall('QosGroup'):
+ qos_group = {}
+ # get the name of the group.
+ # If we cannot find it, forget this element
+ group_name = element.findtext("Name")
+ if not group_name:
+ continue
+
+ # loop through all child nodes & fill up attributes of this group
+ for child in element.getchildren():
+ # two types of elements - property of qos-group & sub property
+ # classify them accordingly
+ if child.text:
+ qos_group[child.tag] = int(child.text) \
+ if child.text.isdigit() else child.text
+ else:
+ subelement = {}
+ for subchild in child.getchildren():
+ subelement[subchild.tag] = int(subchild.text) \
+ if subchild.text.isdigit() else subchild.text
+ qos_group[child.tag] = subelement
+
+ # Now add this group to the master qos_groups
+ qos_groups[group_name] = qos_group
+ #qos_groups.append(qos_group)
+
+ return qos_groups
+
+ """ Internal FE Volume methods """
+ def _vsa_run(self, volume, verb, vsa_args):
+ """
+ Runs a command over SSH to VSA instance and checks for return status
+ """
+ vsa_arg_strings = []
+
+ if vsa_args:
+ for k, v in vsa_args.items():
+ vsa_arg_strings.append(" --%s %s" % (k, v))
+
+ # Form the zadara_cfg script that will do the configuration at VSA VM
+ cmd = "/var/lib/zadara/bin/zadara_cfg.py " + verb + \
+ ''.join(vsa_arg_strings)
+
+ # get the list of IP's corresponding to VSA VM's
+ vsa_ips = self.db.vsa_get_vc_ips_list(context.get_admin_context(),
+ volume['from_vsa_id'])
+ if not vsa_ips:
+ raise exception.Error(_("Cannot Lookup VSA VM's IP"))
+            # unreachable: the raise above exits this method
+
+ # pick the first element in the return's fixed_ip for SSH
+ vsa_ip = vsa_ips[0]['fixed']
+
+ (out, _err) = self._run_ssh(cmd, san_ip=vsa_ip)
+
+        # check the xml StatusCode to check for real status
+ result_xml = ElementTree.fromstring(out)
+
+ status = result_xml.findtext("StatusCode")
+ if status != '0':
+ statusmsg = result_xml.findtext("StatusMessage")
+ msg = (_('vsa_run failed to ' + verb + ' for ' + volume['name'] +
+ '. Result=' + str(statusmsg)))
+ raise exception.Error(msg)
+
+ return out, _err
+
+ def _create_fe_volume(self, volume):
+ """Creates FE volume."""
+ vsa_args = {}
+ vsa_args['volname'] = volume['name']
+ if int(volume['size']) == 0:
+ sizestr = '100M'
+ else:
+ sizestr = '%sG' % volume['size']
+ vsa_args['volsize'] = sizestr
+ (out, _err) = self._vsa_run(volume, "create_volume", vsa_args)
+
+        LOG.debug(_("VSA FE create_volume for %s succeeded"), volume['name'])
+
+ def _delete_fe_volume(self, volume):
+ """Deletes FE volume."""
+ vsa_args = {}
+ vsa_args['volname'] = volume['name']
+ (out, _err) = self._vsa_run(volume, "delete_volume", vsa_args)
+        LOG.debug(_("VSA FE delete_volume for %s succeeded"), volume['name'])
+ return
+
+ def _create_fe_export(self, context, volume):
+ """Create FE volume exposure at VSA VM"""
+ vsa_args = {}
+ vsa_args['volname'] = volume['name']
+ (out, _err) = self._vsa_run(volume, "create_export", vsa_args)
+
+ result_xml = ElementTree.fromstring(out)
+ response_node = result_xml.find("Vsa")
+ if response_node is None:
+ msg = "Malformed response to VSA command "
+ raise exception.Error(msg)
+
+        LOG.debug(_("VSA create_export for %s succeeded"), volume['name'])
+
+ vsa_ip = response_node.findtext("VsaIp")
+ vsa_iqn = response_node.findtext("IqnName")
+ vsa_interface = response_node.findtext("VsaInterface")
+ iscsi_portal = vsa_ip + ":3260," + vsa_interface
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ vsa_iqn))
+
+ return model_update
+
+    def _remove_fe_export(self, context, volume):
+ """Remove FE volume exposure at VSA VM"""
+ vsa_args = {}
+ vsa_args['volname'] = volume['name']
+ (out, _err) = self._vsa_run(volume, "remove_export", vsa_args)
+        LOG.debug(_("VSA FE remove_export for %s succeeded"), volume['name'])
+ return
+
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+
+ drive_info = self._get_qosgroup_summary()
+ return {'drive_qos_info': drive_info}
diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py
new file mode 100644
index 000000000..a94a6b7a4
--- /dev/null
+++ b/nova/vsa/__init__.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.vsa.api import API
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
new file mode 100644
index 000000000..ed83ff563
--- /dev/null
+++ b/nova/vsa/api.py
@@ -0,0 +1,407 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to Virtual Storage Arrays (VSAs).
+"""
+
+#import datetime
+import sys
+import base64
+
+from xml.etree import ElementTree
+from xml.etree.ElementTree import Element, SubElement
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+from nova import compute
+from nova import volume
+from nova.compute import instance_types
+from nova.vsa import drive_types
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('vsa_multi_vol_creation', True,
+ 'Ask scheduler to create multiple volumes in one call')
+
+LOG = logging.getLogger('nova.vsa')
+
+
+class API(base.Base):
+ """API for interacting with the VSA manager."""
+
+ def __init__(self, compute_api=None, volume_api=None, **kwargs):
+ self.compute_api = compute_api or compute.API()
+ self.volume_api = volume_api or volume.API()
+ super(API, self).__init__(**kwargs)
+
+ def _get_default_vsa_instance_type(self):
+ return instance_types.get_instance_type_by_name(
+ FLAGS.default_vsa_instance_type)
+
+ def _check_storage_parameters(self, context, vsa_name, storage, shared):
+ """
+ Translates storage array of disks to the list of volumes
+ :param storage: List of dictionaries with following keys:
+ disk_name, num_disks, size
+ :param shared: Specifies if storage is dedicated or shared.
+ For shared storage disks split into partitions
+ """
+ volume_params = []
+ for node in storage:
+
+ name = node.get('drive_name', None)
+ num_disks = node.get('num_drives', 1)
+
+ if name is None:
+                raise exception.ApiError(_("No drive_name param found in %s")
+                                         % node)
+
+ # find DB record for this disk
+ try:
+ drive_ref = drive_types.drive_type_get_by_name(context, name)
+ except exception.NotFound:
+                raise exception.ApiError(_("Invalid drive type name %s")
+                                         % name)
+
+ # if size field present - override disk size specified in DB
+ size = node.get('size', drive_ref['size_gb'])
+
+ if shared:
+ part_size = FLAGS.vsa_part_size_gb
+ total_capacity = num_disks * size
+ num_volumes = total_capacity / part_size
+ size = part_size
+ else:
+ num_volumes = num_disks
+ size = 0 # special handling for full drives
+
+ for i in range(num_volumes):
+            # VP-TODO: potentially may conflict with previous volumes
+ volume_name = vsa_name + ("_%s_vol-%d" % (name, i))
+ volume = {
+ 'size': size,
+ 'snapshot_id': None,
+ 'name': volume_name,
+ 'description': 'BE volume for ' + volume_name,
+ 'drive_ref': drive_ref
+ }
+ volume_params.append(volume)
+
+ return volume_params
+
+ def create(self, context, display_name='', display_description='',
+ vc_count=1, instance_type=None, image_name=None,
+ availability_zone=None, storage=[], shared=None):
+ """
+ Provision VSA instance with corresponding compute instances
+ and associated volumes
+ :param storage: List of dictionaries with following keys:
+ disk_name, num_disks, size
+ :param shared: Specifies if storage is dedicated or shared.
+ For shared storage disks split into partitions
+ """
+
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if instance_type is None:
+ instance_type = self._get_default_vsa_instance_type()
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if storage is None:
+ storage = []
+
+ if shared is None or shared == 'False' or shared == False:
+ shared = False
+ else:
+ shared = True
+
+ # check if image is ready before starting any work
+ if image_name is None or image_name == '':
+ image_name = FLAGS.vc_image_name
+ try:
+ image_service = self.compute_api.image_service
+ vc_image = image_service.show_by_name(context, image_name)
+ vc_image_href = vc_image['id']
+ except exception.ImageNotFound:
+            raise exception.ApiError(_("Failed to find configured image %s")
+                                     % image_name)
+
+ options = {
+ 'display_name': display_name,
+ 'display_description': display_description,
+ 'project_id': context.project_id,
+ 'availability_zone': availability_zone,
+ 'instance_type_id': instance_type['id'],
+ 'image_ref': vc_image_href,
+ 'vc_count': vc_count,
+ 'status': FLAGS.vsa_status_creating,
+ }
+ LOG.info(_("Creating VSA: %s") % options)
+
+ # create DB entry for VSA instance
+ try:
+ vsa_ref = self.db.vsa_create(context, options)
+ except exception.Error:
+ raise exception.ApiError(_(sys.exc_info()[1]))
+ vsa_id = vsa_ref['id']
+ vsa_name = vsa_ref['name']
+
+ # check storage parameters
+ try:
+ volume_params = self._check_storage_parameters(context, vsa_name,
+ storage, shared)
+ except exception.ApiError:
+ self.update_vsa_status(context, vsa_id,
+ status=FLAGS.vsa_status_failed)
+ raise
+
+ # after creating DB entry, re-check and set some defaults
+ updates = {}
+ if (not hasattr(vsa_ref, 'display_name') or
+ vsa_ref.display_name is None or
+ vsa_ref.display_name == ''):
+ updates['display_name'] = display_name = vsa_name
+ updates['vol_count'] = len(volume_params)
+ vsa_ref = self.update(context, vsa_id, **updates)
+
+ # create volumes
+ if FLAGS.vsa_multi_vol_creation:
+ if len(volume_params) > 0:
+ #filter_class = 'nova.scheduler.vsa.InstanceTypeFilter'
+ request_spec = {
+ 'num_volumes': len(volume_params),
+ 'vsa_id': vsa_id,
+ 'volumes': volume_params,
+ #'filter': filter_class,
+ }
+
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volumes",
+ "args": {"topic": FLAGS.volume_topic,
+ "request_spec": request_spec,
+ "availability_zone": availability_zone}})
+ else:
+ # create BE volumes one-by-one
+ for vol in volume_params:
+ try:
+ vol_name = vol['name']
+ vol_size = vol['size']
+ LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\
+ "volume %(vol_name)s, %(vol_size)d GB"),
+ locals())
+
+ vol_ref = self.volume_api.create(context,
+ vol_size,
+ vol['snapshot_id'],
+ vol_name,
+ vol['description'],
+ to_vsa_id=vsa_id,
+ drive_type_id=vol['drive_ref'].get('id'),
+ availability_zone=availability_zone)
+ except:
+ self.update_vsa_status(context, vsa_id,
+ status=FLAGS.vsa_status_partial)
+ raise
+
+ if len(volume_params) == 0:
+ # No BE volumes - ask VSA manager to start VCs
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "create_vsa",
+ "args": {"vsa_id": vsa_id}})
+
+ return vsa_ref
+
+ def update_vsa_status(self, context, vsa_id, status):
+ updates = dict(status=status)
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"),
+ locals())
+ return self.update(context, vsa_id, **updates)
+
+ def update(self, context, vsa_id, **kwargs):
+ """Updates the VSA instance in the datastore.
+
+ :param context: The security context
+ :param vsa_id: ID of the VSA instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :returns: None
+ """
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())
+
+ vc_count = kwargs.get('vc_count', None)
+ if vc_count is not None:
+ # VP-TODO: This request may want to update number of VCs
+ # Get number of current VCs and add/delete VCs appropriately
+ vsa = self.get(context, vsa_id)
+ vc_count = int(vc_count)
+ if vsa['vc_count'] != vc_count:
+ self.update_num_vcs(context, vsa, vc_count)
+
+ return self.db.vsa_update(context, vsa_id, kwargs)
+
+ def update_num_vcs(self, context, vsa, vc_count):
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ old_vc_count = vsa['vc_count']
+ if vc_count > old_vc_count:
+ LOG.debug(_("Adding %d VCs to VSA %s."),
+                      vc_count - old_vc_count, vsa['name'])
+ # VP-TODO: actual code for adding new VCs
+
+ elif vc_count < old_vc_count:
+ LOG.debug(_("Deleting %d VCs from VSA %s."),
+                      old_vc_count - vc_count, vsa['name'])
+ # VP-TODO: actual code for deleting extra VCs
+
+ def _force_volume_delete(self, ctxt, volume):
+ """Delete a volume, bypassing the check that it must be available."""
+ host = volume['host']
+
+ if not host:
+ # Volume not yet assigned to host
+ # Deleting volume from database and skipping rpc.
+ self.db.volume_destroy(ctxt, volume['id'])
+ return
+
+ rpc.cast(ctxt,
+ self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume['id']}})
+
+ def delete_be_volumes(self, context, vsa_id, force_delete=True):
+
+ be_volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
+ for volume in be_volumes:
+ try:
+ vol_name = volume['name']
+ LOG.info(_("VSA ID %(vsa_id)s: Deleting BE volume "\
+ "%(vol_name)s"), locals())
+ self.volume_api.delete(context, volume['id'])
+ except exception.ApiError:
+ LOG.info(_("Unable to delete volume %s"), volume['name'])
+ if force_delete:
+ LOG.info(_("VSA ID %(vsa_id)s: Forced delete. BE volume "\
+ "%(vol_name)s"), locals())
+ self._force_volume_delete(context, volume)
+
+ def delete(self, context, vsa_id):
+ """Terminate a VSA instance."""
+ LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)
+
+ # allow deletion of volumes in "abnormal" state
+
+ # Delete all FE volumes
+ fe_volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id)
+ for volume in fe_volumes:
+ try:
+ vol_name = volume['name']
+ LOG.info(_("VSA ID %(vsa_id)s: Deleting FE volume "\
+ "%(vol_name)s"), locals())
+ self.volume_api.delete(context, volume['id'])
+ except exception.ApiError:
+ LOG.info(_("Unable to delete volume %s"), volume['name'])
+
+ # Delete all BE volumes
+ self.delete_be_volumes(context, vsa_id, force_delete=True)
+
+ # Delete all VC instances
+ instances = self.db.instance_get_all_by_vsa(context, vsa_id)
+ for instance in instances:
+ name = instance['name']
+ LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"),
+ locals())
+ self.compute_api.delete(context, instance['id'])
+
+ # Delete VSA instance
+ self.db.vsa_destroy(context, vsa_id)
+
+ def get(self, context, vsa_id):
+ rv = self.db.vsa_get(context, vsa_id)
+ return rv
+
+ def get_all(self, context):
+ if context.is_admin:
+ return self.db.vsa_get_all(context)
+ return self.db.vsa_get_all_by_project(context, context.project_id)
+
+ def generate_user_data(self, context, vsa, volumes):
+ e_vsa = Element("vsa")
+
+ e_vsa_detail = SubElement(e_vsa, "id")
+ e_vsa_detail.text = str(vsa['id'])
+ e_vsa_detail = SubElement(e_vsa, "name")
+ e_vsa_detail.text = vsa['display_name']
+ e_vsa_detail = SubElement(e_vsa, "description")
+ e_vsa_detail.text = vsa['display_description']
+ e_vsa_detail = SubElement(e_vsa, "vc_count")
+ e_vsa_detail.text = str(vsa['vc_count'])
+
+ e_volumes = SubElement(e_vsa, "volumes")
+ for volume in volumes:
+
+ loc = volume['provider_location']
+ if loc is None:
+ ip = ''
+ iscsi_iqn = ''
+ iscsi_portal = ''
+ else:
+ (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
+ (ip, iscsi_portal) = iscsi_target.split(":", 1)
+
+ e_vol = SubElement(e_volumes, "volume")
+ e_vol_detail = SubElement(e_vol, "id")
+ e_vol_detail.text = str(volume['id'])
+ e_vol_detail = SubElement(e_vol, "name")
+ e_vol_detail.text = volume['name']
+ e_vol_detail = SubElement(e_vol, "display_name")
+ e_vol_detail.text = volume['display_name']
+ e_vol_detail = SubElement(e_vol, "size_gb")
+ e_vol_detail.text = str(volume['size'])
+ e_vol_detail = SubElement(e_vol, "status")
+ e_vol_detail.text = volume['status']
+ e_vol_detail = SubElement(e_vol, "ip")
+ e_vol_detail.text = ip
+ e_vol_detail = SubElement(e_vol, "iscsi_iqn")
+ e_vol_detail.text = iscsi_iqn
+ e_vol_detail = SubElement(e_vol, "iscsi_portal")
+ e_vol_detail.text = iscsi_portal
+ e_vol_detail = SubElement(e_vol, "lun")
+ e_vol_detail.text = '0'
+ e_vol_detail = SubElement(e_vol, "sn_host")
+ e_vol_detail.text = volume['host']
+
+ _xml = ElementTree.tostring(e_vsa)
+ return base64.b64encode(_xml)
diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py
new file mode 100644
index 000000000..6c61acee4
--- /dev/null
+++ b/nova/vsa/connection.py
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Abstraction of the underlying connection to VC."""
+
+from nova.vsa import fake
+
+
+def get_connection():
+ # Return an object that is able to talk to VCs
+ return fake.FakeVcConnection()
diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py
new file mode 100644
index 000000000..308d21fec
--- /dev/null
+++ b/nova/vsa/fake.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class FakeVcConnection:
+
+ def init_host(self, host):
+ pass
diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py
new file mode 100644
index 000000000..a9a9fa2e8
--- /dev/null
+++ b/nova/vsa/manager.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all processes relating to Virtual Storage Arrays (VSA).
+
+**Related Flags**
+
+"""
+
+from nova import log as logging
+from nova import manager
+from nova import flags
+from nova import utils
+from nova import exception
+from nova import compute
+from nova import volume
+from nova import vsa
+from nova.compute import instance_types
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection',
+ 'Driver to use for controlling VSAs')
+
+LOG = logging.getLogger('nova.vsa.manager')
+
+
+class VsaManager(manager.SchedulerDependentManager):
+ """Manages Virtual Storage Arrays (VSAs)."""
+
+ def __init__(self, vsa_driver=None, *args, **kwargs):
+ if not vsa_driver:
+ vsa_driver = FLAGS.vsa_driver
+ self.driver = utils.import_object(vsa_driver)
+ self.compute_manager = utils.import_object(FLAGS.compute_manager)
+
+ self.compute_api = compute.API()
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+
+ super(VsaManager, self).__init__(*args, **kwargs)
+
+ def init_host(self):
+ self.driver.init_host(host=self.host)
+ super(VsaManager, self).init_host()
+
+ @exception.wrap_exception()
+ def create_vsa(self, context, vsa_id):
+ """Called by API if there were no BE volumes assigned"""
+ LOG.debug(_("Create call received for VSA %s"), vsa_id)
+
+ vsa_id = int(vsa_id) # just in case
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ return self._start_vcs(context, vsa)
+
+ @exception.wrap_exception()
+ def vsa_volume_created(self, context, vol_id, vsa_id, status):
+ """Callback for volume creations"""
+ LOG.debug(_("VSA ID %(vsa_id)s: Volume %(vol_id)s created. "\
+ "Status %(status)s"), locals())
+ vsa_id = int(vsa_id) # just in case
+
+ # Get all volumes for this VSA
+ # check if any of them still in creating phase
+ volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
+ for volume in volumes:
+ if volume['status'] == 'creating':
+ vol_name = volume['name']
+ vol_disp_name = volume['display_name']
+ LOG.debug(_("Volume %(vol_name)s (%(vol_disp_name)s) still "\
+ "in creating phase - wait"), locals())
+ return
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ if len(volumes) != vsa['vol_count']:
+ LOG.debug(_("VSA ID %d: Not all volumes are created (%d of %d)"),
+ vsa_id, len(volumes), vsa['vol_count'])
+ return
+
+ # all volumes created (successfully or not)
+ return self._start_vcs(context, vsa, volumes)
+
+ def _start_vcs(self, context, vsa, volumes=[]):
+ """Start VCs for VSA """
+
+ vsa_id = vsa['id']
+ if vsa['status'] == FLAGS.vsa_status_creating:
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ FLAGS.vsa_status_launching)
+ else:
+ return
+
+ # in _separate_ loop go over all volumes and mark as "attached"
+ has_failed_volumes = False
+ for volume in volumes:
+ vol_name = volume['name']
+ vol_disp_name = volume['display_name']
+ status = volume['status']
+ LOG.info(_("VSA ID %(vsa_id)d: Volume %(vol_name)s "\
+ "(%(vol_disp_name)s) is in %(status)s state"),
+ locals())
+ if status == 'available':
+ try:
+ # self.volume_api.update(context, volume['id'],
+ # dict(attach_status="attached"))
+ pass
+ except Exception as ex:
+ msg = _("Failed to update attach status for volume "
+ "%(vol_name)s. %(ex)s") % locals()
+ LOG.exception(msg)
+ else:
+ has_failed_volumes = True
+
+ if has_failed_volumes:
+ LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
+ self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True)
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ FLAGS.vsa_status_failed)
+ return
+
+ # create user-data record for VC
+ storage_data = self.vsa_api.generate_user_data(context, vsa, volumes)
+
+ instance_type = instance_types.get_instance_type(
+ vsa['instance_type_id'])
+
+ # now start the VC instance
+
+ vc_count = vsa['vc_count']
+ LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
+ locals())
+ vc_instances = self.compute_api.create(context,
+ instance_type, # vsa['vsa_instance_type'],
+ vsa['image_ref'],
+ min_count=1,
+ max_count=vc_count,
+ display_name='vc-' + vsa['display_name'],
+ display_description='VC for VSA ' + vsa['display_name'],
+ availability_zone=vsa['availability_zone'],
+ user_data=storage_data,
+ vsa_id=vsa_id)
+
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ FLAGS.vsa_status_created)
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 68d7e7bff..68d7e7bff 100755..100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
diff --git a/tools/clean-vlans b/tools/clean-vlans
index a26ad86ad..a26ad86ad 100755..100644
--- a/tools/clean-vlans
+++ b/tools/clean-vlans
diff --git a/tools/nova-debug b/tools/nova-debug
index 0a78af16a..0a78af16a 100755..100644
--- a/tools/nova-debug
+++ b/tools/nova-debug