summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Authors3
-rwxr-xr-xbin/nova-api1
-rwxr-xr-xbin/nova-api-ec21
-rwxr-xr-xbin/nova-api-os1
-rwxr-xr-xbin/nova-compute1
-rwxr-xr-xbin/nova-manage530
-rwxr-xr-xbin/nova-network1
-rwxr-xr-xbin/nova-objectstore1
-rwxr-xr-xbin/nova-scheduler3
-rwxr-xr-xbin/nova-volume1
-rwxr-xr-xbin/nova-vsa49
-rw-r--r--etc/nova/api-paste.ini24
-rw-r--r--nova/api/auth.py1
-rw-r--r--nova/api/ec2/__init__.py21
-rw-r--r--nova/api/ec2/admin.py4
-rw-r--r--nova/api/openstack/auth.py52
-rw-r--r--nova/api/openstack/contrib/virtual_storage_arrays.py606
-rw-r--r--nova/api/openstack/contrib/volumes.py36
-rw-r--r--nova/api/openstack/contrib/volumetypes.py197
-rw-r--r--nova/api/openstack/create_instance_helper.py6
-rw-r--r--nova/api/openstack/servers.py29
-rw-r--r--nova/api/openstack/views/addresses.py23
-rw-r--r--nova/api/openstack/views/servers.py5
-rw-r--r--nova/api/openstack/wsgi.py6
-rw-r--r--nova/auth/manager.py16
-rw-r--r--nova/cloudpipe/pipelib.py9
-rw-r--r--nova/compute/api.py34
-rw-r--r--nova/compute/manager.py20
-rw-r--r--nova/db/api.py112
-rw-r--r--nova/db/sqlalchemy/api.py424
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py18
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py12
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py38
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py115
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py75
-rw-r--r--nova/db/sqlalchemy/migration.py4
-rw-r--r--nova/db/sqlalchemy/models.py82
-rw-r--r--nova/exception.py39
-rw-r--r--nova/flags.py23
-rw-r--r--nova/ipv6/account_identifier.py7
-rw-r--r--nova/ipv6/rfc2462.py4
-rw-r--r--nova/log.py6
-rw-r--r--nova/network/manager.py14
-rw-r--r--nova/notifier/api.py27
-rw-r--r--nova/notifier/list_notifier.py68
-rw-r--r--nova/quota.py5
-rw-r--r--nova/scheduler/vsa.py535
-rw-r--r--nova/tests/api/openstack/contrib/test_vsa.py450
-rw-r--r--nova/tests/api/openstack/test_extensions.py2
-rw-r--r--nova/tests/api/openstack/test_server_actions.py147
-rw-r--r--nova/tests/api/openstack/test_servers.py296
-rw-r--r--nova/tests/api/openstack/test_volume_types.py171
-rw-r--r--nova/tests/api/openstack/test_volume_types_extra_specs.py181
-rw-r--r--nova/tests/integrated/integrated_helpers.py109
-rw-r--r--nova/tests/integrated/test_login.py39
-rw-r--r--nova/tests/integrated/test_servers.py5
-rw-r--r--nova/tests/integrated/test_volumes.py17
-rw-r--r--nova/tests/monkey_patch_example/__init__.py33
-rw-r--r--nova/tests/monkey_patch_example/example_a.py29
-rw-r--r--nova/tests/monkey_patch_example/example_b.py30
-rw-r--r--nova/tests/notifier/__init__.py16
-rw-r--r--nova/tests/notifier/test_list_notifier.py88
-rw-r--r--nova/tests/scheduler/test_vsa_scheduler.py641
-rw-r--r--nova/tests/test_auth.py1
-rw-r--r--nova/tests/test_compute.py20
-rw-r--r--nova/tests/test_instance_types.py68
-rw-r--r--nova/tests/test_ipv6.py38
-rw-r--r--nova/tests/test_notifier.py21
-rw-r--r--nova/tests/test_nova_manage.py154
-rw-r--r--nova/tests/test_test_utils.py41
-rw-r--r--nova/tests/test_utils.py45
-rw-r--r--nova/tests/test_versions.py61
-rw-r--r--nova/tests/test_virt_drivers.py489
-rw-r--r--nova/tests/test_volume_types.py207
-rw-r--r--nova/tests/test_volume_types_extra_specs.py132
-rw-r--r--nova/tests/test_vsa.py182
-rw-r--r--nova/tests/test_vsa_volumes.py136
-rw-r--r--nova/tests/utils.py68
-rw-r--r--nova/utils.py41
-rw-r--r--nova/virt/disk.py32
-rw-r--r--nova/virt/driver.py18
-rw-r--r--nova/virt/fake.py13
-rw-r--r--nova/virt/libvirt.xml.template11
-rw-r--r--nova/virt/libvirt/connection.py77
-rw-r--r--nova/virt/xenapi/vm_utils.py15
-rw-r--r--nova/virt/xenapi/vmops.py2
-rw-r--r--nova/volume/api.py94
-rw-r--r--nova/volume/driver.py280
-rw-r--r--nova/volume/manager.py78
-rw-r--r--nova/volume/volume_types.py166
-rw-r--r--nova/vsa/__init__.py18
-rw-r--r--nova/vsa/api.py411
-rw-r--r--nova/vsa/connection.py25
-rw-r--r--nova/vsa/fake.py22
-rw-r--r--nova/vsa/manager.py179
-rw-r--r--nova/vsa/utils.py80
-rw-r--r--po/ast.po4
-rw-r--r--po/cs.po4
-rw-r--r--po/da.po4
-rw-r--r--po/de.po22
-rw-r--r--po/en_AU.po4
-rw-r--r--po/en_GB.po25
-rw-r--r--po/es.po69
-rw-r--r--po/fr.po52
-rw-r--r--po/it.po87
-rw-r--r--po/ja.po50
-rw-r--r--po/pt_BR.po52
-rw-r--r--po/ru.po22
-rw-r--r--po/tl.po4
-rw-r--r--po/uk.po12
-rw-r--r--po/zh_CN.po169
-rw-r--r--po/zh_TW.po15
-rwxr-xr-xrun_tests.sh20
113 files changed, 8822 insertions, 561 deletions
diff --git a/Authors b/Authors
index 112791cfc..9896f6297 100644
--- a/Authors
+++ b/Authors
@@ -19,6 +19,7 @@ Chiradeep Vittal <chiradeep@cloud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
Christian Berendt <berendt@b1-systems.de>
+Christopher MacGown <chris@pistoncloud.com>
Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
@@ -69,6 +70,7 @@ Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>
Mandell Degerness <mdegerne@gmail.com>
+Mark McLoughlin <markmc@redhat.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
@@ -100,6 +102,7 @@ Scott Moser <smoser@ubuntu.com>
Soren Hansen <soren.hansen@rackspace.com>
Stephanie Reese <reese.sm@gmail.com>
Thierry Carrez <thierry@openstack.org>
+Tim Simpson <tim.simpson@rackspace.com>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
Troy Toman <troy.toman@rackspace.com>
diff --git a/bin/nova-api b/bin/nova-api
index 38e2624d8..d8635978e 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -45,6 +45,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
servers = []
for api in flags.FLAGS.enabled_apis:
servers.append(service.WSGIService(api))
diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2
index df50f713d..9f82a69e4 100755
--- a/bin/nova-api-ec2
+++ b/bin/nova-api-ec2
@@ -41,6 +41,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
server = service.WSGIService('ec2')
service.serve(server)
service.wait()
diff --git a/bin/nova-api-os b/bin/nova-api-os
index 374e850ea..83a808987 100755
--- a/bin/nova-api-os
+++ b/bin/nova-api-os
@@ -41,6 +41,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
server = service.WSGIService('osapi')
service.serve(server)
service.wait()
diff --git a/bin/nova-compute b/bin/nova-compute
index 5239fae72..0c69a8129 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -43,6 +43,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
server = service.Service.create(binary='nova-compute')
service.serve(server)
service.wait()
diff --git a/bin/nova-manage b/bin/nova-manage
index 0c2cee3ce..a95890e36 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -53,6 +53,7 @@
CLI interface for nova management.
"""
+import ast
import gettext
import glob
import json
@@ -85,11 +86,13 @@ from nova import quota
from nova import rpc
from nova import utils
from nova import version
+from nova import vsa
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types
from nova.db import migration
+from nova.volume import volume_types
FLAGS = flags.FLAGS
flags.DECLARE('fixed_range', 'nova.network.manager')
@@ -134,7 +137,7 @@ class VpnCommands(object):
help='Project name')
def list(self, project=None):
"""Print a listing of the VPN data for one or all projects."""
-
+ print "WARNING: This method only works with deprecated auth"
print "%-12s\t" % 'project',
print "%-20s\t" % 'ip:port',
print "%-20s\t" % 'private_ip',
@@ -170,17 +173,22 @@ class VpnCommands(object):
def spawn(self):
"""Run all VPNs."""
+ print "WARNING: This method only works with deprecated auth"
for p in reversed(self.manager.get_projects()):
if not self._vpn_for(p.id):
print 'spawning %s' % p.id
- self.pipe.launch_vpn_instance(p.id)
+ self.pipe.launch_vpn_instance(p.id, p.project_manager_id)
time.sleep(10)
@args('--project', dest="project_id", metavar='<Project name>',
help='Project name')
- def run(self, project_id):
- """Start the VPN for a given project."""
- self.pipe.launch_vpn_instance(project_id)
+ @args('--user', dest="user_id", metavar='<user name>', help='User name')
+ def run(self, project_id, user_id):
+ """Start the VPN for a given project and user."""
+ if not user_id:
+ print "WARNING: This method only works with deprecated auth"
+ user_id = self.manager.get_project(project_id).project_manager_id
+ self.pipe.launch_vpn_instance(project_id, user_id)
@args('--project', dest="project_id", metavar='<Project name>',
help='Project name')
@@ -195,10 +203,6 @@ class VpnCommands(object):
"""
# TODO(tr3buchet): perhaps this shouldn't update all networks
# associated with a project in the future
- project = self.manager.get_project(project_id)
- if not project:
- print 'No project %s' % (project_id)
- return
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
@@ -825,6 +829,39 @@ class NetworkCommands(object):
uuid=None)
+ @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
+ help='Network to modify')
+ @args('--project', dest="project", metavar='<project name>',
+ help='Project name to associate')
+ @args('--host', dest="host", metavar='<host>',
+ help='Host to associate')
+ @args('--disassociate-project', action="store_true", dest='dis_project',
+ default=False, help='Disassociate Network from Project')
+ @args('--disassociate-host', action="store_true", dest='dis_host',
+ default=False, help='Disassociate Host from Project')
+ def modify(self, fixed_range, project=None, host=None,
+ dis_project=None, dis_host=None):
+ """Associate/Disassociate Network with Project and/or Host
+ arguments: network project host
+ leave any field blank to ignore it
+ """
+ admin_context = context.get_admin_context()
+ network = db.network_get_by_cidr(admin_context, fixed_range)
+ net = {}
+ #User can choose the following actions each for project and host.
+ #1) Associate (set not None value given by project/host parameter)
+ #2) Disassociate (set None by disassociate parameter)
+ #3) Keep unchanged (project/host key is not added to 'net')
+ if project:
+ net['project_id'] = project
+ elif dis_project:
+ net['project_id'] = None
+ if host:
+ net['host'] = host
+ elif dis_host:
+ net['host'] = None
+ db.network_update(admin_context, network['id'], net)
+
class VmCommands(object):
"""Class for mangaging VM instances."""
@@ -1091,6 +1128,477 @@ class VersionCommands(object):
self.list()
+class VsaCommands(object):
+ """Methods for dealing with VSAs"""
+
+ def __init__(self, *args, **kwargs):
+ self.manager = manager.AuthManager()
+ self.vsa_api = vsa.API()
+ self.context = context.get_admin_context()
+
+ self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\
+ "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\
+ "%(az)-10s %(time)-10s"
+ self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\
+ "%(stat)-10s %(att)-20s %(time)s"
+ self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\
+ "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s"
+ self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\
+ "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\
+ "%(stat)-10s %(host)-15s %(time)s"
+
+ def _print_vsa_header(self):
+ print self._format_str_vsa %\
+ dict(id=_('ID'),
+ vsa_id=_('vsa_id'),
+ name=_('displayName'),
+ type=_('vc_type'),
+ vcs=_('vc_cnt'),
+ drives=_('drive_cnt'),
+ stat=_('status'),
+ az=_('AZ'),
+ time=_('createTime'))
+
+ def _print_vsa(self, vsa):
+ print self._format_str_vsa %\
+ dict(id=vsa['id'],
+ vsa_id=vsa['name'],
+ name=vsa['display_name'],
+ type=vsa['vsa_instance_type'].get('name', None),
+ vcs=vsa['vc_count'],
+ drives=vsa['vol_count'],
+ stat=vsa['status'],
+ az=vsa['availability_zone'],
+ time=str(vsa['created_at']))
+
+ def _print_volume_header(self):
+ print _(' === Volumes ===')
+ print self._format_str_volume %\
+ dict(id=_('ID'),
+ name=_('name'),
+ size=_('size'),
+ stat=_('status'),
+ att=_('attachment'),
+ time=_('createTime'))
+
+ def _print_volume(self, vol):
+ print self._format_str_volume %\
+ dict(id=vol['id'],
+ name=vol['display_name'] or vol['name'],
+ size=vol['size'],
+ stat=vol['status'],
+ att=vol['attach_status'],
+ time=str(vol['created_at']))
+
+ def _print_drive_header(self):
+ print _(' === Drives ===')
+ print self._format_str_drive %\
+ dict(id=_('ID'),
+ name=_('name'),
+ size=_('size'),
+ stat=_('status'),
+ host=_('host'),
+ type=_('type'),
+ tname=_('typeName'),
+ time=_('createTime'))
+
+ def _print_drive(self, drive):
+ if drive['volume_type_id'] is not None and drive.get('volume_type'):
+ drive_type_name = drive['volume_type'].get('name')
+ else:
+ drive_type_name = ''
+
+ print self._format_str_drive %\
+ dict(id=drive['id'],
+ name=drive['display_name'],
+ size=drive['size'],
+ stat=drive['status'],
+ host=drive['host'],
+ type=drive['volume_type_id'],
+ tname=drive_type_name,
+ time=str(drive['created_at']))
+
+ def _print_instance_header(self):
+ print _(' === Instances ===')
+ print self._format_str_instance %\
+ dict(id=_('ID'),
+ name=_('name'),
+ dname=_('disp_name'),
+ image=_('image'),
+ type=_('type'),
+ fl_ip=_('floating_IP'),
+ fx_ip=_('fixed_IP'),
+ stat=_('status'),
+ host=_('host'),
+ time=_('createTime'))
+
+ def _print_instance(self, vc):
+
+ fixed_addr = None
+ floating_addr = None
+ if vc['fixed_ips']:
+ fixed = vc['fixed_ips'][0]
+ fixed_addr = fixed['address']
+ if fixed['floating_ips']:
+ floating_addr = fixed['floating_ips'][0]['address']
+ floating_addr = floating_addr or fixed_addr
+
+ print self._format_str_instance %\
+ dict(id=vc['id'],
+ name=ec2utils.id_to_ec2_id(vc['id']),
+ dname=vc['display_name'],
+ image=('ami-%08x' % int(vc['image_ref'])),
+ type=vc['instance_type']['name'],
+ fl_ip=floating_addr,
+ fx_ip=fixed_addr,
+ stat=vc['state_description'],
+ host=vc['host'],
+ time=str(vc['created_at']))
+
+ def _list(self, context, vsas, print_drives=False,
+ print_volumes=False, print_instances=False):
+ if vsas:
+ self._print_vsa_header()
+
+ for vsa in vsas:
+ self._print_vsa(vsa)
+ vsa_id = vsa.get('id')
+
+ if print_instances:
+ instances = self.vsa_api.get_all_vsa_instances(context, vsa_id)
+ if instances:
+ print
+ self._print_instance_header()
+ for instance in instances:
+ self._print_instance(instance)
+ print
+
+ if print_drives:
+ drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
+ if drives:
+ self._print_drive_header()
+ for drive in drives:
+ self._print_drive(drive)
+ print
+
+ if print_volumes:
+ volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id)
+ if volumes:
+ self._print_volume_header()
+ for volume in volumes:
+ self._print_volume(volume)
+ print
+
+ @args('--storage', dest='storage',
+ metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]",
+ help='Initial storage allocation for VSA')
+ @args('--name', dest='name', metavar="<name>", help='VSA name')
+ @args('--description', dest='description', metavar="<description>",
+ help='VSA description')
+ @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
+ @args('--instance_type', dest='instance_type_name', metavar="<name>",
+ help='Instance type name')
+ @args('--image', dest='image_name', metavar="<name>", help='Image name')
+ @args('--shared', dest='shared', action="store_true", default=False,
+ help='Use shared drives')
+ @args('--az', dest='az', metavar="<zone:host>", help='Availability zone')
+ @args('--user', dest="user_id", metavar='<User name>',
+ help='User name')
+ @args('--project', dest="project_id", metavar='<Project name>',
+ help='Project name')
+ def create(self, storage='[]', name=None, description=None, vc_count=1,
+ instance_type_name=None, image_name=None, shared=None,
+ az=None, user_id=None, project_id=None):
+ """Create a VSA."""
+
+ if project_id is None:
+ try:
+ project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1]
+ except Exception as exc:
+ print _("Failed to retrieve project id: %(exc)s") % exc
+ raise
+
+ if user_id is None:
+ try:
+ project = self.manager.get_project(project_id)
+ user_id = project.project_manager_id
+ except Exception as exc:
+ print _("Failed to retrieve user info: %(exc)s") % exc
+ raise
+
+ is_admin = self.manager.is_admin(user_id)
+ ctxt = context.RequestContext(user_id, project_id, is_admin)
+ if not is_admin and \
+ not self.manager.is_project_member(user_id, project_id):
+ msg = _("%(user_id)s must be an admin or a "
+ "member of %(project_id)s")
+ LOG.warn(msg % locals())
+ raise ValueError(msg % locals())
+
+ # Sanity check for storage string
+ storage_list = []
+ if storage is not None:
+ try:
+ storage_list = ast.literal_eval(storage)
+ except:
+ print _("Invalid string format %s") % storage
+ raise
+
+ for node in storage_list:
+ if ('drive_name' not in node) or ('num_drives' not in node):
+ print (_("Invalid string format for element %s. " \
+ "Expecting keys 'drive_name' & 'num_drives'"),
+ str(node))
+ raise KeyError
+
+ if instance_type_name == '':
+ instance_type_name = None
+ instance_type = instance_types.get_instance_type_by_name(
+ instance_type_name)
+
+ if image_name == '':
+ image_name = None
+
+ if shared in [None, False, "--full_drives"]:
+ shared = False
+ elif shared in [True, "--shared"]:
+ shared = True
+ else:
+ raise ValueError(_('Shared parameter should be set either to "\
+ "--shared or --full_drives'))
+
+ values = {
+ 'display_name': name,
+ 'display_description': description,
+ 'vc_count': int(vc_count),
+ 'instance_type': instance_type,
+ 'image_name': image_name,
+ 'availability_zone': az,
+ 'storage': storage_list,
+ 'shared': shared,
+ }
+
+ result = self.vsa_api.create(ctxt, **values)
+ self._list(ctxt, [result])
+
+ @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
+ @args('--name', dest='name', metavar="<name>", help='VSA name')
+ @args('--description', dest='description', metavar="<description>",
+ help='VSA description')
+ @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
+ def update(self, vsa_id, name=None, description=None, vc_count=None):
+ """Updates name/description of vsa and number of VCs."""
+
+ values = {}
+ if name is not None:
+ values['display_name'] = name
+ if description is not None:
+ values['display_description'] = description
+ if vc_count is not None:
+ values['vc_count'] = int(vc_count)
+
+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
+ result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values)
+ self._list(self.context, [result])
+
+ @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
+ def delete(self, vsa_id):
+ """Delete a VSA."""
+ vsa_id = ec2utils.ec2_id_to_id(vsa_id)
+ self.vsa_api.delete(self.context, vsa_id)
+
+ @args('--id', dest='vsa_id', metavar="<vsa_id>",
+ help='VSA ID (optional)')
+ @args('--all', dest='all', action="store_true", default=False,
+ help='Show all available details')
+ @args('--drives', dest='drives', action="store_true",
+ help='Include drive-level details')
+ @args('--volumes', dest='volumes', action="store_true",
+ help='Include volume-level details')
+ @args('--instances', dest='instances', action="store_true",
+ help='Include instance-level details')
+ def list(self, vsa_id=None, all=False,
+ drives=False, volumes=False, instances=False):
+ """Describe all available VSAs (or particular one)."""
+
+ vsas = []
+ if vsa_id is not None:
+ internal_id = ec2utils.ec2_id_to_id(vsa_id)
+ vsa = self.vsa_api.get(self.context, internal_id)
+ vsas.append(vsa)
+ else:
+ vsas = self.vsa_api.get_all(self.context)
+
+ if all:
+ drives = volumes = instances = True
+
+ self._list(self.context, vsas, drives, volumes, instances)
+
+ def update_capabilities(self):
+ """Forces updates capabilities on all nova-volume nodes."""
+
+ rpc.fanout_cast(context.get_admin_context(),
+ FLAGS.volume_topic,
+ {"method": "notification",
+ "args": {"event": "startup"}})
+
+
+class VsaDriveTypeCommands(object):
+ """Methods for dealing with VSA drive types"""
+
+ def __init__(self, *args, **kwargs):
+ super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
+ self.context = context.get_admin_context()
+ self._drive_type_template = '%s_%sGB_%sRPM'
+
+ def _list(self, drives):
+ format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
+ if len(drives):
+ print format_str %\
+ (_('ID'),
+ _('name'),
+ _('type'),
+ _('size_gb'),
+ _('rpm'),
+ _('capabilities'),
+ _('visible'),
+ _('createTime'))
+
+ for name, vol_type in drives.iteritems():
+ drive = vol_type.get('extra_specs')
+ print format_str %\
+ (str(vol_type['id']),
+ drive['drive_name'],
+ drive['drive_type'],
+ drive['drive_size'],
+ drive['drive_rpm'],
+ drive.get('capabilities', ''),
+ str(drive.get('visible', '')),
+ str(vol_type['created_at']))
+
+ @args('--type', dest='type', metavar="<type>",
+ help='Drive type (SATA, SAS, SSD, etc.)')
+ @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
+ @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
+ @args('--capabilities', dest='capabilities', default=None,
+ metavar="<string>", help='Different capabilities')
+ @args('--hide', dest='hide', action="store_true", default=False,
+ help='Show or hide drive')
+ @args('--name', dest='name', metavar="<name>", help='Drive name')
+ def create(self, type, size_gb, rpm, capabilities=None,
+ hide=False, name=None):
+ """Create drive type."""
+
+ hide = True if hide in [True, "True", "--hide", "hide"] else False
+
+ if name is None:
+ name = self._drive_type_template % (type, size_gb, rpm)
+
+ extra_specs = {'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': type,
+ 'drive_size': size_gb,
+ 'drive_rpm': rpm,
+ 'visible': True,
+ }
+ if hide:
+ extra_specs['visible'] = False
+
+ if capabilities is not None and capabilities != '':
+ extra_specs['capabilities'] = capabilities
+
+ volume_types.create(self.context, name, extra_specs)
+ result = volume_types.get_volume_type_by_name(self.context, name)
+ self._list({name: result})
+
+ @args('--name', dest='name', metavar="<name>", help='Drive name')
+ @args('--purge', action="store_true", dest='purge', default=False,
+ help='purge record from database')
+ def delete(self, name, purge):
+ """Marks instance types / flavors as deleted"""
+ try:
+ if purge:
+ volume_types.purge(self.context, name)
+ verb = "purged"
+ else:
+ volume_types.destroy(self.context, name)
+ verb = "deleted"
+ except exception.ApiError:
+ print "Valid volume type name is required"
+ sys.exit(1)
+ except exception.DBError, e:
+ print "DB Error: %s" % e
+ sys.exit(2)
+ except:
+ sys.exit(3)
+ else:
+ print "%s %s" % (name, verb)
+
+ @args('--all', dest='all', action="store_true", default=False,
+ help='Show all drives (including invisible)')
+ @args('--name', dest='name', metavar="<name>",
+ help='Show only specified drive')
+ def list(self, all=False, name=None):
+ """Describe all available VSA drive types (or particular one)."""
+
+ all = False if all in ["--all", False, "False"] else True
+
+ search_opts = {'extra_specs': {'type': 'vsa_drive'}}
+ if name is not None:
+ search_opts['extra_specs']['name'] = name
+
+ if all == False:
+ search_opts['extra_specs']['visible'] = '1'
+
+ drives = volume_types.get_all_types(self.context,
+ search_opts=search_opts)
+ self._list(drives)
+
+ @args('--name', dest='name', metavar="<name>", help='Drive name')
+ @args('--type', dest='type', metavar="<type>",
+ help='Drive type (SATA, SAS, SSD, etc.)')
+ @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
+ @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
+ @args('--capabilities', dest='capabilities', default=None,
+ metavar="<string>", help='Different capabilities')
+ @args('--visible', dest='visible',
+ metavar="<show|hide>", help='Show or hide drive')
+ def update(self, name, type=None, size_gb=None, rpm=None,
+ capabilities=None, visible=None):
+ """Update drive type."""
+
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ extra_specs = {'type': 'vsa_drive'}
+
+ if type:
+ extra_specs['drive_type'] = type
+
+ if size_gb:
+ extra_specs['drive_size'] = size_gb
+
+ if rpm:
+ extra_specs['drive_rpm'] = rpm
+
+ if capabilities:
+ extra_specs['capabilities'] = capabilities
+
+ if visible is not None:
+ if visible in ["show", True, "True"]:
+ extra_specs['visible'] = True
+ elif visible in ["hide", False, "False"]:
+ extra_specs['visible'] = False
+ else:
+ raise ValueError(_('visible parameter should be set to '\
+ 'show or hide'))
+
+ db.api.volume_type_extra_specs_update_or_create(self.context,
+ volume_type['id'],
+ extra_specs)
+ result = volume_types.get_volume_type_by_name(self.context, name)
+ self._list({name: result})
+
+
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@@ -1477,6 +1985,7 @@ CATEGORIES = [
('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
+ ('drive', VsaDriveTypeCommands),
('fixed', FixedIpCommands),
('flavor', InstanceTypeCommands),
('floating', FloatingIpCommands),
@@ -1492,7 +2001,8 @@ CATEGORIES = [
('version', VersionCommands),
('vm', VmCommands),
('volume', VolumeCommands),
- ('vpn', VpnCommands)]
+ ('vpn', VpnCommands),
+ ('vsa', VsaCommands)]
def lazy_match(name, key_value_tuples):
diff --git a/bin/nova-network b/bin/nova-network
index 57759d30a..0f1482515 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -43,6 +43,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
server = service.Service.create(binary='nova-network')
service.serve(server)
service.wait()
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index c7a76e120..757301c24 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -49,6 +49,7 @@ if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server("S3 Objectstore",
router,
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 2e168cbc6..c1033a304 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -22,6 +22,7 @@
import eventlet
eventlet.monkey_patch()
+import gettext
import os
import sys
@@ -33,6 +34,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
@@ -43,6 +45,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
server = service.Service.create(binary='nova-scheduler')
service.serve(server)
service.wait()
diff --git a/bin/nova-volume b/bin/nova-volume
index 5405aebbb..8caa0f44a 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -43,6 +43,7 @@ if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
+ utils.monkey_patch()
server = service.Service.create(binary='nova-volume')
service.serve(server)
service.wait()
diff --git a/bin/nova-vsa b/bin/nova-vsa
new file mode 100755
index 000000000..2d6eee2c0
--- /dev/null
+++ b/bin/nova-vsa
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova VSA."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+from nova import flags
+from nova import log as logging
+from nova import service
+from nova import utils
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ flags.FLAGS(sys.argv)
+ logging.setup()
+ utils.monkey_patch()
+ server = service.Service.create(binary='nova-vsa')
+ service.serve(server)
+ service.wait()
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index b540509a2..cd24efb13 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -19,12 +19,18 @@ use = egg:Paste#urlmap
/1.0: ec2metadata
[pipeline:ec2cloud]
-pipeline = logrequest authenticate cloudrequest authorizer ec2executor
+pipeline = logrequest ec2noauth cloudrequest authorizer ec2executor
+# NOTE(vish): use the following pipeline for deprecated auth
+#pipeline = logrequest authenticate cloudrequest authorizer ec2executor
# NOTE(vish): use the following pipeline for keystone
# pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
[pipeline:ec2admin]
-pipeline = logrequest authenticate adminrequest authorizer ec2executor
+pipeline = logrequest ec2noauth adminrequest authorizer ec2executor
+# NOTE(vish): use the following pipeline for deprecated auth
+#pipeline = logrequest authenticate adminrequest authorizer ec2executor
+# NOTE(vish): use the following pipeline for keystone
+#pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
[pipeline:ec2metadata]
pipeline = logrequest ec2md
@@ -41,6 +47,9 @@ paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:totoken]
paste.filter_factory = nova.api.ec2:ToToken.factory
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
[filter:authenticate]
paste.filter_factory = nova.api.ec2:Authenticate.factory
@@ -75,12 +84,16 @@ use = egg:Paste#urlmap
/v1.1: openstackapi11
[pipeline:openstackapi10]
-pipeline = faultwrap auth ratelimit osapiapp10
+pipeline = faultwrap noauth ratelimit osapiapp10
+# NOTE(vish): use the following pipeline for deprecated auth
+# pipeline = faultwrap auth ratelimit osapiapp10
# NOTE(vish): use the following pipeline for keystone
#pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
[pipeline:openstackapi11]
-pipeline = faultwrap auth ratelimit extensions osapiapp11
+pipeline = faultwrap noauth ratelimit extensions osapiapp11
+# NOTE(vish): use the following pipeline for deprecated auth
+# pipeline = faultwrap auth ratelimit extensions osapiapp11
# NOTE(vish): use the following pipeline for keystone
# pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
@@ -90,6 +103,9 @@ paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:auth]
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
diff --git a/nova/api/auth.py b/nova/api/auth.py
index cd3e3e8a0..cd0d38b3f 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -62,6 +62,7 @@ class KeystoneContext(wsgi.Middleware):
req.headers.get('X_STORAGE_TOKEN'))
# Build a context, including the auth_token...
+ remote_address = getattr(req, 'remote_address', '127.0.0.1')
remote_address = req.remote_addr
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 17969099d..5430f443d 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -183,6 +183,27 @@ class ToToken(wsgi.Middleware):
return self.application
+class NoAuth(wsgi.Middleware):
+ """Add user:project as 'nova.context' to WSGI environ."""
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ if 'AWSAccessKeyId' not in req.params:
+ raise webob.exc.HTTPBadRequest()
+ user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
+ project_id = project_id or user_id
+        remote_address = getattr(req, 'remote_addr', '127.0.0.1')
+ if FLAGS.use_forwarded_for:
+ remote_address = req.headers.get('X-Forwarded-For', remote_address)
+ ctx = context.RequestContext(user_id,
+ project_id,
+ is_admin=True,
+ remote_address=remote_address)
+
+ req.environ['nova.context'] = ctx
+ return self.application
+
+
class Authenticate(wsgi.Middleware):
"""Authenticate an EC2 request and add 'nova.context' to WSGI environ."""
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index df7876b9d..dfbbc0a2b 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -283,8 +283,10 @@ class AdminController(object):
# NOTE(vish) import delayed because of __init__.py
from nova.cloudpipe import pipelib
pipe = pipelib.CloudPipe()
+ proj = manager.AuthManager().get_project(project)
+ user_id = proj.project_manager_id
try:
- pipe.launch_vpn_instance(project)
+ pipe.launch_vpn_instance(project, user_id)
except db.NoMoreNetworks:
raise exception.ApiError("Unable to claim IP for VPN instance"
", ensure it isn't running, and try "
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index b6ff1126b..6754fea27 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -33,6 +33,46 @@ from nova.api.openstack import faults
LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
+flags.DECLARE('use_forwarded_for', 'nova.api.auth')
+
+
+class NoAuthMiddleware(wsgi.Middleware):
+ """Return a fake token if one isn't specified."""
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ if 'X-Auth-Token' not in req.headers:
+ os_url = req.url
+ version = common.get_version_from_href(os_url)
+ user_id = req.headers.get('X-Auth-User', 'admin')
+ project_id = req.headers.get('X-Auth-Project-Id', 'admin')
+ if version == '1.1':
+ os_url += '/' + project_id
+ res = webob.Response()
+ # NOTE(vish): This is expecting and returning Auth(1.1), whereas
+ # keystone uses 2.0 auth. We should probably allow
+ # 2.0 auth here as well.
+ res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
+ res.headers['X-Server-Management-Url'] = os_url
+ res.headers['X-Storage-Url'] = ''
+ res.headers['X-CDN-Management-Url'] = ''
+ res.content_type = 'text/plain'
+ res.status = '204'
+ return res
+
+ token = req.headers['X-Auth-Token']
+ user_id, _sep, project_id = token.partition(':')
+ project_id = project_id or user_id
+        remote_address = getattr(req, 'remote_addr', '127.0.0.1')
+ if FLAGS.use_forwarded_for:
+ remote_address = req.headers.get('X-Forwarded-For', remote_address)
+ ctx = context.RequestContext(user_id,
+ project_id,
+ is_admin=True,
+ remote_address=remote_address)
+
+ req.environ['nova.context'] = ctx
+ return self.application
class AuthMiddleware(wsgi.Middleware):
@@ -85,9 +125,15 @@ class AuthMiddleware(wsgi.Middleware):
project_id = projects[0].id
is_admin = self.auth.is_admin(user_id)
- req.environ['nova.context'] = context.RequestContext(user_id,
- project_id,
- is_admin)
+        remote_address = getattr(req, 'remote_addr', '127.0.0.1')
+ if FLAGS.use_forwarded_for:
+ remote_address = req.headers.get('X-Forwarded-For', remote_address)
+ ctx = context.RequestContext(user_id,
+ project_id,
+ is_admin=is_admin,
+ remote_address=remote_address)
+ req.environ['nova.context'] = ctx
+
if not is_admin and not self.auth.is_project_member(user_id,
project_id):
msg = _("%(user_id)s must be an admin or a "
diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py
new file mode 100644
index 000000000..e09736a28
--- /dev/null
+++ b/nova/api/openstack/contrib/virtual_storage_arrays.py
@@ -0,0 +1,606 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The virtual storage array extension"""
+
+
+from webob import exc
+
+from nova import vsa
+from nova import volume
+from nova import compute
+from nova import network
+from nova import db
+from nova import quota
+from nova import exception
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+from nova.api.openstack import servers
+from nova.api.openstack.contrib import volumes
+from nova.compute import instance_types
+
+from nova import flags
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger("nova.api.vsa")
+
+
+def _vsa_view(context, vsa, details=False, instances=None):
+ """Map keys for vsa summary/detailed view."""
+ d = {}
+
+ d['id'] = vsa.get('id')
+ d['name'] = vsa.get('name')
+ d['displayName'] = vsa.get('display_name')
+ d['displayDescription'] = vsa.get('display_description')
+
+ d['createTime'] = vsa.get('created_at')
+ d['status'] = vsa.get('status')
+
+ if 'vsa_instance_type' in vsa:
+ d['vcType'] = vsa['vsa_instance_type'].get('name', None)
+ else:
+ d['vcType'] = vsa['instance_type_id']
+
+ d['vcCount'] = vsa.get('vc_count')
+ d['driveCount'] = vsa.get('vol_count')
+
+ d['ipAddress'] = None
+ for instance in instances:
+ fixed_addr = None
+ floating_addr = None
+ if instance['fixed_ips']:
+ fixed = instance['fixed_ips'][0]
+ fixed_addr = fixed['address']
+ if fixed['floating_ips']:
+ floating_addr = fixed['floating_ips'][0]['address']
+
+ if floating_addr:
+ d['ipAddress'] = floating_addr
+ break
+ else:
+ d['ipAddress'] = d['ipAddress'] or fixed_addr
+
+ return d
+
+
+class VsaController(object):
+ """The Virtual Storage Array API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vsa": [
+ "id",
+ "name",
+ "displayName",
+ "displayDescription",
+ "createTime",
+ "status",
+ "vcType",
+ "vcCount",
+ "driveCount",
+ "ipAddress",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ self.compute_api = compute.API()
+ self.network_api = network.API()
+ super(VsaController, self).__init__()
+
+ def _get_instances_by_vsa_id(self, context, id):
+ return self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(id))})
+
+ def _items(self, req, details):
+ """Return summary or detailed list of VSAs."""
+ context = req.environ['nova.context']
+ vsas = self.vsa_api.get_all(context)
+ limited_list = common.limited(vsas, req)
+
+ vsa_list = []
+ for vsa in limited_list:
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ vsa_list.append(_vsa_view(context, vsa, details, instances))
+ return {'vsaSet': vsa_list}
+
+ def index(self, req):
+ """Return a short list of VSAs."""
+ return self._items(req, details=False)
+
+ def detail(self, req):
+ """Return a detailed list of VSAs."""
+ return self._items(req, details=True)
+
+ def show(self, req, id):
+ """Return data about the given VSA."""
+ context = req.environ['nova.context']
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ return {'vsa': _vsa_view(context, vsa, True, instances)}
+
+ def create(self, req, body):
+ """Create a new VSA."""
+ context = req.environ['nova.context']
+
+ if not body or 'vsa' not in body:
+ LOG.debug(_("No body provided"), context=context)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vsa = body['vsa']
+
+ display_name = vsa.get('displayName')
+ vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
+ try:
+ instance_type = instance_types.get_instance_type_by_name(vc_type)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
+ locals(), context=context)
+
+ args = dict(display_name=display_name,
+ display_description=vsa.get('displayDescription'),
+ instance_type=instance_type,
+ storage=vsa.get('storage'),
+ shared=vsa.get('shared'),
+ availability_zone=vsa.get('placement', {}).\
+ get('AvailabilityZone'))
+
+ vsa = self.vsa_api.create(context, **args)
+
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ return {'vsa': _vsa_view(context, vsa, True, instances)}
+
+ def delete(self, req, id):
+ """Delete a VSA."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete VSA with id: %s"), id, context=context)
+
+ try:
+ self.vsa_api.delete(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def associate_address(self, req, id, body):
+ """ /zadr-vsa/{vsa_id}/associate_address
+ auto or manually associate an IP to VSA
+ """
+ context = req.environ['nova.context']
+
+ if body is None:
+ ip = 'auto'
+ else:
+ ip = body.get('ipAddress', 'auto')
+
+ LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
+ locals(), context=context)
+
+ try:
+ instances = self._get_instances_by_vsa_id(context, id)
+ if instances is None or len(instances) == 0:
+ return faults.Fault(exc.HTTPNotFound())
+
+ for instance in instances:
+ self.network_api.allocate_for_instance(context, instance,
+ vpn=False)
+ # Placeholder
+ return
+
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def disassociate_address(self, req, id, body):
+ """ /zadr-vsa/{vsa_id}/disassociate_address
+        auto or manually disassociate an IP from VSA
+ """
+ context = req.environ['nova.context']
+
+ if body is None:
+ ip = 'auto'
+ else:
+ ip = body.get('ipAddress', 'auto')
+
+ LOG.audit(_("Disassociate address from VSA %(id)s"),
+ locals(), context=context)
+ # Placeholder
+
+
+class VsaVolumeDriveController(volumes.VolumeController):
+ """The base class for VSA volumes & drives.
+
+ A child resource of the VSA object. Allows operations with
+ volumes and drives created to/from particular VSA
+
+ """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "volume": [
+ "id",
+ "name",
+ "status",
+ "size",
+ "availabilityZone",
+ "createdAt",
+ "displayName",
+ "displayDescription",
+ "vsaId",
+ ]}}}
+
+ def __init__(self):
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+ super(VsaVolumeDriveController, self).__init__()
+
+ def _translation(self, context, vol, vsa_id, details):
+ if details:
+ translation = volumes._translate_volume_detail_view
+ else:
+ translation = volumes._translate_volume_summary_view
+
+ d = translation(context, vol)
+ d['vsaId'] = vsa_id
+ d['name'] = vol['name']
+ return d
+
+ def _check_volume_ownership(self, context, vsa_id, id):
+ obj = self.object
+ try:
+ volume_ref = self.volume_api.get(context, volume_id=id)
+ except exception.NotFound:
+ LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
+ raise
+
+ own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref,
+ self.direction)
+ if own_vsa_id != vsa_id:
+ LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\
+ " and not to VSA %(vsa_id)s."), locals())
+ raise exception.Invalid()
+
+ def _items(self, req, vsa_id, details):
+ """Return summary or detailed list of volumes for particular VSA."""
+ context = req.environ['nova.context']
+
+ vols = self.volume_api.get_all(context,
+ search_opts={'metadata': {self.direction: str(vsa_id)}})
+ limited_list = common.limited(vols, req)
+
+ res = [self._translation(context, vol, vsa_id, details) \
+ for vol in limited_list]
+
+ return {self.objects: res}
+
+ def index(self, req, vsa_id):
+ """Return a short list of volumes created from particular VSA."""
+ LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=False)
+
+ def detail(self, req, vsa_id):
+ """Return a detailed list of volumes created from particular VSA."""
+ LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=True)
+
+ def create(self, req, vsa_id, body):
+ """Create a new volume from VSA."""
+ LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())
+ context = req.environ['nova.context']
+
+ if not body:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol = body[self.object]
+ size = vol['size']
+ LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
+ locals(), context=context)
+ try:
+ # create is supported for volumes only (drives created through VSA)
+ volume_type = self.vsa_api.get_vsa_volume_type(context)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ new_volume = self.volume_api.create(context,
+ size,
+ None,
+ vol.get('displayName'),
+ vol.get('displayDescription'),
+ volume_type=volume_type,
+ metadata=dict(from_vsa_id=str(vsa_id)))
+
+ return {self.object: self._translation(context, new_volume,
+ vsa_id, True)}
+
+ def update(self, req, vsa_id, id, body):
+ """Update a volume."""
+ context = req.environ['nova.context']
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ vol = body[self.object]
+ updatable_fields = [{'displayName': 'display_name'},
+ {'displayDescription': 'display_description'},
+ {'status': 'status'},
+ {'providerLocation': 'provider_location'},
+ {'providerAuth': 'provider_auth'}]
+ changes = {}
+ for field in updatable_fields:
+ key = field.keys()[0]
+ val = field[key]
+ if key in vol:
+ changes[val] = vol[key]
+
+ obj = self.object
+ LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
+ locals(), context=context)
+
+ try:
+ self.volume_api.update(context, volume_id=id, fields=changes)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ def delete(self, req, vsa_id, id):
+ """Delete a volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).delete(req, id)
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).show(req, id)
+
+
+class VsaVolumeController(VsaVolumeDriveController):
+ """The VSA volume API controller for the Openstack API.
+
+ A child resource of the VSA object. Allows operations with volumes created
+ by particular VSA
+
+ """
+
+ def __init__(self):
+ self.direction = 'from_vsa_id'
+ self.objects = 'volumes'
+ self.object = 'volume'
+ super(VsaVolumeController, self).__init__()
+
+
+class VsaDriveController(VsaVolumeDriveController):
+ """The VSA Drive API controller for the Openstack API.
+
+ A child resource of the VSA object. Allows operations with drives created
+ for particular VSA
+
+ """
+
+ def __init__(self):
+ self.direction = 'to_vsa_id'
+ self.objects = 'drives'
+ self.object = 'drive'
+ super(VsaDriveController, self).__init__()
+
+ def create(self, req, vsa_id, body):
+ """Create a new drive for VSA. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update a drive. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a volume. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVPoolController(object):
+ """The vPool VSA API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vpool": [
+ "id",
+ "vsaId",
+ "name",
+ "displayName",
+ "displayDescription",
+ "driveCount",
+ "driveIds",
+ "protection",
+ "stripeSize",
+ "stripeWidth",
+ "createTime",
+ "status",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ super(VsaVPoolController, self).__init__()
+
+ def index(self, req, vsa_id):
+ """Return a short list of vpools created from particular VSA."""
+ return {'vpools': []}
+
+ def create(self, req, vsa_id, body):
+ """Create a new vPool for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update vPool parameters."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVCController(servers.ControllerV11):
+ """The VSA Virtual Controller API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ self.compute_api = compute.API()
+ self.vsa_id = None # VP-TODO: temporary ugly hack
+ super(VsaVCController, self).__init__()
+
+ def _get_servers(self, req, is_detail):
+ """Returns a list of servers, taking into account any search
+ options specified.
+ """
+
+ if self.vsa_id is None:
+            return super(VsaVCController, self)._get_servers(req, is_detail)
+
+ context = req.environ['nova.context']
+
+ search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))}
+ instance_list = self.compute_api.get_all(
+ context, search_opts=search_opts)
+
+ limited_list = self._limit_items(instance_list, req)
+ servers = [self._build_view(req, inst, is_detail)['server']
+ for inst in limited_list]
+ return dict(servers=servers)
+
+ def index(self, req, vsa_id):
+ """Return list of instances for particular VSA."""
+
+ LOG.audit(_("Index instances for VSA %s"), vsa_id)
+
+ self.vsa_id = vsa_id # VP-TODO: temporary ugly hack
+ result = super(VsaVCController, self).detail(req)
+ self.vsa_id = None
+ return result
+
+ def create(self, req, vsa_id, body):
+ """Create a new instance for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update VSA instance."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete VSA instance."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given instance."""
+ return super(VsaVCController, self).show(req, id)
+
+
+class Virtual_storage_arrays(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "VSAs"
+
+ def get_alias(self):
+ return "zadr-vsa"
+
+ def get_description(self):
+ return "Virtual Storage Arrays support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/vsa/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-25T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'zadr-vsa',
+ VsaController(),
+ collection_actions={'detail': 'GET'},
+ member_actions={'add_capacity': 'POST',
+ 'remove_capacity': 'POST',
+ 'associate_address': 'POST',
+ 'disassociate_address': 'POST'})
+ resources.append(res)
+
+ res = extensions.ResourceExtension('volumes',
+ VsaVolumeController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('drives',
+ VsaDriveController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('vpools',
+ VsaVPoolController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('instances',
+ VsaVCController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index 867fe301e..d62225e58 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -24,6 +24,7 @@ from nova import flags
from nova import log as logging
from nova import quota
from nova import volume
+from nova.volume import volume_types
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import faults
@@ -63,6 +64,22 @@ def _translate_volume_summary_view(context, vol):
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
+
+ if vol['volume_type_id'] and vol.get('volume_type'):
+ d['volumeType'] = vol['volume_type']['name']
+ else:
+ d['volumeType'] = vol['volume_type_id']
+
+ LOG.audit(_("vol=%s"), vol, context=context)
+
+ if vol.get('volume_metadata'):
+ meta_dict = {}
+ for i in vol['volume_metadata']:
+ meta_dict[i['key']] = i['value']
+ d['metadata'] = meta_dict
+ else:
+ d['metadata'] = {}
+
return d
@@ -80,6 +97,8 @@ class VolumeController(object):
"createdAt",
"displayName",
"displayDescription",
+ "volumeType",
+ "metadata",
]}}}
def __init__(self):
@@ -136,12 +155,25 @@ class VolumeController(object):
vol = body['volume']
size = vol['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
+
+ vol_type = vol.get('volume_type', None)
+ if vol_type:
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context,
+ vol_type)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ metadata = vol.get('metadata', None)
+
new_volume = self.volume_api.create(context, size, None,
vol.get('display_name'),
- vol.get('display_description'))
+ vol.get('display_description'),
+ volume_type=vol_type,
+ metadata=metadata)
# Work around problem that instance is lazy-loaded...
- new_volume['instance'] = None
+ new_volume = self.volume_api.get(context, new_volume['id'])
retval = _translate_volume_detail_view(context, new_volume)
diff --git a/nova/api/openstack/contrib/volumetypes.py b/nova/api/openstack/contrib/volumetypes.py
new file mode 100644
index 000000000..ed33a8819
--- /dev/null
+++ b/nova/api/openstack/contrib/volumetypes.py
@@ -0,0 +1,197 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The volume type & volume types extra specs extension"""
+
+from webob import exc
+
+from nova import db
+from nova import exception
+from nova import quota
+from nova.volume import volume_types
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
+
+class VolumeTypesController(object):
+ """ The volume types API controller for the Openstack API """
+
+ def index(self, req):
+ """ Returns the list of volume types """
+ context = req.environ['nova.context']
+ return volume_types.get_all_types(context)
+
+ def create(self, req, body):
+ """Creates a new volume type."""
+ context = req.environ['nova.context']
+
+ if not body or body == "":
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol_type = body.get('volume_type', None)
+ if vol_type is None or vol_type == "":
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ name = vol_type.get('name', None)
+ specs = vol_type.get('extra_specs', {})
+
+ if name is None or name == "":
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ try:
+ volume_types.create(context, name, specs)
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volume_type': vol_type}
+
+ def show(self, req, id):
+ """ Return a single volume type item """
+ context = req.environ['nova.context']
+
+ try:
+ vol_type = volume_types.get_volume_type(context, id)
+        except (exception.NotFound, exception.ApiError):
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volume_type': vol_type}
+
+ def delete(self, req, id):
+ """ Deletes an existing volume type """
+ context = req.environ['nova.context']
+
+ try:
+ vol_type = volume_types.get_volume_type(context, id)
+ volume_types.destroy(context, vol_type['name'])
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
+
+
+class VolumeTypeExtraSpecsController(object):
+ """ The volume type extra specs API controller for the Openstack API """
+
+ def _get_extra_specs(self, context, vol_type_id):
+ extra_specs = db.api.volume_type_extra_specs_get(context, vol_type_id)
+ specs_dict = {}
+ for key, value in extra_specs.iteritems():
+ specs_dict[key] = value
+ return dict(extra_specs=specs_dict)
+
+ def _check_body(self, body):
+        if body is None or body == "":
+ expl = _('No Request Body')
+ raise exc.HTTPBadRequest(explanation=expl)
+
+ def index(self, req, vol_type_id):
+ """ Returns the list of extra specs for a given volume type """
+ context = req.environ['nova.context']
+ return self._get_extra_specs(context, vol_type_id)
+
+ def create(self, req, vol_type_id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+ specs = body.get('extra_specs')
+ try:
+ db.api.volume_type_extra_specs_update_or_create(context,
+ vol_type_id,
+ specs)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ return body
+
+ def update(self, req, vol_type_id, id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+        if id not in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ db.api.volume_type_extra_specs_update_or_create(context,
+ vol_type_id,
+ body)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
+ return body
+
+ def show(self, req, vol_type_id, id):
+ """ Return a single extra spec item """
+ context = req.environ['nova.context']
+ specs = self._get_extra_specs(context, vol_type_id)
+ if id in specs['extra_specs']:
+ return {id: specs['extra_specs'][id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def delete(self, req, vol_type_id, id):
+ """ Deletes an existing extra spec """
+ context = req.environ['nova.context']
+ db.api.volume_type_extra_specs_delete(context, vol_type_id, id)
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
+
+
+class Volumetypes(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "VolumeTypes"
+
+ def get_alias(self):
+ return "os-volume-types"
+
+ def get_description(self):
+ return "Volume types support"
+
+ def get_namespace(self):
+ return \
+ "http://docs.openstack.org/ext/volume_types/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-24T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'os-volume-types',
+ VolumeTypesController())
+ resources.append(res)
+
+ res = extensions.ResourceExtension('extra_specs',
+ VolumeTypeExtraSpecsController(),
+ parent=dict(
+ member_name='vol_type',
+ collection_name='os-volume-types'))
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 4b4a1b0c3..483ff4985 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -1,4 +1,5 @@
# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -106,6 +107,7 @@ class CreateInstanceHelper(object):
raise exc.HTTPBadRequest(explanation=msg)
personality = server_dict.get('personality')
+ config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
@@ -159,6 +161,7 @@ class CreateInstanceHelper(object):
extra_values = {
'instance_type': inst_type,
'image_ref': image_href,
+ 'config_drive': config_drive,
'password': password}
return (extra_values,
@@ -183,7 +186,8 @@ class CreateInstanceHelper(object):
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
- availability_zone=availability_zone))
+ availability_zone=availability_zone,
+ config_drive=config_drive,))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 553357404..27c67e79e 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -604,8 +604,10 @@ class ControllerV10(Controller):
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
+ password = utils.generate_password(16)
+
try:
- self.compute_api.rebuild(context, instance_id, image_id)
+ self.compute_api.rebuild(context, instance_id, image_id, password)
except exception.BuildInProgress:
msg = _("Instance %s is currently being rebuilt.") % instance_id
LOG.debug(msg)
@@ -741,15 +743,26 @@ class ControllerV11(Controller):
self._validate_metadata(metadata)
self._decode_personalities(personalities)
+ password = info["rebuild"].get("adminPass",
+ utils.generate_password(16))
+
try:
- self.compute_api.rebuild(context, instance_id, image_href, name,
- metadata, personalities)
+ self.compute_api.rebuild(context, instance_id, image_href,
+ password, name=name, metadata=metadata,
+ files_to_inject=personalities)
except exception.BuildInProgress:
msg = _("Instance %s is currently being rebuilt.") % instance_id
LOG.debug(msg)
raise exc.HTTPConflict(explanation=msg)
+ except exception.InstanceNotFound:
+ msg = _("Instance %s could not be found") % instance_id
+ raise exc.HTTPNotFound(explanation=msg)
- return webob.Response(status_int=202)
+ instance = self.compute_api.routing_get(context, instance_id)
+ view = self._build_view(request, instance, is_detail=True)
+ view['server']['adminPass'] = password
+
+ return view
@common.check_snapshots_enabled
def _action_create_image(self, input_dict, req, instance_id):
@@ -816,6 +829,9 @@ class HeadersSerializer(wsgi.ResponseHeadersSerializer):
def delete(self, response, data):
response.status_int = 204
+ def action(self, response, data):
+ response.status_int = 202
+
class ServerXMLSerializer(wsgi.XMLDictSerializer):
@@ -937,6 +953,11 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer):
node.setAttribute('adminPass', server_dict['server']['adminPass'])
return self.to_xml_string(node, True)
+ def action(self, server_dict):
+ #NOTE(bcwaldon): We need a way to serialize actions individually. This
+ # assumes all actions return a server entity
+ return self.create(server_dict)
+
def update(self, server_dict):
xml_doc = minidom.Document()
node = self._server_to_xml_detailed(xml_doc,
diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py
index ddbf7a144..8f07a2289 100644
--- a/nova/api/openstack/views/addresses.py
+++ b/nova/api/openstack/views/addresses.py
@@ -17,9 +17,11 @@
from nova import flags
from nova import utils
+from nova import log as logging
from nova.api.openstack import common
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.api.openstack.views.addresses')
class ViewBuilder(object):
@@ -48,7 +50,10 @@ class ViewBuilderV11(ViewBuilder):
def build(self, interfaces):
networks = {}
for interface in interfaces:
- network_label = interface['network']['label']
+ try:
+ network_label = self._extract_network_label(interface)
+ except TypeError:
+ continue
if network_label not in networks:
networks[network_label] = []
@@ -64,9 +69,14 @@ class ViewBuilderV11(ViewBuilder):
return networks
- def build_network(self, interfaces, network_label):
+ def build_network(self, interfaces, requested_network):
for interface in interfaces:
- if interface['network']['label'] == network_label:
+ try:
+ network_label = self._extract_network_label(interface)
+ except TypeError:
+ continue
+
+ if network_label == requested_network:
ips = list(self._extract_ipv4_addresses(interface))
ipv6 = self._extract_ipv6_address(interface)
if ipv6 is not None:
@@ -74,6 +84,13 @@ class ViewBuilderV11(ViewBuilder):
return {network_label: ips}
return None
+ def _extract_network_label(self, interface):
+ try:
+ return interface['network']['label']
+ except (TypeError, KeyError) as exc:
+ LOG.exception(exc)
+ raise TypeError
+
def _extract_ipv4_addresses(self, interface):
for fixed_ip in interface['fixed_ips']:
yield self._build_ip_entity(fixed_ip['address'], 4)
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 465287adc..0ec98591e 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
+# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -187,6 +188,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_extra(self, response, inst):
self._build_links(response, inst)
response['uuid'] = inst['uuid']
+ self._build_config_drive(response, inst)
def _build_links(self, response, inst):
href = self.generate_href(inst["id"])
@@ -205,6 +207,9 @@ class ViewBuilderV11(ViewBuilder):
response["links"] = links
+ def _build_config_drive(self, response, inst):
+ response['config_drive'] = inst.get('config_drive')
+
def generate_href(self, server_id):
"""Create an url that refers to a specific server id."""
return os.path.join(self.base_url, self.project_id,
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index dc0f1b93e..8641e960a 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -520,6 +520,6 @@ class Resource(wsgi.Application):
controller_method = getattr(self.controller, action)
try:
return controller_method(req=request, **action_args)
- except TypeError, exc:
- LOG.debug(str(exc))
- return webob.exc.HTTPBadRequest()
+ except TypeError as exc:
+ LOG.exception(exc)
+ return faults.Fault(webob.exc.HTTPBadRequest())
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 6205cfb56..44e6e11ac 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -17,6 +17,9 @@
# under the License.
"""
+WARNING: This code is deprecated and will be removed.
+Keystone is the recommended solution for auth management.
+
Nova authentication management
"""
@@ -38,10 +41,13 @@ from nova.auth import signer
FLAGS = flags.FLAGS
+flags.DEFINE_bool('use_deprecated_auth',
+ False,
+ 'This flag must be set to use old style auth')
+
flags.DEFINE_list('allowed_roles',
['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
'Allowed roles for project')
-
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
@@ -811,7 +817,13 @@ class AuthManager(object):
s3_host = host
ec2_host = host
rc = open(FLAGS.credentials_template).read()
- rc = rc % {'access': user.access,
+ # NOTE(vish): Deprecated auth uses an access key; no auth uses the
+ # user_id in place of it.
+ if FLAGS.use_deprecated_auth:
+ access = user.access
+ else:
+ access = user.id
+ rc = rc % {'access': access,
'project': pid,
'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 2c4673f9e..3eb372844 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -34,7 +34,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
-from nova.auth import manager
# TODO(eday): Eventually changes these to something not ec2-specific
from nova.api.ec2 import cloud
@@ -57,7 +56,6 @@ LOG = logging.getLogger('nova.cloudpipe')
class CloudPipe(object):
def __init__(self):
self.controller = cloud.CloudController()
- self.manager = manager.AuthManager()
def get_encoded_zip(self, project_id):
# Make a payload.zip
@@ -93,11 +91,10 @@ class CloudPipe(object):
zippy.close()
return encoded
- def launch_vpn_instance(self, project_id):
+ def launch_vpn_instance(self, project_id, user_id):
LOG.debug(_("Launching VPN for %s") % (project_id))
- project = self.manager.get_project(project_id)
- ctxt = context.RequestContext(user=project.project_manager_id,
- project=project.id)
+ ctxt = context.RequestContext(user_id=user_id,
+ project_id=project_id)
key_name = self.setup_key_pair(ctxt)
group_name = self.setup_security_group(ctxt)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 7de91584f..3b4bde8ea 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -54,15 +55,15 @@ def generate_default_hostname(instance):
"""Default function to generate a hostname given an instance reference."""
display_name = instance['display_name']
if display_name is None:
- return 'server_%d' % (instance['id'],)
+ return 'server-%d' % (instance['id'],)
table = ''
deletions = ''
for i in xrange(256):
c = chr(i)
if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
table += c
- elif c == ' ':
- table += '_'
+ elif c in " _":
+ table += '-'
elif ('A' <= c <= 'Z'):
table += c.lower()
else:
@@ -164,7 +165,7 @@ class API(base.Base):
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None, zone_blob=None,
reservation_id=None, access_ip_v4=None, access_ip_v6=None,
- requested_networks=None):
+ requested_networks=None, config_drive=None,):
"""Verify all the input parameters regardless of the provisioning
strategy being performed."""
@@ -198,6 +199,11 @@ class API(base.Base):
(image_service, image_id) = nova.image.get_image_service(image_href)
image = image_service.show(context, image_id)
+ config_drive_id = None
+ if config_drive and config_drive is not True:
+ # config_drive is volume id
+ config_drive, config_drive_id = None, config_drive
+
os_type = None
if 'properties' in image and 'os_type' in image['properties']:
os_type = image['properties']['os_type']
@@ -225,6 +231,8 @@ class API(base.Base):
image_service.show(context, kernel_id)
if ramdisk_id:
image_service.show(context, ramdisk_id)
+ if config_drive_id:
+ image_service.show(context, config_drive_id)
self.ensure_default_security_group(context)
@@ -243,6 +251,8 @@ class API(base.Base):
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
+ 'config_drive_id': config_drive_id or '',
+ 'config_drive': config_drive or '',
'state': 0,
'state_description': 'scheduling',
'user_id': context.user_id,
@@ -454,7 +464,7 @@ class API(base.Base):
injected_files=None, admin_password=None, zone_blob=None,
reservation_id=None, block_device_mapping=None,
access_ip_v4=None, access_ip_v6=None,
- requested_networks=None):
+ requested_networks=None, config_drive=None):
"""Provision the instances by passing the whole request to
the Scheduler for execution. Returns a Reservation ID
related to the creation of all of these instances."""
@@ -471,7 +481,7 @@ class API(base.Base):
availability_zone, user_data, metadata,
injected_files, admin_password, zone_blob,
reservation_id, access_ip_v4, access_ip_v6,
- requested_networks)
+ requested_networks, config_drive)
self._ask_scheduler_to_create_instance(context, base_options,
instance_type, zone_blob,
@@ -491,7 +501,7 @@ class API(base.Base):
injected_files=None, admin_password=None, zone_blob=None,
reservation_id=None, block_device_mapping=None,
access_ip_v4=None, access_ip_v6=None,
- requested_networks=None):
+ requested_networks=None, config_drive=None,):
"""
Provision the instances by sending off a series of single
instance requests to the Schedulers. This is fine for trivial
@@ -516,7 +526,7 @@ class API(base.Base):
availability_zone, user_data, metadata,
injected_files, admin_password, zone_blob,
reservation_id, access_ip_v4, access_ip_v6,
- requested_networks)
+ requested_networks, config_drive)
block_device_mapping = block_device_mapping or []
instances = []
@@ -1013,8 +1023,8 @@ class API(base.Base):
self._cast_compute_message('reboot_instance', context, instance_id)
@scheduler_api.reroute_compute("rebuild")
- def rebuild(self, context, instance_id, image_href, name=None,
- metadata=None, files_to_inject=None):
+ def rebuild(self, context, instance_id, image_href, admin_password,
+ name=None, metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
instance = db.api.instance_get(context, instance_id)
@@ -1025,7 +1035,7 @@ class API(base.Base):
files_to_inject = files_to_inject or []
self._check_injected_file_quota(context, files_to_inject)
- values = {}
+ values = {"image_ref": image_href}
if metadata is not None:
self._check_metadata_properties_quota(context, metadata)
values['metadata'] = metadata
@@ -1034,7 +1044,7 @@ class API(base.Base):
self.db.instance_update(context, instance_id, values)
rebuild_params = {
- "image_ref": image_href,
+ "new_pass": admin_password,
"injected_files": files_to_inject,
}
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c207eccbb..6fcb3786c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -201,11 +201,6 @@ class ComputeManager(manager.SchedulerDependentManager):
data = {'launched_at': launched_at or utils.utcnow()}
self.db.instance_update(context, instance_id, data)
- def _update_image_ref(self, context, instance_id, image_ref):
- """Update the image_id for the given instance."""
- data = {'image_ref': image_ref}
- self.db.instance_update(context, instance_id, data)
-
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -526,7 +521,8 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: `nova.RequestContext` object
:param instance_id: Instance identifier (integer)
- :param image_ref: Image identifier (href or integer)
+ :param injected_files: Files to inject
+ :param new_pass: password to set on rebuilt instance
"""
context = context.elevated()
@@ -538,19 +534,21 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, network_info)
- image_ref = kwargs.get('image_ref')
- instance_ref.image_ref = image_ref
instance_ref.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
instance_ref)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
+
+ # pull in new password here since the original password isn't in the db
+ instance_ref.admin_pass = kwargs.get('new_pass',
+ utils.generate_password(FLAGS.password_length))
+
self.driver.spawn(context, instance_ref, network_info, bd_mapping)
- self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
- usage_info = utils.usage_from_instance(instance_ref,
- image_ref=image_ref)
+ usage_info = utils.usage_from_instance(instance_ref)
+
notifier.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier.INFO,
diff --git a/nova/db/api.py b/nova/db/api.py
index 17ef0bd0b..07d6e1095 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x',
'Template string to be used to generate instance names')
flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
'Template string to be used to generate snapshot names')
-
+flags.DEFINE_string('vsa_name_template', 'vsa-%08x',
+ 'Template string to be used to generate VSA names')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
sqlalchemy='nova.db.sqlalchemy.api')
@@ -1446,3 +1447,112 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id,
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs)
+
+
+##################
+
+
+def volume_metadata_get(context, volume_id):
+ """Get all metadata for a volume."""
+ return IMPL.volume_metadata_get(context, volume_id)
+
+
+def volume_metadata_delete(context, volume_id, key):
+ """Delete the given metadata item."""
+ IMPL.volume_metadata_delete(context, volume_id, key)
+
+
+def volume_metadata_update(context, volume_id, metadata, delete):
+ """Update metadata if it exists, otherwise create it."""
+ IMPL.volume_metadata_update(context, volume_id, metadata, delete)
+
+
+##################
+
+
+def volume_type_create(context, values):
+ """Create a new volume type."""
+ return IMPL.volume_type_create(context, values)
+
+
+def volume_type_get_all(context, inactive=False):
+ """Get all volume types."""
+ return IMPL.volume_type_get_all(context, inactive)
+
+
+def volume_type_get(context, id):
+ """Get volume type by id."""
+ return IMPL.volume_type_get(context, id)
+
+
+def volume_type_get_by_name(context, name):
+ """Get volume type by name."""
+ return IMPL.volume_type_get_by_name(context, name)
+
+
+def volume_type_destroy(context, name):
+ """Delete a volume type."""
+ return IMPL.volume_type_destroy(context, name)
+
+
+def volume_type_purge(context, name):
+ """Purges (removes) a volume type from DB.
+
+ Use volume_type_destroy for most cases
+
+ """
+ return IMPL.volume_type_purge(context, name)
+
+
+####################
+
+
+def volume_type_extra_specs_get(context, volume_type_id):
+ """Get all extra specs for a volume type."""
+ return IMPL.volume_type_extra_specs_get(context, volume_type_id)
+
+
+def volume_type_extra_specs_delete(context, volume_type_id, key):
+ """Delete the given extra specs item."""
+ IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
+
+
+def volume_type_extra_specs_update_or_create(context, volume_type_id,
+ extra_specs):
+ """Create or update volume type extra specs. This adds or modifies the
+ key/value pairs specified in the extra specs dict argument"""
+ IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
+ extra_specs)
+
+
+####################
+
+
+def vsa_create(context, values):
+ """Creates Virtual Storage Array record."""
+ return IMPL.vsa_create(context, values)
+
+
+def vsa_update(context, vsa_id, values):
+ """Updates Virtual Storage Array record."""
+ return IMPL.vsa_update(context, vsa_id, values)
+
+
+def vsa_destroy(context, vsa_id):
+ """Deletes Virtual Storage Array record."""
+ return IMPL.vsa_destroy(context, vsa_id)
+
+
+def vsa_get(context, vsa_id):
+ """Get Virtual Storage Array record by ID."""
+ return IMPL.vsa_get(context, vsa_id)
+
+
+def vsa_get_all(context):
+ """Get all Virtual Storage Array records."""
+ return IMPL.vsa_get_all(context)
+
+
+def vsa_get_all_by_project(context, project_id):
+ """Get all Virtual Storage Array records by project ID."""
+ return IMPL.vsa_get_all_by_project(context, project_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 00af62682..09356e966 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -132,6 +132,20 @@ def require_instance_exists(f):
return wrapper
+def require_volume_exists(f):
+ """Decorator to require the specified volume to exist.
+
+ Requires the wrapped function to use context and volume_id as
+ its first two arguments.
+ """
+
+ def wrapper(context, volume_id, *args, **kwargs):
+ db.api.volume_get(context, volume_id)
+ return f(context, volume_id, *args, **kwargs)
+ wrapper.__name__ = f.__name__
+ return wrapper
+
+
###################
@@ -1035,11 +1049,11 @@ def virtual_interface_delete_by_instance(context, instance_id):
###################
-def _metadata_refs(metadata_dict):
+def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
- metadata_ref = models.InstanceMetadata()
+ metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
@@ -1053,8 +1067,8 @@ def instance_create(context, values):
context - request context object
values - dict containing column values.
"""
- values['metadata'] = _metadata_refs(values.get('metadata'))
-
+ values['metadata'] = _metadata_refs(values.get('metadata'),
+ models.InstanceMetadata)
instance_ref = models.Instance()
instance_ref['uuid'] = str(utils.gen_uuid())
@@ -2173,6 +2187,8 @@ def volume_attached(context, volume_id, instance_id, mountpoint):
@require_context
def volume_create(context, values):
+ values['volume_metadata'] = _metadata_refs(values.get('metadata'),
+ models.VolumeMetadata)
volume_ref = models.Volume()
volume_ref.update(values)
@@ -2209,6 +2225,11 @@ def volume_destroy(context, volume_id):
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
+ session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_admin_context
@@ -2232,12 +2253,16 @@ def volume_get(context, volume_id, session=None):
if is_admin_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
@@ -2253,6 +2278,8 @@ def volume_get_all(context):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2262,6 +2289,8 @@ def volume_get_all_by_host(context, host):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2271,6 +2300,8 @@ def volume_get_all_by_host(context, host):
def volume_get_all_by_instance(context, instance_id):
session = get_session()
result = session.query(models.Volume).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
@@ -2286,6 +2317,8 @@ def volume_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2298,6 +2331,8 @@ def volume_get_instance(context, volume_id):
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
@@ -2332,12 +2367,116 @@ def volume_get_iscsi_target_num(context, volume_id):
@require_context
def volume_update(context, volume_id, values):
session = get_session()
+ metadata = values.get('metadata')
+ if metadata is not None:
+ volume_metadata_update(context,
+ volume_id,
+ values.pop('metadata'),
+ delete=True)
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref.update(values)
volume_ref.save(session=session)
+####################
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_get(context, volume_id):
+ session = get_session()
+
+ meta_results = session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).\
+ all()
+
+ meta_dict = {}
+ for i in meta_results:
+ meta_dict[i['key']] = i['value']
+ return meta_dict
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_delete(context, volume_id, key):
+ session = get_session()
+ session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_delete_all(context, volume_id):
+ session = get_session()
+ session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_get_item(context, volume_id, key, session=None):
+ if not session:
+ session = get_session()
+
+ meta_result = session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not meta_result:
+ raise exception.VolumeMetadataNotFound(metadata_key=key,
+ volume_id=volume_id)
+ return meta_result
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_update(context, volume_id, metadata, delete):
+ session = get_session()
+
+ # Set existing metadata to deleted if delete argument is True
+ if delete:
+ original_metadata = volume_metadata_get(context, volume_id)
+ for meta_key, meta_value in original_metadata.iteritems():
+ if meta_key not in metadata:
+ meta_ref = volume_metadata_get_item(context, volume_id,
+ meta_key, session)
+ meta_ref.update({'deleted': True})
+ meta_ref.save(session=session)
+
+ meta_ref = None
+
+ # Now update all existing items with new values, or create new meta objects
+ for meta_key, meta_value in metadata.iteritems():
+
+ # update the value whether it exists or not
+ item = {"value": meta_value}
+
+ try:
+ meta_ref = volume_metadata_get_item(context, volume_id,
+ meta_key, session)
+ except exception.VolumeMetadataNotFound, e:
+ meta_ref = models.VolumeMetadata()
+ item.update({"key": meta_key, "volume_id": volume_id})
+
+ meta_ref.update(item)
+ meta_ref.save(session=session)
+
+ return metadata
+
+
###################
@@ -3172,7 +3311,7 @@ def instance_type_create(_context, values):
def _dict_with_extra_specs(inst_type_query):
- """Takes an instance type query returned by sqlalchemy
+ """Takes an instance OR volume type query returned by sqlalchemy
and returns it as a dictionary, converting the extra_specs
entry from a list of dicts:
@@ -3554,3 +3693,278 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
+
+
+##################
+
+
+@require_admin_context
+def volume_type_create(_context, values):
+ """Create a new instance type. In order to pass in extra specs,
+ the values dict should contain a 'extra_specs' key/value pair:
+
+ {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
+
+ """
+ try:
+ specs = values.get('extra_specs')
+
+ values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
+ models.VolumeTypeExtraSpecs)
+ volume_type_ref = models.VolumeTypes()
+ volume_type_ref.update(values)
+ volume_type_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return volume_type_ref
+
+
+@require_context
+def volume_type_get_all(context, inactive=False, filters={}):
+ """
+ Returns a dict describing all volume_types with name as key.
+ """
+ session = get_session()
+ if inactive:
+ vol_types = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ order_by("name").\
+ all()
+ else:
+ vol_types = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ filter_by(deleted=False).\
+ order_by("name").\
+ all()
+ vol_dict = {}
+ if vol_types:
+ for i in vol_types:
+ vol_dict[i['name']] = _dict_with_extra_specs(i)
+ return vol_dict
+
+
+@require_context
+def volume_type_get(context, id):
+ """Returns a dict describing specific volume_type"""
+ session = get_session()
+ vol_type = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ filter_by(id=id).\
+ first()
+
+ if not vol_type:
+ raise exception.VolumeTypeNotFound(volume_type=id)
+ else:
+ return _dict_with_extra_specs(vol_type)
+
+
+@require_context
+def volume_type_get_by_name(context, name):
+ """Returns a dict describing specific volume_type"""
+ session = get_session()
+ vol_type = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ filter_by(name=name).\
+ first()
+ if not vol_type:
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ else:
+ return _dict_with_extra_specs(vol_type)
+
+
+@require_admin_context
+def volume_type_destroy(context, name):
+ """ Marks specific volume_type as deleted"""
+ session = get_session()
+ volume_type_ref = session.query(models.VolumeTypes).\
+ filter_by(name=name)
+ records = volume_type_ref.update(dict(deleted=True))
+ if records == 0:
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ else:
+ return volume_type_ref
+
+
+@require_admin_context
+def volume_type_purge(context, name):
+ """ Removes specific volume_type from DB
+ Usually volume_type_destroy should be used
+ """
+ session = get_session()
+ volume_type_ref = session.query(models.VolumeTypes).\
+ filter_by(name=name)
+ records = volume_type_ref.delete()
+ if records == 0:
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ else:
+ return volume_type_ref
+
+
+####################
+
+
+@require_context
+def volume_type_extra_specs_get(context, volume_type_id):
+ session = get_session()
+
+ spec_results = session.query(models.VolumeTypeExtraSpecs).\
+ filter_by(volume_type_id=volume_type_id).\
+ filter_by(deleted=False).\
+ all()
+
+ spec_dict = {}
+ for i in spec_results:
+ spec_dict[i['key']] = i['value']
+ return spec_dict
+
+
+@require_context
+def volume_type_extra_specs_delete(context, volume_type_id, key):
+ session = get_session()
+ session.query(models.VolumeTypeExtraSpecs).\
+ filter_by(volume_type_id=volume_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def volume_type_extra_specs_get_item(context, volume_type_id, key,
+ session=None):
+
+ if not session:
+ session = get_session()
+
+ spec_result = session.query(models.VolumeTypeExtraSpecs).\
+ filter_by(volume_type_id=volume_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not spec_result:
+ raise exception.\
+ VolumeTypeExtraSpecsNotFound(extra_specs_key=key,
+ volume_type_id=volume_type_id)
+ return spec_result
+
+
+@require_context
+def volume_type_extra_specs_update_or_create(context, volume_type_id,
+ specs):
+ session = get_session()
+ spec_ref = None
+ for key, value in specs.iteritems():
+ try:
+ spec_ref = volume_type_extra_specs_get_item(
+ context, volume_type_id, key, session)
+ except exception.VolumeTypeExtraSpecsNotFound, e:
+ spec_ref = models.VolumeTypeExtraSpecs()
+ spec_ref.update({"key": key, "value": value,
+ "volume_type_id": volume_type_id,
+ "deleted": 0})
+ spec_ref.save(session=session)
+ return specs
+
+
+ ####################
+
+
+@require_admin_context
+def vsa_create(context, values):
+ """
+ Creates Virtual Storage Array record.
+ """
+ try:
+ vsa_ref = models.VirtualStorageArray()
+ vsa_ref.update(values)
+ vsa_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_update(context, vsa_id, values):
+ """
+ Updates Virtual Storage Array record.
+ """
+ session = get_session()
+ with session.begin():
+ vsa_ref = vsa_get(context, vsa_id, session=session)
+ vsa_ref.update(values)
+ vsa_ref.save(session=session)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_destroy(context, vsa_id):
+ """
+ Deletes Virtual Storage Array record.
+ """
+ session = get_session()
+ with session.begin():
+ session.query(models.VirtualStorageArray).\
+ filter_by(id=vsa_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def vsa_get(context, vsa_id, session=None):
+ """
+ Get Virtual Storage Array record by ID.
+ """
+ if not session:
+ session = get_session()
+ result = None
+
+ if is_admin_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.VirtualStorageArrayNotFound(id=vsa_id)
+
+ return result
+
+
+@require_admin_context
+def vsa_get_all(context):
+ """
+ Get all Virtual Storage Array records.
+ """
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def vsa_get_all_by_project(context, project_id):
+ """
+ Get all Virtual Storage Array records by project ID.
+ """
+ authorize_project_context(context, project_id)
+
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+ ####################
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
index a4fe3e482..56b287171 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -75,8 +75,8 @@ def new_style_quotas_table(name):
)
-def existing_quotas_table(migrate_engine):
- return Table('quotas', meta, autoload=True, autoload_with=migrate_engine)
+def quotas_table(migrate_engine, name='quotas'):
+ return Table(name, meta, autoload=True, autoload_with=migrate_engine)
def _assert_no_duplicate_project_ids(quotas):
@@ -179,13 +179,18 @@ def upgrade(migrate_engine):
# bind migrate_engine to your metadata
meta.bind = migrate_engine
- old_quotas = existing_quotas_table(migrate_engine)
+ old_quotas = quotas_table(migrate_engine)
assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
new_quotas = new_style_quotas_table('quotas_new')
new_quotas.create()
convert_forward(migrate_engine, old_quotas, new_quotas)
old_quotas.drop()
+
+ # clear metadata to work around this:
+ # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
+ meta.clear()
+ new_quotas = quotas_table(migrate_engine, 'quotas_new')
new_quotas.rename('quotas')
@@ -193,11 +198,16 @@ def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
- new_quotas = existing_quotas_table(migrate_engine)
+ new_quotas = quotas_table(migrate_engine)
assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
old_quotas = old_style_quotas_table('quotas_old')
old_quotas.create()
convert_backward(migrate_engine, old_quotas, new_quotas)
new_quotas.drop()
+
+ # clear metadata to work around this:
+ # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
+ meta.clear()
+ old_quotas = quotas_table(migrate_engine, 'quotas_old')
old_quotas.rename('quotas')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
index f3244033b..dfbd4ba32 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
@@ -40,13 +40,17 @@ def upgrade(migrate_engine):
migrations.create_column(new_instance_type_id)
# Convert flavor_id to instance_type_id
+ itypes = {}
for instance_type in migrate_engine.execute(instance_types.select()):
+ itypes[instance_type.id] = instance_type.flavorid
+
+ for instance_type_id in itypes.keys():
migrate_engine.execute(migrations.update()\
- .where(migrations.c.old_flavor_id == instance_type.flavorid)\
- .values(old_instance_type_id=instance_type.id))
+ .where(migrations.c.old_flavor_id == itypes[instance_type_id])\
+ .values(old_instance_type_id=instance_type_id))
migrate_engine.execute(migrations.update()\
- .where(migrations.c.new_flavor_id == instance_type.flavorid)\
- .values(new_instance_type_id=instance_type.id))
+ .where(migrations.c.new_flavor_id == itypes[instance_type_id])\
+ .values(new_instance_type_id=instance_type_id))
migrations.c.old_flavor_id.drop()
migrations.c.new_flavor_id.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py
new file mode 100644
index 000000000..d3058f00d
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 Piston Cloud Computing, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from nova import utils
+
+
+meta = MetaData()
+
+instances = Table("instances", meta,
+ Column("id", Integer(), primary_key=True, nullable=False))
+
+# matches the size of an image_ref
+config_drive_column = Column("config_drive", String(255), nullable=True)
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances.create_column(config_drive_column)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances.drop_column(config_drive_column)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py
new file mode 100644
index 000000000..dd4cccb9e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py
@@ -0,0 +1,115 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Text, Boolean, ForeignKey
+
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of tables.
+#
+
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+volume_type_id = Column('volume_type_id', Integer(), nullable=True)
+
+
+# New Tables
+#
+
+volume_types = Table('volume_types', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True))
+
+volume_type_extra_specs_table = Table('volume_type_extra_specs', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_type_id',
+ Integer(),
+ ForeignKey('volume_types.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+volume_metadata_table = Table('volume_metadata', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_id',
+ Integer(),
+ ForeignKey('volumes.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+new_tables = (volume_types,
+ volume_type_extra_specs_table,
+ volume_metadata_table)
+
+#
+# Tables to alter
+#
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ for table in new_tables:
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ volumes.create_column(volume_type_id)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ volumes.drop_column(volume_type_id)
+
+ for table in new_tables:
+ table.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
new file mode 100644
index 000000000..844643704
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Text, Boolean, ForeignKey
+
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+
+virtual_storage_arrays = Table('virtual_storage_arrays', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_type_id', Integer(), nullable=False),
+ Column('image_ref',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vc_count', Integer(), nullable=False),
+ Column('vol_count', Integer(), nullable=False),
+ Column('status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ virtual_storage_arrays.create()
+ except Exception:
+ logging.info(repr(virtual_storage_arrays))
+ logging.exception('Exception while creating table')
+ raise
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ virtual_storage_arrays.drop()
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index d9e303599..bb05986c9 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -64,7 +64,9 @@ def db_version():
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
- 'volumes'):
+ 'virtual_storage_arrays',
+ 'volumes', 'volume_metadata',
+ 'volume_types', 'volume_type_extra_specs'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 11b147802..dc6f85aad 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -230,6 +231,7 @@ class Instance(BASE, NovaBase):
uuid = Column(String(36))
root_device_name = Column(String(255))
+ config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
@@ -248,6 +250,32 @@ class Instance(BASE, NovaBase):
# 'shutdown', 'shutoff', 'crashed'])
+class VirtualStorageArray(BASE, NovaBase):
+ """
+ Represents a virtual storage array supplying block storage to instances.
+ """
+ __tablename__ = 'virtual_storage_arrays'
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return FLAGS.vsa_name_template % self.id
+
+ # User editable field for display in user-facing UIs
+ display_name = Column(String(255))
+ display_description = Column(String(255))
+
+ project_id = Column(String(255))
+ availability_zone = Column(String(255))
+
+ instance_type_id = Column(Integer, ForeignKey('instance_types.id'))
+ image_ref = Column(String(255))
+ vc_count = Column(Integer, default=0) # number of requested VC instances
+ vol_count = Column(Integer, default=0) # total number of BE volumes
+ status = Column(String(255))
+
+
class InstanceActions(BASE, NovaBase):
"""Represents a guest VM's actions and results"""
__tablename__ = "instance_actions"
@@ -277,6 +305,12 @@ class InstanceTypes(BASE, NovaBase):
primaryjoin='and_(Instance.instance_type_id == '
'InstanceTypes.id)')
+ vsas = relationship(VirtualStorageArray,
+ backref=backref('vsa_instance_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(VirtualStorageArray.instance_type_id'
+ ' == InstanceTypes.id)')
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
@@ -316,6 +350,50 @@ class Volume(BASE, NovaBase):
provider_location = Column(String(255))
provider_auth = Column(String(255))
+ volume_type_id = Column(Integer)
+
+
+class VolumeMetadata(BASE, NovaBase):
+ """Represents a metadata key/value pair for a volume"""
+ __tablename__ = 'volume_metadata'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=False)
+ volume = relationship(Volume, backref="volume_metadata",
+ foreign_keys=volume_id,
+ primaryjoin='and_('
+ 'VolumeMetadata.volume_id == Volume.id,'
+ 'VolumeMetadata.deleted == False)')
+
+
+class VolumeTypes(BASE, NovaBase):
+ """Represent possible volume_types of volumes offered"""
+ __tablename__ = "volume_types"
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255), unique=True)
+
+ volumes = relationship(Volume,
+ backref=backref('volume_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(Volume.volume_type_id == '
+ 'VolumeTypes.id)')
+
+
+class VolumeTypeExtraSpecs(BASE, NovaBase):
+ """Represents additional specs as key/value pairs for a volume_type"""
+ __tablename__ = 'volume_type_extra_specs'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
+ nullable=False)
+ volume_type = relationship(VolumeTypes, backref="extra_specs",
+ foreign_keys=volume_type_id,
+ primaryjoin='and_('
+ 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
+ 'VolumeTypeExtraSpecs.deleted == False)')
+
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
@@ -802,7 +880,9 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
+ VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs,
+ AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration,
+ VirtualStorageArray)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index 8d6e84d74..5a365897d 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -197,6 +197,10 @@ class InvalidInstanceType(Invalid):
message = _("Invalid instance type %(instance_type)s.")
+class InvalidVolumeType(Invalid):
+ message = _("Invalid volume type %(volume_type)s.")
+
+
class InvalidPortRange(Invalid):
message = _("Invalid port range %(from_port)s:%(to_port)s.")
@@ -338,6 +342,29 @@ class VolumeNotFoundForInstance(VolumeNotFound):
message = _("Volume not found for instance %(instance_id)s.")
+class VolumeMetadataNotFound(NotFound):
+ message = _("Volume %(volume_id)s has no metadata with "
+ "key %(metadata_key)s.")
+
+
+class NoVolumeTypesFound(NotFound):
+ message = _("Zero volume types found.")
+
+
+class VolumeTypeNotFound(NotFound):
+ message = _("Volume type %(volume_type_id)s could not be found.")
+
+
+class VolumeTypeNotFoundByName(VolumeTypeNotFound):
+ message = _("Volume type with name %(volume_type_name)s "
+ "could not be found.")
+
+
+class VolumeTypeExtraSpecsNotFound(NotFound):
+ message = _("Volume Type %(volume_type_id)s has no extra specs with "
+ "key %(extra_specs_key)s.")
+
+
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
@@ -760,6 +787,18 @@ class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+class VSANovaAccessParamNotFound(Invalid):
+ message = _("Nova access parameters were not specified.")
+
+
+class VirtualStorageArrayNotFound(NotFound):
+ message = _("Virtual Storage Array %(id)d could not be found.")
+
+
+class VirtualStorageArrayNotFoundByName(NotFound):
+ message = _("Virtual Storage Array %(name)s could not be found.")
+
+
class CannotResizeToSameSize(NovaException):
message = _("When resizing, instances must change size!")
diff --git a/nova/flags.py b/nova/flags.py
index 48d5e8168..a5951ebc8 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url',
in the form "http://127.0.0.1:8000"')
DEFINE_string('ajax_console_proxy_port',
8000, 'port that ajax_console_proxy binds')
+DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -371,6 +372,17 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
+DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
+ 'Manager for vsa')
+DEFINE_string('vc_image_name', 'vc_image',
+ 'the VC image ID (for a VC image that exists in DB Glance)')
+# VSA constants and enums
+DEFINE_string('default_vsa_instance_type', 'm1.small',
+ 'default instance type for VSA instances')
+DEFINE_integer('max_vcs_in_vsa', 32,
+ 'maximum VCs in a VSA')
+DEFINE_integer('vsa_part_size_gb', 100,
+ 'default partition size for shared capacity')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
@@ -402,3 +414,14 @@ DEFINE_bool('resume_guests_state_on_host_boot', False,
DEFINE_string('root_helper', 'sudo',
'Command prefix to use for running commands as root')
+
+DEFINE_bool('use_ipv6', False, 'use ipv6')
+
+DEFINE_bool('monkey_patch', False,
+ 'Whether to log monkey patching')
+
+DEFINE_list('monkey_patch_modules',
+ ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
+ 'nova.compute.api:nova.notifier.api.notify_decorator'],
+ 'Module list representing monkey '
+ 'patched module and decorator')
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
index 258678f0a..8a08510ac 100644
--- a/nova/ipv6/account_identifier.py
+++ b/nova/ipv6/account_identifier.py
@@ -34,8 +34,13 @@ def to_global(prefix, mac, project_id):
mac_addr = netaddr.IPAddress(int_addr)
maskIP = netaddr.IPNetwork(prefix).ip
return (project_hash ^ static_num ^ mac_addr | maskIP).format()
- except TypeError:
+ except netaddr.AddrFormatError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
+ except TypeError:
+ raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
+ except NameError:
+ raise TypeError(_('Bad project_id for to_global_ipv6: %s') %
+ project_id)
def to_mac(ipv6_address):
diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py
index 0074efe98..acf42d201 100644
--- a/nova/ipv6/rfc2462.py
+++ b/nova/ipv6/rfc2462.py
@@ -30,8 +30,10 @@ def to_global(prefix, mac, project_id):
maskIP = netaddr.IPNetwork(prefix).ip
return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
format()
- except TypeError:
+ except netaddr.AddrFormatError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
+ except TypeError:
+ raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
def to_mac(ipv6_address):
diff --git a/nova/log.py b/nova/log.py
index 222b8c5fb..eb0b6020f 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -32,6 +32,7 @@ import json
import logging
import logging.handlers
import os
+import stat
import sys
import traceback
@@ -257,7 +258,10 @@ class NovaRootLogger(NovaLogger):
self.filelog = WatchedFileHandler(logpath)
self.addHandler(self.filelog)
self.logpath = logpath
- os.chmod(self.logpath, FLAGS.logfile_mode)
+
+ st = os.stat(self.logpath)
+ if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode):
+ os.chmod(self.logpath, FLAGS.logfile_mode)
else:
self.removeHandler(self.filelog)
self.addHandler(self.streamlog)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index b778377a0..d1883ff8d 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -106,8 +106,6 @@ flags.DEFINE_integer('create_unique_mac_address_attempts', 5,
'Number of attempts to create unique mac address')
flags.DEFINE_bool('auto_assign_floating_ip', False,
'Autoassigning floating ip to VM')
-flags.DEFINE_bool('use_ipv6', False,
- 'use the ipv6')
flags.DEFINE_string('network_host', socket.gethostname(),
'Network host to use for ip allocation in flat modes')
flags.DEFINE_bool('fake_call', False,
@@ -486,17 +484,17 @@ class NetworkManager(manager.SchedulerDependentManager):
# TODO(tr3buchet) eventually "enabled" should be determined
def ip_dict(ip):
return {
- "ip": ip,
- "netmask": network["netmask"],
- "enabled": "1"}
+ 'ip': ip,
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
def ip6_dict():
return {
- "ip": ipv6.to_global(network['cidr_v6'],
+ 'ip': ipv6.to_global(network['cidr_v6'],
vif['address'],
network['project_id']),
- "netmask": network['netmask_v6'],
- "enabled": "1"}
+ 'netmask': network['netmask_v6'],
+ 'enabled': '1'}
network_dict = {
'bridge': network['bridge'],
'id': network['id'],
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index e18f3e280..6ef4a050e 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -25,6 +25,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('default_notification_level', 'INFO',
'Default notification level for outgoing notifications')
+flags.DEFINE_string('default_publisher_id', FLAGS.host,
+ 'Default publisher_id for outgoing notifications')
+
WARN = 'WARN'
INFO = 'INFO'
@@ -39,6 +42,30 @@ class BadPriorityException(Exception):
pass
+def notify_decorator(name, fn):
+ """ decorator for notify which is used from utils.monkey_patch()
+
+ :param name: name of the function
+ :param function: - object of the function
+ :returns: function -- decorated function
+
+ """
+ def wrapped_func(*args, **kwarg):
+ body = {}
+ body['args'] = []
+ body['kwarg'] = {}
+ for arg in args:
+ body['args'].append(arg)
+ for key in kwarg:
+ body['kwarg'][key] = kwarg[key]
+ notify(FLAGS.default_publisher_id,
+ name,
+ FLAGS.default_notification_level,
+ body)
+ return fn(*args, **kwarg)
+ return wrapped_func
+
+
def publisher_id(service, host=None):
if not host:
host = FLAGS.host
diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py
new file mode 100644
index 000000000..955ae1b57
--- /dev/null
+++ b/nova/notifier/list_notifier.py
@@ -0,0 +1,68 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.exception import ClassNotFound
+
+flags.DEFINE_multistring('list_notifier_drivers',
+ ['nova.notifier.no_op_notifier'],
+ 'List of drivers to send notifications')
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.notifier.list_notifier')
+
+drivers = None
+
+
+class ImportFailureNotifier(object):
+ """Noisily re-raises some exception over-and-over when notify is called."""
+
+ def __init__(self, exception):
+ self.exception = exception
+
+ def notify(self, message):
+ raise self.exception
+
+
+def _get_drivers():
+ """Instantiates and returns drivers based on the flag values."""
+ global drivers
+ if not drivers:
+ drivers = []
+ for notification_driver in FLAGS.list_notifier_drivers:
+ try:
+ drivers.append(utils.import_object(notification_driver))
+ except ClassNotFound as e:
+ drivers.append(ImportFailureNotifier(e))
+ return drivers
+
+
+def notify(message):
+ """Passes notification to multiple notifiers in a list."""
+ for driver in _get_drivers():
+ try:
+ driver.notify(message)
+ except Exception as e:
+ LOG.exception(_("Problem '%(e)s' attempting to send to "
+ "notification driver %(driver)s.") % locals())
+
+
+def _reset_drivers():
+ """Used by unit tests to reset the drivers."""
+ global drivers
+ drivers = None
diff --git a/nova/quota.py b/nova/quota.py
index 48e598659..771477747 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -116,8 +116,9 @@ def allowed_volumes(context, requested_volumes, size):
allowed_gigabytes = _get_request_allotment(requested_gigabytes,
used_gigabytes,
quota['gigabytes'])
- allowed_volumes = min(allowed_volumes,
- int(allowed_gigabytes // size))
+ if size != 0:
+ allowed_volumes = min(allowed_volumes,
+ int(allowed_gigabytes // size))
return min(requested_volumes, allowed_volumes)
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
new file mode 100644
index 000000000..6962dd86b
--- /dev/null
+++ b/nova/scheduler/vsa.py
@@ -0,0 +1,535 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VSA Simple Scheduler
+"""
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+from nova.scheduler import driver
+from nova.scheduler import simple
+from nova.vsa.api import VsaState
+from nova.volume import volume_types
+
+LOG = logging.getLogger('nova.scheduler.vsa')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
+ 'The percentage range for capacity comparison')
+flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
+ 'The number of unique hosts per storage allocation')
+flags.DEFINE_boolean('vsa_select_unique_drives', True,
+ 'Allow selection of same host for multiple drives')
+
+
def BYTES_TO_GB(bytes):
    """Convert a byte count to whole gigabytes (floor of bytes / 2**30)."""
    return bytes // (1 << 30)
+
+
def GB_TO_BYTES(gb):
    """Convert gigabytes to bytes (gb * 2**30)."""
    return gb * (1 << 30)
+
+
class VsaScheduler(simple.SimpleScheduler):
    """Implements Scheduler for volume placement.

    Extends the simple scheduler with capability-based placement of VSA
    drives: hosts are filtered by the drive QoS groups they report
    ('drive_qos_info' in the zone manager's service states) and selected
    by a subclass-provided host_selection_algorithm().
    """

    def __init__(self, *args, **kwargs):
        super(VsaScheduler, self).__init__(*args, **kwargs)
        # Ask all volume hosts to (re)publish their capabilities.
        self._notify_all_volume_hosts("startup")

    def _notify_all_volume_hosts(self, event):
        # Fan-out so every nova-volume service receives the event.
        rpc.fanout_cast(context.get_admin_context(),
                        FLAGS.volume_topic,
                        {"method": "notification",
                         "args": {"event": event}})

    def _qosgrp_match(self, drive_type, qos_values):
        # Return True when the host-reported QoS group matches the
        # requested drive type on every registered comparison.

        def _compare_names(str1, str2):
            # Case-insensitive drive-type name comparison.
            return str1.lower() == str2.lower()

        def _compare_sizes_approxim(cap_capacity, size):
            # Sizes match within +/- drive_type_approx_capacity_percent.
            cap_capacity = BYTES_TO_GB(int(cap_capacity))
            size = int(size)
            size_perc = size * \
                FLAGS.drive_type_approx_capacity_percent / 100

            return cap_capacity >= size - size_perc and \
                cap_capacity <= size + size_perc

        # Add more entries for additional comparisons
        compare_list = [{'cap1': 'DriveType',
                         'cap2': 'type',
                         'cmp_func': _compare_names},
                        {'cap1': 'DriveCapacity',
                         'cap2': 'size',
                         'cmp_func': _compare_sizes_approxim}]

        # Comparisons whose keys are absent on either side are treated
        # as non-matching (the else branch returns False).
        for cap in compare_list:
            if cap['cap1'] in qos_values.keys() and \
                cap['cap2'] in drive_type.keys() and \
                cap['cmp_func'] is not None and \
                cap['cmp_func'](qos_values[cap['cap1']],
                                drive_type[cap['cap2']]):
                pass
            else:
                return False
        return True

    def _get_service_states(self):
        # Capability snapshot maintained by the zone manager.
        return self.zone_manager.service_states

    def _filter_hosts(self, topic, request_spec, host_list=None):
        """Return (host, drive_qos_info) pairs matching the drive type.

        Only hosts reporting a QoS group that matches
        request_spec['drive_type'] with AvailableCapacity > 0 are kept.
        """

        LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())

        drive_type = request_spec['drive_type']
        LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])

        if host_list is None:
            host_list = self._get_service_states().iteritems()

        filtered_hosts = []  # returns list of (hostname, capability_dict)
        for host, host_dict in host_list:
            for service_name, service_dict in host_dict.iteritems():
                if service_name != topic:
                    continue

                gos_info = service_dict.get('drive_qos_info', {})
                for qosgrp, qos_values in gos_info.iteritems():
                    if self._qosgrp_match(drive_type, qos_values):
                        if qos_values['AvailableCapacity'] > 0:
                            filtered_hosts.append((host, gos_info))
                        else:
                            LOG.debug(_("Host %s has no free capacity. Skip"),
                                          host)
                        # First matching QoS group decides for this host.
                        break

        host_names = [item[0] for item in filtered_hosts]
        LOG.debug(_("Filter hosts: %s"), host_names)
        return filtered_hosts

    def _allowed_to_use_host(self, host, selected_hosts, unique):
        # When uniqueness is required, reject hosts already selected.
        if unique == False or \
           host not in [item[0] for item in selected_hosts]:
            return True
        else:
            return False

    def _add_hostcap_to_list(self, selected_hosts, host, cap):
        # Record the host at most once in the selected list.
        if host not in [item[0] for item in selected_hosts]:
            selected_hosts.append((host, cap))

    def host_selection_algorithm(self, request_spec, all_hosts,
                                 selected_hosts, unique):
        """Must override this method for VSA scheduler to work."""
        raise NotImplementedError(_("Must implement host selection mechanism"))

    def _select_hosts(self, request_spec, all_hosts, selected_hosts=None):
        """Pick one (host, qos_capability) pair for the request.

        Once vsa_unique_hosts_per_alloc hosts have been used, selection
        is first retried among the already-selected hosts; if that fails
        (or the cap is not reached) selection falls through to all hosts.
        Raises driver.WillNotSchedule when no host qualifies.
        """

        if selected_hosts is None:
            selected_hosts = []

        host = None
        if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc:
            # try to select from already selected hosts only
            LOG.debug(_("Maximum number of hosts selected (%d)"),
                        len(selected_hosts))
            unique = False
            (host, qos_cap) = self.host_selection_algorithm(request_spec,
                                                            selected_hosts,
                                                            selected_hosts,
                                                            unique)

            LOG.debug(_("Selected excessive host %(host)s"), locals())
        else:
            unique = FLAGS.vsa_select_unique_drives

        if host is None:
            # if we've not tried yet (# of sel hosts < max) - unique=True
            # or failed to select from selected_hosts - unique=False
            # select from all hosts
            (host, qos_cap) = self.host_selection_algorithm(request_spec,
                                                            all_hosts,
                                                            selected_hosts,
                                                            unique)
        if host is None:
            raise driver.WillNotSchedule(_("No available hosts"))

        return (host, qos_cap)

    def _provision_volume(self, context, vol, vsa_id, availability_zone):
        """Create the volume DB record and cast create_volume to its host.

        The volume is tagged with to_vsa_id metadata linking it to the
        owning VSA.
        """

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
            'host': vol['host'],
            'scheduled_at': now
            }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context, options)
        rpc.cast(context,
                 db.queue_get_for(context, "volume", vol['host']),
                 {"method": "create_volume",
                  "args": {"volume_id": volume_ref['id'],
                           "snapshot_id": None}})

    def _check_host_enforcement(self, context, availability_zone):
        """Honor an admin-forced 'zone:host' availability zone.

        Returns the forced host (verifying its nova-volume service is
        up), or None when no host is being forced.
        """
        if (availability_zone
            and ':' in availability_zone
            and context.is_admin):
            zone, _x, host = availability_zone.partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            return host
        else:
            return None

    def _assign_hosts_to_volumes(self, context, volume_params, forced_host):
        """Choose a host (and capability) for each volume in the list.

        Mutates each vol dict in place, setting 'host' and 'capabilities'
        and reserving capacity via _consume_resource.  The filtered host
        list is recomputed only when the volume type changes between
        consecutive volumes.
        """

        prev_volume_type_id = None
        request_spec = {}
        selected_hosts = []

        LOG.debug(_("volume_params %(volume_params)s") % locals())

        i = 1
        for vol in volume_params:
            name = vol['name']
            LOG.debug(_("%(i)d: Volume %(name)s"), locals())
            i += 1

            if forced_host:
                # Admin forced the host: no filtering or accounting.
                vol['host'] = forced_host
                vol['capabilities'] = None
                continue

            volume_type_id = vol['volume_type_id']
            request_spec['size'] = vol['size']

            if prev_volume_type_id is None or\
                prev_volume_type_id != volume_type_id:
                # generate list of hosts for this drive type

                volume_type = volume_types.get_volume_type(context,
                                                           volume_type_id)
                drive_type = {
                    'name': volume_type['extra_specs'].get('drive_name'),
                    'type': volume_type['extra_specs'].get('drive_type'),
                    'size': int(volume_type['extra_specs'].get('drive_size')),
                    'rpm': volume_type['extra_specs'].get('drive_rpm'),
                    }
                request_spec['drive_type'] = drive_type

                all_hosts = self._filter_hosts("volume", request_spec)
                prev_volume_type_id = volume_type_id

            (host, qos_cap) = self._select_hosts(request_spec,
                                                 all_hosts, selected_hosts)
            vol['host'] = host
            vol['capabilities'] = qos_cap
            # direction=-1: reserve capacity on the chosen host.
            self._consume_resource(qos_cap, vol['size'], -1)

    def schedule_create_volumes(self, context, request_spec,
                                availability_zone=None, *_args, **_kwargs):
        """Picks hosts for hosting multiple volumes."""

        num_volumes = request_spec.get('num_volumes')
        LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
                    locals())

        vsa_id = request_spec.get('vsa_id')
        volume_params = request_spec.get('volumes')

        host = self._check_host_enforcement(context, availability_zone)

        try:
            self._print_capabilities_info()

            self._assign_hosts_to_volumes(context, volume_params, host)

            for vol in volume_params:
                self._provision_volume(context, vol, vsa_id, availability_zone)
        except:
            # On any failure: mark the VSA failed and release the
            # capacity reserved for already-assigned volumes.
            if vsa_id:
                db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))

            for vol in volume_params:
                if 'capabilities' in vol:
                    self._consume_resource(vol['capabilities'],
                                           vol['size'], 1)
            raise

        return None

    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks the best host based on requested drive type capability."""
        volume_ref = db.volume_get(context, volume_id)

        host = self._check_host_enforcement(context,
                                            volume_ref['availability_zone'])
        if host:
            # Admin-forced host: schedule directly.
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host

        volume_type_id = volume_ref['volume_type_id']
        if volume_type_id:
            volume_type = volume_types.get_volume_type(context, volume_type_id)

        # Untyped and VSA front-end volumes use the base simple scheduler;
        # only VSA drives get capability-based placement below.
        if volume_type_id is None or\
                volume_types.is_vsa_volume(volume_type_id, volume_type):

            LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
            return super(VsaScheduler, self).schedule_create_volume(context,
                        volume_id, *_args, **_kwargs)

        self._print_capabilities_info()

        drive_type = {
            'name': volume_type['extra_specs'].get('drive_name'),
            'type': volume_type['extra_specs'].get('drive_type'),
            'size': int(volume_type['extra_specs'].get('drive_size')),
            'rpm': volume_type['extra_specs'].get('drive_rpm'),
            }

        LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
                    "%(drive_type)s"), locals())

        request_spec = {'size': volume_ref['size'],
                        'drive_type': drive_type}
        hosts = self._filter_hosts("volume", request_spec)

        try:
            (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
        except:
            # NOTE(review): assumes volume_ref exposes 'to_vsa_id' -- the
            # owning VSA -- so it can be marked FAILED; confirm schema.
            if volume_ref['to_vsa_id']:
                db.vsa_update(context, volume_ref['to_vsa_id'],
                              dict(status=VsaState.FAILED))
            raise

        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            self._consume_resource(qos_cap, volume_ref['size'], -1)
            return host

    def _consume_full_drive(self, qos_values, direction):
        # direction=-1 consumes a whole drive, +1 releases one.
        qos_values['FullDrive']['NumFreeDrives'] += direction
        qos_values['FullDrive']['NumOccupiedDrives'] -= direction

    def _consume_partition(self, qos_values, size, direction):
        """Adjust partition counters; size is in bytes.

        Consuming (-1) the last free partition takes a whole drive and
        credits its partitions; releasing (+1) a full drive's worth of
        partitions returns the drive.
        """

        if qos_values['PartitionDrive']['PartitionSize'] != 0:
            partition_size = qos_values['PartitionDrive']['PartitionSize']
        else:
            # Unpartitioned group: treat the requested size as one partition.
            partition_size = size
        part_per_drive = qos_values['DriveCapacity'] / partition_size

        if direction == -1 and \
           qos_values['PartitionDrive']['NumFreePartitions'] == 0:

            self._consume_full_drive(qos_values, direction)
            qos_values['PartitionDrive']['NumFreePartitions'] += \
                part_per_drive

        qos_values['PartitionDrive']['NumFreePartitions'] += direction
        qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction

        if direction == 1 and \
           qos_values['PartitionDrive']['NumFreePartitions'] >= \
           part_per_drive:

            self._consume_full_drive(qos_values, direction)
            qos_values['PartitionDrive']['NumFreePartitions'] -= \
                part_per_drive

    def _consume_resource(self, qos_values, size, direction):
        """Update cached capacity; size in GB, direction -1=take, +1=give."""
        if qos_values is None:
            LOG.debug(_("No capability selected for volume of size %(size)s"),
                        locals())
            return

        if size == 0:  # full drive match
            qos_values['AvailableCapacity'] += direction * \
                qos_values['DriveCapacity']
            self._consume_full_drive(qos_values, direction)
        else:
            qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size)
            self._consume_partition(qos_values, GB_TO_BYTES(size), direction)
        return

    def _print_capabilities_info(self):
        # Log per-host drive QoS capability summary (debug aid).
        host_list = self._get_service_states().iteritems()
        for host, host_dict in host_list:
            for service_name, service_dict in host_dict.iteritems():
                if service_name != "volume":
                    continue

                LOG.info(_("Host %s:"), host)

                gos_info = service_dict.get('drive_qos_info', {})
                for qosgrp, qos_values in gos_info.iteritems():
                    total = qos_values['TotalDrives']
                    used = qos_values['FullDrive']['NumOccupiedDrives']
                    free = qos_values['FullDrive']['NumFreeDrives']
                    avail = BYTES_TO_GB(qos_values['AvailableCapacity'])

                    LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\
                               "used %(used)2s, free %(free)2s. Available "\
                               "capacity %(avail)-5s"), locals())
+
+
class VsaSchedulerLeastUsedHost(VsaScheduler):
    """
    Implements VSA scheduler to select the host with least used capacity
    of particular type.
    """

    def __init__(self, *args, **kwargs):
        super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs)

    def host_selection_algorithm(self, request_spec, all_hosts,
                                 selected_hosts, unique):
        """Return (host, qos_values) with the least total used capacity.

        Used capacity is summed across ALL of a host's QoS groups; a host
        qualifies only if some group matches the drive type and has a
        free drive (size == 0) or enough available space/partitions.
        Returns (None, None) when no host qualifies.
        """
        size = request_spec['size']
        drive_type = request_spec['drive_type']
        best_host = None
        best_qoscap = None
        best_cap = None
        min_used = 0

        for (host, capabilities) in all_hosts:

            has_enough_capacity = False
            used_capacity = 0
            for qosgrp, qos_values in capabilities.iteritems():

                # Accumulate used capacity over every group on the host.
                used_capacity = used_capacity + qos_values['TotalCapacity'] \
                    - qos_values['AvailableCapacity']

                if self._qosgrp_match(drive_type, qos_values):
                    # we found required qosgroup

                    if size == 0:  # full drive match
                        if qos_values['FullDrive']['NumFreeDrives'] > 0:
                            has_enough_capacity = True
                            matched_qos = qos_values
                        else:
                            break
                    else:
                        if qos_values['AvailableCapacity'] >= size and \
                           (qos_values['PartitionDrive'][
                                        'NumFreePartitions'] > 0 or \
                            qos_values['FullDrive']['NumFreeDrives'] > 0):
                            has_enough_capacity = True
                            matched_qos = qos_values
                        else:
                            break

            if has_enough_capacity and \
               self._allowed_to_use_host(host,
                                         selected_hosts,
                                         unique) and \
               (best_host is None or used_capacity < min_used):

                min_used = used_capacity
                best_host = host
                best_qoscap = matched_qos
                best_cap = capabilities

        if best_host:
            self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
            min_used = BYTES_TO_GB(min_used)
            LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\
                        "(used capacity %(min_used)s)"), locals())
        return (best_host, best_qoscap)
+
+
class VsaSchedulerMostAvailCapacity(VsaScheduler):
    """
    Implements VSA scheduler to select the host with most available capacity
    of one particular type.
    """

    def __init__(self, *args, **kwargs):
        super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs)

    def host_selection_algorithm(self, request_spec, all_hosts,
                                 selected_hosts, unique):
        """Return (host, qos_values) with the most availability.

        Availability is measured in free drives for full-drive requests
        (size == 0) and in available bytes otherwise.  Only the first
        matching QoS group per host is considered.  Returns (None, None)
        when no host qualifies.
        """
        size = request_spec['size']
        drive_type = request_spec['drive_type']
        best_host = None
        best_qoscap = None
        best_cap = None
        max_avail = 0

        for (host, capabilities) in all_hosts:
            for qosgrp, qos_values in capabilities.iteritems():
                if self._qosgrp_match(drive_type, qos_values):
                    # we found required qosgroup

                    if size == 0:  # full drive match
                        available = qos_values['FullDrive']['NumFreeDrives']
                    else:
                        available = qos_values['AvailableCapacity']

                    if available > max_avail and \
                       self._allowed_to_use_host(host,
                                                 selected_hosts,
                                                 unique):
                        max_avail = available
                        best_host = host
                        best_qoscap = qos_values
                        best_cap = capabilities
                    break  # go to the next host

        if best_host:
            self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
            type_str = "drives" if size == 0 else "bytes"
            LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "\
                        "(available %(max_avail)s %(type_str)s)"), locals())

        return (best_host, best_qoscap)
diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py
new file mode 100644
index 000000000..311b6cb8d
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_vsa.py
@@ -0,0 +1,450 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import volume
+from nova import vsa
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.api.openstack.vsa')
+
+last_param = {}
+
+
+def _get_default_vsa_param():
+ return {
+ 'display_name': 'Test_VSA_name',
+ 'display_description': 'Test_VSA_description',
+ 'vc_count': 1,
+ 'instance_type': 'm1.small',
+ 'instance_type_id': 5,
+ 'image_name': None,
+ 'availability_zone': None,
+ 'storage': [],
+ 'shared': False
+ }
+
+
def stub_vsa_create(self, context, **param):
    """Fake vsa.api.API.create: record the params and echo them back."""
    global last_param
    LOG.debug(_("_create: param=%s"), param)
    # Mimic fields the real API would fill in on the created VSA.
    param.update(id=123, name='Test name', instance_type_id=5)
    last_param = param
    return param
+
+
def stub_vsa_delete(self, context, vsa_id):
    """Fake vsa.api.API.delete: record the id; 404 unless it is '123'."""
    global last_param
    last_param = {'vsa_id': vsa_id}

    LOG.debug(_("_delete: %s"), locals())
    if vsa_id != '123':
        raise exception.NotFound
+
+
def stub_vsa_get(self, context, vsa_id):
    """Fake vsa.api.API.get: default VSA for id '123', else NotFound."""
    global last_param
    last_param = {'vsa_id': vsa_id}

    LOG.debug(_("_get: %s"), locals())
    if vsa_id != '123':
        raise exception.NotFound

    result = _get_default_vsa_param()
    result['id'] = vsa_id
    return result
+
+
def stub_vsa_get_all(self, context):
    """Fake vsa.api.API.get_all: a single default VSA with id 123."""
    LOG.debug(_("_get_all: %s"), locals())
    vsa_param = _get_default_vsa_param()
    vsa_param['id'] = 123
    return [vsa_param]
+
+
class VSAApiTest(test.TestCase):
    """Tests for the /zadr-vsa REST extension (create/delete/show/list)."""

    def setUp(self):
        super(VSAApiTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)
        # Replace the whole VSA API with the module-level stubs; each stub
        # records its arguments in the global last_param for assertions.
        self.stubs.Set(vsa.api.API, "create", stub_vsa_create)
        self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete)
        self.stubs.Set(vsa.api.API, "get", stub_vsa_get)
        self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all)

        self.context = context.get_admin_context()

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VSAApiTest, self).tearDown()

    def test_vsa_create(self):
        global last_param
        last_param = {}

        vsa = {"displayName": "VSA Test Name",
               "displayDescription": "VSA Test Desc"}
        body = dict(vsa=vsa)
        req = webob.Request.blank('/v1.1/777/zadr-vsa')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)

        # Compare if parameters were correctly passed to stub
        self.assertEqual(last_param['display_name'], "VSA Test Name")
        self.assertEqual(last_param['display_description'], "VSA Test Desc")

        resp_dict = json.loads(resp.body)
        self.assertTrue('vsa' in resp_dict)
        self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName'])
        self.assertEqual(resp_dict['vsa']['displayDescription'],
                         vsa['displayDescription'])

    def test_vsa_create_no_body(self):
        # Missing 'vsa' body must be rejected as unprocessable.
        req = webob.Request.blank('/v1.1/777/zadr-vsa')
        req.method = 'POST'
        req.body = json.dumps({})
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 422)

    def test_vsa_delete(self):
        global last_param
        last_param = {}

        # The stub only accepts id '123'.
        vsa_id = 123
        req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
        req.method = 'DELETE'

        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(str(last_param['vsa_id']), str(vsa_id))

    def test_vsa_delete_invalid_id(self):
        global last_param
        last_param = {}

        vsa_id = 234
        req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
        req.method = 'DELETE'

        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(str(last_param['vsa_id']), str(vsa_id))

    def test_vsa_show(self):
        global last_param
        last_param = {}

        vsa_id = 123
        req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
        req.method = 'GET'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(str(last_param['vsa_id']), str(vsa_id))

        resp_dict = json.loads(resp.body)
        self.assertTrue('vsa' in resp_dict)
        self.assertEqual(resp_dict['vsa']['id'], str(vsa_id))

    def test_vsa_show_invalid_id(self):
        global last_param
        last_param = {}

        vsa_id = 234
        req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
        req.method = 'GET'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(str(last_param['vsa_id']), str(vsa_id))

    def test_vsa_index(self):
        req = webob.Request.blank('/v1.1/777/zadr-vsa')
        req.method = 'GET'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)

        resp_dict = json.loads(resp.body)

        # get_all stub returns exactly one VSA with id 123.
        self.assertTrue('vsaSet' in resp_dict)
        resp_vsas = resp_dict['vsaSet']
        self.assertEqual(len(resp_vsas), 1)

        resp_vsa = resp_vsas.pop()
        self.assertEqual(resp_vsa['id'], 123)

    def test_vsa_detail(self):
        req = webob.Request.blank('/v1.1/777/zadr-vsa/detail')
        req.method = 'GET'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)

        resp_dict = json.loads(resp.body)

        self.assertTrue('vsaSet' in resp_dict)
        resp_vsas = resp_dict['vsaSet']
        self.assertEqual(len(resp_vsas), 1)

        resp_vsa = resp_vsas.pop()
        self.assertEqual(resp_vsa['id'], 123)
+
+
+def _get_default_volume_param():
+ return {
+ 'id': 123,
+ 'status': 'available',
+ 'size': 100,
+ 'availability_zone': 'nova',
+ 'created_at': None,
+ 'attach_status': 'detached',
+ 'name': 'vol name',
+ 'display_name': 'Default vol name',
+ 'display_description': 'Default vol description',
+ 'volume_type_id': 1,
+ 'volume_metadata': [],
+ }
+
+
def stub_get_vsa_volume_type(self, context):
    """Fake get_vsa_volume_type: a fixed VSA volume-type record."""
    vol_type = {'id': 1, 'name': 'VSA volume type'}
    vol_type['extra_specs'] = {'type': 'vsa_volume'}
    return vol_type
+
+
def stub_volume_create(self, context, size, snapshot_id, name, description,
                       **param):
    """Fake volume create: default volume updated with the request args."""
    LOG.debug(_("_create: param=%s"), size)
    volume = _get_default_volume_param()
    volume['size'] = size
    volume['display_name'] = name
    volume['display_description'] = description
    return volume
+
+
def stub_volume_update(self, context, **param):
    """Fake volume update: log the parameters and do nothing."""
    LOG.debug(_("_volume_update: param=%s"), param)
+
+
def stub_volume_delete(self, context, **param):
    """Fake volume delete: log the parameters and do nothing."""
    LOG.debug(_("_volume_delete: param=%s"), param)
+
+
def stub_volume_get(self, context, volume_id):
    """Fake volume get: default volume tagged with VSA metadata.

    Volume id '345' carries a 'to_vsa_id' metadata entry; any other id
    carries 'from_vsa_id'.  Both point at VSA '123'.
    """
    LOG.debug(_("_volume_get: volume_id=%s"), volume_id)
    volume = _get_default_volume_param()
    volume['id'] = volume_id
    if volume_id == '345':
        meta = {'key': 'to_vsa_id', 'value': '123'}
    else:
        meta = {'key': 'from_vsa_id', 'value': '123'}
    volume['volume_metadata'].append(meta)
    return volume
+
+
def stub_volume_get_notfound(self, context, volume_id):
    # Fake volume get that always reports a missing volume.
    raise exception.NotFound
+
+
def stub_volume_get_all(self, context, search_opts):
    """Fake get_all: one volume echoing back the metadata search filter."""
    volume = stub_volume_get(self, context, '123')
    volume['metadata'] = search_opts['metadata']
    return [volume]
+
+
def return_vsa(context, vsa_id):
    """Fake db.api.vsa_get: minimal VSA record containing only the id."""
    return dict(id=vsa_id)
+
+
class VSAVolumeApiTest(test.TestCase):
    """Tests for the per-VSA volume sub-resource (/zadr-vsa/<id>/volumes).

    The same cases are re-run against the 'drives' sub-resource by the
    VSADriveApiTest subclass via the test_obj/test_objs setUp arguments;
    several expectations branch on which resource is under test.
    """

    def setUp(self, test_obj=None, test_objs=None):
        super(VSAVolumeApiTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)
        # DB-level VSA lookup and volume API are replaced by module stubs.
        self.stubs.Set(nova.db.api, 'vsa_get', return_vsa)
        self.stubs.Set(vsa.api.API, "get_vsa_volume_type",
                       stub_get_vsa_volume_type)

        self.stubs.Set(volume.api.API, "update", stub_volume_update)
        self.stubs.Set(volume.api.API, "delete", stub_volume_delete)
        self.stubs.Set(volume.api.API, "get", stub_volume_get)
        self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all)

        self.context = context.get_admin_context()
        self.test_obj = test_obj if test_obj else "volume"
        self.test_objs = test_objs if test_objs else "volumes"

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VSAVolumeApiTest, self).tearDown()

    def test_vsa_volume_create(self):
        self.stubs.Set(volume.api.API, "create", stub_volume_create)

        vol = {"size": 100,
               "displayName": "VSA Volume Test Name",
               "displayDescription": "VSA Volume Test Desc"}
        body = {self.test_obj: vol}
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(fakes.wsgi_app())

        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 200)

            resp_dict = json.loads(resp.body)
            self.assertTrue(self.test_obj in resp_dict)
            self.assertEqual(resp_dict[self.test_obj]['size'],
                             vol['size'])
            self.assertEqual(resp_dict[self.test_obj]['displayName'],
                             vol['displayName'])
            self.assertEqual(resp_dict[self.test_obj]['displayDescription'],
                             vol['displayDescription'])
        else:
            # Drive creation through this API is rejected.
            self.assertEqual(resp.status_int, 400)

    def test_vsa_volume_create_no_body(self):
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
        req.method = 'POST'
        req.body = json.dumps({})
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 422)
        else:
            self.assertEqual(resp.status_int, 400)

    def test_vsa_volume_index(self):
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)

    def test_vsa_volume_detail(self):
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \
                self.test_objs)
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)

    def test_vsa_volume_show(self):
        # stub_volume_get tags id 345 as to_vsa_id and others as
        # from_vsa_id, so pick the id that matches the resource kind.
        obj_num = 234 if self.test_objs == "volumes" else 345
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
                (self.test_objs, obj_num))
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)

    def test_vsa_volume_show_no_vsa_assignment(self):
        # Volume metadata points at VSA 123, not 4 -> bad request.
        req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
                (self.test_objs))
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 400)

    def test_vsa_volume_show_no_volume(self):
        self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)

        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
                (self.test_objs))
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 404)

    def test_vsa_volume_update(self):
        obj_num = 234 if self.test_objs == "volumes" else 345
        update = {"status": "available",
                  "displayName": "Test Display name"}
        body = {self.test_obj: update}
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
                (self.test_objs, obj_num))
        req.method = 'PUT'
        req.body = json.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 202)
        else:
            self.assertEqual(resp.status_int, 400)

    def test_vsa_volume_delete(self):
        obj_num = 234 if self.test_objs == "volumes" else 345
        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
                (self.test_objs, obj_num))
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 202)
        else:
            self.assertEqual(resp.status_int, 400)

    def test_vsa_volume_delete_no_vsa_assignment(self):
        req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
                (self.test_objs))
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 400)

    def test_vsa_volume_delete_no_volume(self):
        self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)

        req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
                (self.test_objs))
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 404)
        else:
            self.assertEqual(resp.status_int, 400)
+
+
class VSADriveApiTest(VSAVolumeApiTest):
    """Re-runs every VSAVolumeApiTest case against the 'drives' resource."""

    def setUp(self):
        super(VSADriveApiTest, self).setUp(test_obj="drive",
                                           test_objs="drives")

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VSADriveApiTest, self).tearDown()
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 9f923852d..05267d8fb 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -95,8 +95,10 @@ class ExtensionControllerTest(test.TestCase):
"Quotas",
"Rescue",
"SecurityGroups",
+ "VSAs",
"VirtualInterfaces",
"Volumes",
+ "VolumeTypes",
]
self.ext_list.sort()
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 90fe2f0b3..3dfdeb79c 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -1,14 +1,13 @@
import base64
+import datetime
import json
-import unittest
-from xml.dom import minidom
import stubout
import webob
from nova import context
-from nova import db
from nova import utils
+from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
from nova.compute import instance_types
@@ -23,61 +22,58 @@ FLAGS = flags.FLAGS
def return_server_by_id(context, id):
- return _get_instance()
+ return stub_instance(id)
def instance_update(context, instance_id, kwargs):
- return _get_instance()
+ return stub_instance(instance_id)
-def return_server_with_power_state(power_state):
+def return_server_with_attributes(**kwargs):
def _return_server(context, id):
- instance = _get_instance()
- instance['state'] = power_state
- return instance
+ return stub_instance(id, **kwargs)
return _return_server
+def return_server_with_power_state(power_state):
+ return return_server_with_attributes(power_state=power_state)
+
+
def return_server_with_uuid_and_power_state(power_state):
- def _return_server(context, id):
- return return_server_with_power_state(power_state)
- return _return_server
+ return return_server_with_power_state(power_state)
-class MockSetAdminPassword(object):
- def __init__(self):
- self.instance_id = None
- self.password = None
+def stub_instance(id, power_state=0, metadata=None,
+ image_ref="10", flavor_id="1", name=None):
- def __call__(self, context, instance_id, password):
- self.instance_id = instance_id
- self.password = password
+ if metadata is not None:
+ metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
+ else:
+ metadata_items = [{'key':'seq', 'value':id}]
+ inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))
-def _get_instance():
instance = {
- "id": 1,
- "created_at": "2010-10-10 12:00:00",
- "updated_at": "2010-11-11 11:00:00",
+ "id": int(id),
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
- "user_id": "",
- "project_id": "",
- "image_ref": "5",
+ "user_id": "fake",
+ "project_id": "fake",
+ "image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
+ "state": power_state,
"state_description": "",
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
"hostname": "",
"host": "",
- "instance_type": {
- "flavorid": 1,
- },
+ "instance_type": dict(inst_type),
"user_data": "",
"reservation_id": "",
"mac_address": "",
@@ -85,17 +81,34 @@ def _get_instance():
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"availability_zone": "",
- "display_name": "test_server",
+ "display_name": name or "server%s" % id,
"display_description": "",
"locked": False,
- "metadata": [],
- #"address": ,
- #"floating_ips": [{"address":ip} for ip in public_addresses]}
- "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"}
+ "metadata": metadata_items,
+ "access_ip_v4": "",
+ "access_ip_v6": "",
+ "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
+ "virtual_interfaces": [],
+ }
+
+ instance["fixed_ips"] = {
+ "address": '192.168.0.1',
+ "floating_ips": [],
+ }
return instance
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+
class ServerActionsTest(test.TestCase):
def setUp(self):
@@ -103,8 +116,6 @@ class ServerActionsTest(test.TestCase):
super(ServerActionsTest, self).setUp()
self.flags(verbose=True)
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
@@ -468,8 +479,6 @@ class ServerActionsTestV11(test.TestCase):
self.maxDiff = None
super(ServerActionsTestV11, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
@@ -606,6 +615,9 @@ class ServerActionsTestV11(test.TestCase):
self.assertEqual(res.status_int, 400)
def test_server_rebuild_accepted_minimum(self):
+ new_return_server = return_server_with_attributes(image_ref='2')
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -619,6 +631,9 @@ class ServerActionsTestV11(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ body = json.loads(res.body)
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(len(body['server']['adminPass']), 16)
def test_server_rebuild_rejected_when_building(self):
body = {
@@ -642,12 +657,15 @@ class ServerActionsTestV11(test.TestCase):
self.assertEqual(res.status_int, 409)
def test_server_rebuild_accepted_with_metadata(self):
+ metadata = {'new': 'metadata'}
+
+ new_return_server = return_server_with_attributes(metadata=metadata)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
- "metadata": {
- "new": "metadata",
- },
+ "metadata": metadata,
},
}
@@ -658,6 +676,8 @@ class ServerActionsTestV11(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ body = json.loads(res.body)
+ self.assertEqual(body['server']['metadata'], metadata)
def test_server_rebuild_accepted_with_bad_metadata(self):
body = {
@@ -727,6 +747,49 @@ class ServerActionsTestV11(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ body = json.loads(res.body)
+ self.assertTrue('personality' not in body['server'])
+
+ def test_server_rebuild_admin_pass(self):
+ new_return_server = return_server_with_attributes(image_ref='2')
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "adminPass": "asdf",
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/fake/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ body = json.loads(res.body)
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['adminPass'], 'asdf')
+
+ def test_server_rebuild_server_not_found(self):
+ def server_not_found(self, instance_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(nova.db.api, 'instance_get', server_not_found)
+
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/fake/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
def test_resize_server(self):
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index dd4b63a2c..3559e6de5 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
+# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -233,7 +234,6 @@ class MockSetAdminPassword(object):
class ServersTest(test.TestCase):
-
def setUp(self):
self.maxDiff = None
super(ServersTest, self).setUp()
@@ -265,6 +265,7 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
self.webreq = common.webob_factory('/v1.0/servers')
+ self.config_drive = None
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
@@ -379,6 +380,7 @@ class ServersTest(test.TestCase):
"metadata": {
"seq": "1",
},
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -545,6 +547,7 @@ class ServersTest(test.TestCase):
"metadata": {
"seq": "1",
},
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -638,6 +641,7 @@ class ServersTest(test.TestCase):
"metadata": {
"seq": "1",
},
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -767,6 +771,27 @@ class ServersTest(test.TestCase):
(ip,) = private_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private)
+ # NOTE(bcwaldon): lp830817
+ def test_get_server_by_id_malformed_networks_v1_1(self):
+ ifaces = [
+ {
+ 'network': None,
+ 'fixed_ips': [
+ {'address': '192.168.0.3'},
+ {'address': '192.168.0.4'},
+ ],
+ },
+ ]
+ new_return_server = return_server_with_attributes(interfaces=ifaces)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ req = webob.Request.blank('/v1.1/fake/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+
def test_get_server_by_id_with_addresses_v1_1(self):
self.flags(use_ipv6=True)
interfaces = [
@@ -1399,6 +1424,7 @@ class ServersTest(test.TestCase):
'image_ref': image_ref,
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "config_drive": self.config_drive,
}
def server_update(context, id, params):
@@ -1424,8 +1450,7 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
self.stubs.Set(nova.rpc, 'cast', fake_method)
self.stubs.Set(nova.rpc, 'call', fake_method)
- self.stubs.Set(nova.db.api, 'instance_update',
- server_update)
+ self.stubs.Set(nova.db.api, 'instance_update', server_update)
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
@@ -1768,6 +1793,129 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_create_instance_with_config_drive_v1_1(self):
+ self.config_drive = True
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/v1.1/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ 'config_drive': True,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ print res
+ self.assertEqual(res.status_int, 202)
+ server = json.loads(res.body)['server']
+ self.assertEqual(1, server['id'])
+ self.assertTrue(server['config_drive'])
+
+ def test_create_instance_with_config_drive_as_id_v1_1(self):
+ self.config_drive = 2
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/v1.1/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ 'config_drive': 2,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 202)
+ server = json.loads(res.body)['server']
+ self.assertEqual(1, server['id'])
+ self.assertTrue(server['config_drive'])
+ self.assertEqual(2, server['config_drive'])
+
+ def test_create_instance_with_bad_config_drive_v1_1(self):
+ self.config_drive = "asdf"
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/v1.1/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ 'config_drive': 'asdf',
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_without_config_drive_v1_1(self):
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/v1.1/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ 'config_drive': True,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ server = json.loads(res.body)['server']
+ self.assertEqual(1, server['id'])
+ self.assertFalse(server['config_drive'])
+
def test_create_instance_v1_1_bad_href(self):
self._setup_for_create_instance()
@@ -3449,6 +3597,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"href": "http://localhost/servers/1",
},
],
+ "config_drive": None,
}
}
@@ -3461,6 +3610,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"id": 1,
"uuid": self.instance['uuid'],
"name": "test_server",
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -3513,6 +3663,7 @@ class ServersViewBuilderV11Test(test.TestCase):
},
"addresses": {},
"metadata": {},
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -3566,6 +3717,7 @@ class ServersViewBuilderV11Test(test.TestCase):
},
"addresses": {},
"metadata": {},
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -3618,6 +3770,7 @@ class ServersViewBuilderV11Test(test.TestCase):
},
"addresses": {},
"metadata": {},
+ "config_drive": None,
"accessIPv4": "1.2.3.4",
"accessIPv6": "",
"links": [
@@ -3672,6 +3825,7 @@ class ServersViewBuilderV11Test(test.TestCase):
},
"addresses": {},
"metadata": {},
+ "config_drive": None,
"accessIPv4": "",
"accessIPv6": "fead::1234",
"links": [
@@ -3734,6 +3888,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"Open": "Stack",
"Number": "1",
},
+ "config_drive": None,
"links": [
{
"rel": "self",
@@ -4407,3 +4562,138 @@ class ServerXMLSerializationTest(test.TestCase):
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
+
+ def test_action(self):
+ serializer = servers.ServerXMLSerializer()
+
+ fixture = {
+ "server": {
+ "id": 1,
+ "uuid": FAKE_UUID,
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
+ "adminPass": "test_password",
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.IMAGE_BOOKMARK,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.FLAVOR_BOOKMARK,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ }
+ }
+
+ output = serializer.serialize(fixture, 'action')
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ expected_server_href = self.SERVER_HREF
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_image_bookmark = self.IMAGE_BOOKMARK
+ expected_flavor_bookmark = self.FLAVOR_BOOKMARK
+ expected_now = self.TIMESTAMP
+ expected_uuid = FAKE_UUID
+ server_dict = fixture['server']
+
+ for key in ['name', 'id', 'uuid', 'created', 'accessIPv4',
+ 'updated', 'progress', 'status', 'hostId',
+ 'accessIPv6', 'adminPass']:
+ self.assertEqual(root.get(key), str(server_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = server_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ image_root = root.find('{0}image'.format(NS))
+ self.assertEqual(image_root.get('id'), server_dict['image']['id'])
+ link_nodes = image_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 1)
+ for i, link in enumerate(server_dict['image']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ flavor_root = root.find('{0}flavor'.format(NS))
+ self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
+ link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 1)
+ for i, link in enumerate(server_dict['flavor']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ addresses_root = root.find('{0}addresses'.format(NS))
+ addresses_dict = server_dict['addresses']
+ network_elems = addresses_root.findall('{0}network'.format(NS))
+ self.assertEqual(len(network_elems), 2)
+ for i, network_elem in enumerate(network_elems):
+ network = addresses_dict.items()[i]
+ self.assertEqual(str(network_elem.get('id')), str(network[0]))
+ ip_elems = network_elem.findall('{0}ip'.format(NS))
+ for z, ip_elem in enumerate(ip_elems):
+ ip = network[1][z]
+ self.assertEqual(str(ip_elem.get('version')),
+ str(ip['version']))
+ self.assertEqual(str(ip_elem.get('addr')),
+ str(ip['addr']))
diff --git a/nova/tests/api/openstack/test_volume_types.py b/nova/tests/api/openstack/test_volume_types.py
new file mode 100644
index 000000000..192e66854
--- /dev/null
+++ b/nova/tests/api/openstack/test_volume_types.py
@@ -0,0 +1,171 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+
+from nova import exception
+from nova import context
+from nova import test
+from nova import log as logging
+from nova.volume import volume_types
+from nova.tests.api.openstack import fakes
+
+LOG = logging.getLogger('nova.tests.api.openstack.test_volume_types')
+
+last_param = {}
+
+
+def stub_volume_type(id):
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
+
+
+def return_volume_types_get_all_types(context):
+ return dict(vol_type_1=stub_volume_type(1),
+ vol_type_2=stub_volume_type(2),
+ vol_type_3=stub_volume_type(3))
+
+
+def return_empty_volume_types_get_all_types(context):
+ return {}
+
+
+def return_volume_types_get_volume_type(context, id):
+ if id == "777":
+ raise exception.VolumeTypeNotFound(volume_type_id=id)
+ return stub_volume_type(int(id))
+
+
+def return_volume_types_destroy(context, name):
+ if name == "777":
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ pass
+
+
+def return_volume_types_create(context, name, specs):
+ pass
+
+
+def return_volume_types_get_by_name(context, name):
+ if name == "777":
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ return stub_volume_type(int(name.split("_")[2]))
+
+
+class VolumeTypesApiTest(test.TestCase):
+ def setUp(self):
+ super(VolumeTypesApiTest, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VolumeTypesApiTest, self).tearDown()
+
+ def test_volume_types_index(self):
+ self.stubs.Set(volume_types, 'get_all_types',
+ return_volume_types_get_all_types)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+
+ self.assertEqual(3, len(res_dict))
+ for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']:
+ self.assertEqual(name, res_dict[name]['name'])
+ self.assertEqual('value1', res_dict[name]['extra_specs']['key1'])
+
+ def test_volume_types_index_no_data(self):
+ self.stubs.Set(volume_types, 'get_all_types',
+ return_empty_volume_types_get_all_types)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(0, len(res_dict))
+
+ def test_volume_types_show(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+ def test_volume_types_show_not_found(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/777')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_volume_types_delete(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ self.stubs.Set(volume_types, 'destroy',
+ return_volume_types_destroy)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_volume_types_delete_not_found(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ self.stubs.Set(volume_types, 'destroy',
+ return_volume_types_destroy)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/777')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(volume_types, 'create',
+ return_volume_types_create)
+ self.stubs.Set(volume_types, 'get_volume_type_by_name',
+ return_volume_types_get_by_name)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ req.method = 'POST'
+ req.body = '{"volume_type": {"name": "vol_type_1", '\
+ '"extra_specs": {"key1": "value1"}}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+ def test_create_empty_body(self):
+ self.stubs.Set(volume_types, 'create',
+ return_volume_types_create)
+ self.stubs.Set(volume_types, 'get_volume_type_by_name',
+ return_volume_types_get_by_name)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_volume_types_extra_specs.py b/nova/tests/api/openstack/test_volume_types_extra_specs.py
new file mode 100644
index 000000000..34bdada22
--- /dev/null
+++ b/nova/tests/api/openstack/test_volume_types_extra_specs.py
@@ -0,0 +1,181 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+import os.path
+
+
+from nova import test
+from nova.api import openstack
+from nova.api.openstack import extensions
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+
+def return_create_volume_type_extra_specs(context, volume_type_id,
+ extra_specs):
+ return stub_volume_type_extra_specs()
+
+
+def return_volume_type_extra_specs(context, volume_type_id):
+ return stub_volume_type_extra_specs()
+
+
+def return_empty_volume_type_extra_specs(context, volume_type_id):
+ return {}
+
+
+def delete_volume_type_extra_specs(context, volume_type_id, key):
+ pass
+
+
+def stub_volume_type_extra_specs():
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return specs
+
+
+class VolumeTypesExtraSpecsTest(test.TestCase):
+
+ def setUp(self):
+ super(VolumeTypesExtraSpecsTest, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.api_path = '/v1.1/123/os-volume-types/1/extra_specs'
+
+ def test_index(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_volume_type_extra_specs)
+ request = webob.Request.blank(self.api_path)
+ res = request.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_empty_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path)
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(0, len(res_dict['extra_specs']))
+
+ def test_show(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key5')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_spec_not_found(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_empty_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key6')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(404, res.status_int)
+
+ def test_delete(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_delete',
+ delete_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key5')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path)
+ req.method = 'POST'
+ req.body = '{"extra_specs": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path)
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ res_dict = json.loads(res.body)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key1')
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/bad')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index fb2f88502..343190427 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -22,10 +22,8 @@ Provides common functionality for integrated unit tests
import random
import string
-from nova import exception
from nova import service
from nova import test # For the flags
-from nova.auth import manager
import nova.image.glance
from nova.log import logging
from nova.tests.integrated.api import client
@@ -58,90 +56,6 @@ def generate_new_element(items, prefix, numeric=False):
LOG.debug("Random collision on %s" % candidate)
-class TestUser(object):
- def __init__(self, name, secret, auth_url):
- self.name = name
- self.secret = secret
- self.auth_url = auth_url
-
- if not auth_url:
- raise exception.Error("auth_url is required")
- self.openstack_api = client.TestOpenStackClient(self.name,
- self.secret,
- self.auth_url)
-
- def get_unused_server_name(self):
- servers = self.openstack_api.get_servers()
- server_names = [server['name'] for server in servers]
- return generate_new_element(server_names, 'server')
-
- def get_invalid_image(self):
- images = self.openstack_api.get_images()
- image_ids = [image['id'] for image in images]
- return generate_new_element(image_ids, '', numeric=True)
-
- def get_valid_image(self, create=False):
- images = self.openstack_api.get_images()
- if create and not images:
- # TODO(justinsb): No way currently to create an image through API
- #created_image = self.openstack_api.post_image(image)
- #images.append(created_image)
- raise exception.Error("No way to create an image through API")
-
- if images:
- return images[0]
- return None
-
-
-class IntegratedUnitTestContext(object):
- def __init__(self, auth_url):
- self.auth_manager = manager.AuthManager()
-
- self.auth_url = auth_url
- self.project_name = None
-
- self.test_user = None
-
- self.setup()
-
- def setup(self):
- self._create_test_user()
-
- def _create_test_user(self):
- self.test_user = self._create_unittest_user()
-
- # No way to currently pass this through the OpenStack API
- self.project_name = 'openstack'
- self._configure_project(self.project_name, self.test_user)
-
- def cleanup(self):
- self.test_user = None
-
- def _create_unittest_user(self):
- users = self.auth_manager.get_users()
- user_names = [user.name for user in users]
- auth_name = generate_new_element(user_names, 'unittest_user_')
- auth_key = generate_random_alphanumeric(16)
-
- # Right now there's a bug where auth_name and auth_key are reversed
- # bug732907
- auth_key = auth_name
-
- self.auth_manager.create_user(auth_name, auth_name, auth_key, False)
- return TestUser(auth_name, auth_key, self.auth_url)
-
- def _configure_project(self, project_name, user):
- projects = self.auth_manager.get_projects()
- project_names = [project.name for project in projects]
- if not project_name in project_names:
- project = self.auth_manager.create_project(project_name,
- user.name,
- description=None,
- member_users=None)
- else:
- self.auth_manager.add_to_project(user.name, project_name)
-
-
class _IntegratedTestBase(test.TestCase):
def setUp(self):
super(_IntegratedTestBase, self).setUp()
@@ -163,10 +77,7 @@ class _IntegratedTestBase(test.TestCase):
self._start_api_service()
- self.context = IntegratedUnitTestContext(self.auth_url)
-
- self.user = self.context.test_user
- self.api = self.user.openstack_api
+ self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url)
def _start_api_service(self):
osapi = service.WSGIService("osapi")
@@ -174,10 +85,6 @@ class _IntegratedTestBase(test.TestCase):
self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port)
LOG.warn(self.auth_url)
- def tearDown(self):
- self.context.cleanup()
- super(_IntegratedTestBase, self).tearDown()
-
def _get_flags(self):
"""An opportunity to setup flags, before the services are started."""
f = {}
@@ -190,10 +97,20 @@ class _IntegratedTestBase(test.TestCase):
f['fake_network'] = True
return f
+ def get_unused_server_name(self):
+ servers = self.api.get_servers()
+ server_names = [server['name'] for server in servers]
+ return generate_new_element(server_names, 'server')
+
+ def get_invalid_image(self):
+ images = self.api.get_images()
+ image_ids = [image['id'] for image in images]
+ return generate_new_element(image_ids, '', numeric=True)
+
def _build_minimal_create_server_request(self):
server = {}
- image = self.user.get_valid_image(create=True)
+ image = self.api.get_images()[0]
LOG.debug("Image: %s" % image)
if 'imageRef' in image:
@@ -211,7 +128,7 @@ class _IntegratedTestBase(test.TestCase):
server['flavorRef'] = 'http://fake.server/%s' % flavor['id']
# Set a valid server name
- server_name = self.user.get_unused_server_name()
+ server_name = self.get_unused_server_name()
server['name'] = server_name
return server
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
index 9d1925bc0..3a863d0f9 100644
--- a/nova/tests/integrated/test_login.py
+++ b/nova/tests/integrated/test_login.py
@@ -15,11 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
from nova.log import logging
from nova.tests.integrated import integrated_helpers
-from nova.tests.integrated.api import client
LOG = logging.getLogger('nova.tests.integrated')
@@ -31,40 +29,3 @@ class LoginTest(integrated_helpers._IntegratedTestBase):
flavors = self.api.get_flavors()
for flavor in flavors:
LOG.debug(_("flavor: %s") % flavor)
-
- def test_bad_login_password(self):
- """Test that I get a 401 with a bad username."""
- bad_credentials_api = client.TestOpenStackClient(self.user.name,
- "notso_password",
- self.user.auth_url)
-
- self.assertRaises(client.OpenStackApiAuthenticationException,
- bad_credentials_api.get_flavors)
-
- def test_bad_login_username(self):
- """Test that I get a 401 with a bad password."""
- bad_credentials_api = client.TestOpenStackClient("notso_username",
- self.user.secret,
- self.user.auth_url)
-
- self.assertRaises(client.OpenStackApiAuthenticationException,
- bad_credentials_api.get_flavors)
-
- def test_bad_login_both_bad(self):
- """Test that I get a 401 with both bad username and bad password."""
- bad_credentials_api = client.TestOpenStackClient("notso_username",
- "notso_password",
- self.user.auth_url)
-
- self.assertRaises(client.OpenStackApiAuthenticationException,
- bad_credentials_api.get_flavors)
-
- def test_good_login_bad_project(self):
- """Test that I get a 401 with valid user/pass but bad project"""
- self.api.project_id = 'openstackBAD'
-
- self.assertRaises(client.OpenStackApiAuthorizationException,
- self.api.get_flavors)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 725f6d529..b9382038a 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -51,7 +51,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
- server['imageRef'] = self.user.get_invalid_image()
+ server['imageRef'] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
@@ -193,7 +193,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# rebuild the server with metadata
post = {}
post['rebuild'] = {
- "imageRef": "https://localhost/v1.1/32278/images/2",
+ "imageRef": "https://localhost/v1.1/32278/images/3",
"name": "blah",
}
@@ -205,6 +205,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual('3', found_server.get('image')['id'])
# Cleanup
self._delete_server(created_server_id)
diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py
index d3e936462..d6c5e1ba1 100644
--- a/nova/tests/integrated/test_volumes.py
+++ b/nova/tests/integrated/test_volumes.py
@@ -285,6 +285,23 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
self.assertEquals(undisco_move['mountpoint'], device)
self.assertEquals(undisco_move['instance_id'], server_id)
+ def test_create_volume_with_metadata(self):
+ """Creates and deletes a volume."""
+
+ # Create volume
+ metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ created_volume = self.api.post_volume(
+ {'volume': {'size': 1,
+ 'metadata': metadata}})
+ LOG.debug("created_volume: %s" % created_volume)
+ self.assertTrue(created_volume['id'])
+ created_volume_id = created_volume['id']
+
+ # Check it's there and metadata present
+ found_volume = self.api.get_volume(created_volume_id)
+ self.assertEqual(created_volume_id, found_volume['id'])
+ self.assertEqual(metadata, found_volume['metadata'])
if __name__ == "__main__":
unittest.main()
diff --git a/nova/tests/monkey_patch_example/__init__.py b/nova/tests/monkey_patch_example/__init__.py
new file mode 100644
index 000000000..25cf9ccfe
--- /dev/null
+++ b/nova/tests/monkey_patch_example/__init__.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Example Module for testing utils.monkey_patch()."""
+
+
+CALLED_FUNCTION = []
+
+
+def example_decorator(name, function):
+ """ decorator for notify which is used from utils.monkey_patch()
+
+ :param name: name of the function
+ :param function: - object of the function
+ :returns: function -- decorated function
+ """
+ def wrapped_func(*args, **kwarg):
+ CALLED_FUNCTION.append(name)
+ return function(*args, **kwarg)
+ return wrapped_func
diff --git a/nova/tests/monkey_patch_example/example_a.py b/nova/tests/monkey_patch_example/example_a.py
new file mode 100644
index 000000000..21e79bcb0
--- /dev/null
+++ b/nova/tests/monkey_patch_example/example_a.py
@@ -0,0 +1,29 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Example Module A for testing utils.monkey_patch()."""
+
+
+def example_function_a():
+ return 'Example function'
+
+
+class ExampleClassA():
+ def example_method(self):
+ return 'Example method'
+
+ def example_method_add(self, arg1, arg2):
+ return arg1 + arg2
diff --git a/nova/tests/monkey_patch_example/example_b.py b/nova/tests/monkey_patch_example/example_b.py
new file mode 100644
index 000000000..9d8f6d339
--- /dev/null
+++ b/nova/tests/monkey_patch_example/example_b.py
@@ -0,0 +1,30 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Example Module B for testing utils.monkey_patch()."""
+
+
+def example_function_b():
+ return 'Example function'
+
+
+class ExampleClassB():
+ def example_method(self):
+ return 'Example method'
+
+ def example_method_add(self, arg1, arg2):
+ return arg1 + arg2
diff --git a/nova/tests/notifier/__init__.py b/nova/tests/notifier/__init__.py
new file mode 100644
index 000000000..bd862c46a
--- /dev/null
+++ b/nova/tests/notifier/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests import *
diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py
new file mode 100644
index 000000000..b77720759
--- /dev/null
+++ b/nova/tests/notifier/test_list_notifier.py
@@ -0,0 +1,88 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+import sys
+
+import nova
+from nova import log as logging
+import nova.notifier.api
+from nova.notifier.api import notify
+from nova.notifier import log_notifier
+from nova.notifier import no_op_notifier
+from nova.notifier import list_notifier
+from nova import test
+
+
+class NotifierListTestCase(test.TestCase):
+ """Test case for notifications"""
+
+ def setUp(self):
+ super(NotifierListTestCase, self).setUp()
+ list_notifier._reset_drivers()
+ self.stubs = stubout.StubOutForTesting()
+ # Mock log to add one to exception_count when log.exception is called
+
+ def mock_exception(cls, *args):
+ self.exception_count += 1
+
+ self.exception_count = 0
+ list_notifier_log = logging.getLogger('nova.notifier.list_notifier')
+ self.stubs.Set(list_notifier_log, "exception", mock_exception)
+ # Mock no_op notifier to add one to notify_count when called.
+
+ def mock_notify(cls, *args):
+ self.notify_count += 1
+
+ self.notify_count = 0
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify)
+ # Mock log_notifier to raise RuntimeError when called.
+
+ def mock_notify2(cls, *args):
+ raise RuntimeError("Bad notifier.")
+
+ self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ list_notifier._reset_drivers()
+ super(NotifierListTestCase, self).tearDown()
+
+ def test_send_notifications_successfully(self):
+ self.flags(notification_driver='nova.notifier.list_notifier',
+ list_notifier_drivers=['nova.notifier.no_op_notifier',
+ 'nova.notifier.no_op_notifier'])
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.notify_count, 2)
+ self.assertEqual(self.exception_count, 0)
+
+ def test_send_notifications_with_errors(self):
+
+ self.flags(notification_driver='nova.notifier.list_notifier',
+ list_notifier_drivers=['nova.notifier.no_op_notifier',
+ 'nova.notifier.log_notifier'])
+ notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.notify_count, 1)
+ self.assertEqual(self.exception_count, 1)
+
+ def test_when_driver_fails_to_import(self):
+ self.flags(notification_driver='nova.notifier.list_notifier',
+ list_notifier_drivers=['nova.notifier.no_op_notifier',
+ 'nova.notifier.logo_notifier',
+ 'fdsjgsdfhjkhgsfkj'])
+ notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.exception_count, 2)
+ self.assertEqual(self.notify_count, 1)
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
new file mode 100644
index 000000000..37964f00d
--- /dev/null
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -0,0 +1,641 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+import nova
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.volume import volume_types
+
+from nova.scheduler import vsa as vsa_sched
+from nova.scheduler import driver
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.scheduler.vsa')
+
+scheduled_volumes = []
+scheduled_volume = {}
+global_volume = {}
+
+
+class FakeVsaLeastUsedScheduler(
+ vsa_sched.VsaSchedulerLeastUsedHost):
+ # No need to stub anything at the moment
+ pass
+
+
+class FakeVsaMostAvailCapacityScheduler(
+ vsa_sched.VsaSchedulerMostAvailCapacity):
+ # No need to stub anything at the moment
+ pass
+
+
+class VsaSchedulerTestCase(test.TestCase):
+
+ def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
+ volume_params = []
+ for i in range(num_vols):
+
+ name = 'name_' + str(i)
+ try:
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ except exception.ApiError:
+ # type is already created
+ pass
+
+ volume_type = volume_types.get_volume_type_by_name(self.context,
+ name)
+ volume = {'size': size,
+ 'snapshot_id': None,
+ 'name': 'vol_' + str(i),
+ 'description': None,
+ 'volume_type_id': volume_type['id']}
+ volume_params.append(volume)
+
+ return {'num_volumes': len(volume_params),
+ 'vsa_id': 123,
+ 'volumes': volume_params}
+
+ def _generate_default_service_states(self):
+ service_states = {}
+ for i in range(self.host_num):
+ host = {}
+ hostname = 'host_' + str(i)
+ if hostname in self.exclude_host_list:
+ continue
+
+ host['volume'] = {'timestamp': utils.utcnow(),
+ 'drive_qos_info': {}}
+
+ for j in range(self.drive_type_start_ix,
+ self.drive_type_start_ix + self.drive_type_num):
+ dtype = {}
+ dtype['Name'] = 'name_' + str(j)
+ dtype['DriveType'] = 'type_' + str(j)
+ dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
+ dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
+ dtype['TotalCapacity'] = dtype['TotalDrives'] * \
+ dtype['DriveCapacity']
+ dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
+ dtype['DriveCapacity']
+ dtype['DriveRpm'] = 7200
+ dtype['DifCapable'] = 0
+ dtype['SedCapable'] = 0
+ dtype['PartitionDrive'] = {
+ 'PartitionSize': 0,
+ 'NumOccupiedPartitions': 0,
+ 'NumFreePartitions': 0}
+ dtype['FullDrive'] = {
+ 'NumFreeDrives': dtype['TotalDrives'] - i,
+ 'NumOccupiedDrives': i}
+ host['volume']['drive_qos_info'][dtype['Name']] = dtype
+
+ service_states[hostname] = host
+
+ return service_states
+
+ def _print_service_states(self):
+ for host, host_val in self.service_states.iteritems():
+ LOG.info(_("Host %s"), host)
+ total_used = 0
+ total_available = 0
+ qos = host_val['volume']['drive_qos_info']
+
+ for k, d in qos.iteritems():
+ LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
+ "size %3d, total %4d, used %4d, avail %d",
+ k, d['DriveType'],
+ d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
+ vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity']),
+ vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
+
+ total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity'])
+ total_available += vsa_sched.BYTES_TO_GB(
+ d['AvailableCapacity'])
+ LOG.info("Host %s: used %d, avail %d",
+ host, total_used, total_available)
+
+ def _set_service_states(self, host_num,
+ drive_type_start_ix, drive_type_num,
+ init_num_drives=10,
+ exclude_host_list=[]):
+ self.host_num = host_num
+ self.drive_type_start_ix = drive_type_start_ix
+ self.drive_type_num = drive_type_num
+ self.exclude_host_list = exclude_host_list
+ self.init_num_drives = init_num_drives
+ self.service_states = self._generate_default_service_states()
+
+ def _get_service_states(self):
+ return self.service_states
+
+ def _fake_get_service_states(self):
+ return self._get_service_states()
+
+ def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
+ global scheduled_volumes
+ scheduled_volumes.append(dict(vol=vol,
+ vsa_id=vsa_id,
+ az=availability_zone))
+ name = vol['name']
+ host = vol['host']
+ LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
+ locals())
+ LOG.debug(_("\t vol=%(vol)s"), locals())
+ pass
+
+ def _fake_vsa_update(self, context, vsa_id, values):
+ LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
+ "values=%(values)s"), locals())
+ pass
+
+ def _fake_volume_create(self, context, options):
+ LOG.debug(_("Test: Volume create: %s"), options)
+ options['id'] = 123
+ global global_volume
+ global_volume = options
+ return options
+
+ def _fake_volume_get(self, context, volume_id):
+ LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
+ global global_volume
+ global_volume['id'] = volume_id
+ global_volume['availability_zone'] = None
+ return global_volume
+
+ def _fake_volume_update(self, context, volume_id, values):
+ LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
+ "values=%(values)s"), locals())
+ global scheduled_volume
+ scheduled_volume = {'id': volume_id, 'host': values['host']}
+ pass
+
+ def _fake_service_get_by_args(self, context, host, binary):
+ return "service"
+
+ def _fake_service_is_up_True(self, service):
+ return True
+
+ def _fake_service_is_up_False(self, service):
+ return False
+
+ def setUp(self, sched_class=None):
+ super(VsaSchedulerTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.context = context.get_admin_context()
+
+ if sched_class is None:
+ self.sched = FakeVsaLeastUsedScheduler()
+ else:
+ self.sched = sched_class
+
+ self.host_num = 10
+ self.drive_type_num = 5
+
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(self.sched,
+ '_provision_volume', self._fake_provision_volume)
+ self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
+
+ self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
+ self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+
+ self.created_types_lst = []
+
+ def tearDown(self):
+ for name in self.created_types_lst:
+ volume_types.purge(self.context, name)
+
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCase, self).tearDown()
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_0', 'host_2', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_no_drive_type(self):
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_no_enough_drives(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=3,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=0)
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ # check that everything was returned back
+ cur = self._get_service_states()
+ for k, v in prev.iteritems():
+ self.assertEqual(prev[k]['volume']['drive_qos_info'],
+ cur[k]['volume']['drive_qos_info'])
+
+ def test_vsa_sched_wrong_topic(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ states = self._get_service_states()
+ new_states = {}
+ new_states['host_0'] = {'compute': states['host_0']['volume']}
+ self.service_states = new_states
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_provision_volume(self):
+ global global_volume
+ global_volume = {}
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(request_spec['volumes'][0]['name'],
+ global_volume['display_name'])
+
+ def test_vsa_sched_no_free_drives(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ cur = self._get_service_states()
+ cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)
+
+ new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+ self._print_service_states()
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ new_request,
+ availability_zone=None)
+
+ def test_vsa_sched_forced_host(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10)
+
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.assertRaises(exception.HostBinaryNotFound,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_False)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
+
+ def test_vsa_sched_create_single_volume_az(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ def _fake_volume_get_az(context, volume_id):
+ LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
+ return {'id': volume_id, 'availability_zone': 'nova:host_3'}
+
+ self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_3')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_3')
+
+ def test_vsa_sched_create_single_non_vsa_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ global global_volume
+ global_volume = {}
+ global_volume['volume_type_id'] = None
+
+ self.assertRaises(driver.NoValidHost,
+ self.sched.schedule_create_volume,
+ self.context,
+ 123,
+ availability_zone=None)
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_2')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_2')
+
+
+class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
+
+ def setUp(self):
+ super(VsaSchedulerTestCaseMostAvail, self).setUp(
+ FakeVsaMostAvailCapacityScheduler())
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_9')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_9')
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self._print_service_states()
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')
+
+ cur = self._get_service_states()
+ for host in ['host_9', 'host_8', 'host_7']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 4561eb7f2..1b3166af7 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -147,6 +147,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
'/services/Cloud'))
def test_can_get_credentials(self):
+ self.flags(use_deprecated_auth=True)
st = {'access': 'access', 'secret': 'secret'}
with user_and_project_generator(self.manager, user_state=st) as (u, p):
credentials = self.manager.get_environment_rc(u, p)
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 993a87f23..6659b81eb 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -159,9 +160,24 @@ class ComputeTestCase(test.TestCase):
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['id'])
+ def test_create_instance_associates_config_drive(self):
+ """Make sure create associates a config drive."""
+
+ instance_id = self._create_instance(params={'config_drive': True, })
+
+ try:
+ self.compute.run_instance(self.context, instance_id)
+ instances = db.instance_get_all(context.get_admin_context())
+ instance = instances[0]
+
+ self.assertTrue(instance.config_drive)
+ finally:
+ db.instance_destroy(self.context, instance_id)
+
def test_default_hostname_generator(self):
- cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'),
- ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')]
+ cases = [(None, 'server-1'), ('Hello, Server!', 'hello-server'),
+ ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
+ ('hello_server', 'hello-server')]
for display_name, hostname in cases:
ref = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index ef271518c..09f532239 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -47,6 +47,29 @@ class InstanceTypeTestCase(test.TestCase):
self.id = max_id["id"] + 1
self.name = str(int(time.time()))
+ def _nonexistent_flavor_name(self):
+ """return an instance type name not in the DB"""
+ nonexistent_flavor = "sdfsfsdf"
+ flavors = instance_types.get_all_types()
+ while nonexistent_flavor in flavors:
+ nonexistent_flavor += "z"
+ else:
+ return nonexistent_flavor
+
+ def _nonexistent_flavor_id(self):
+ """return an instance type ID not in the DB"""
+ nonexistent_flavor = 2700
+ flavor_ids = [value["id"] for key, value in\
+ instance_types.get_all_types().iteritems()]
+ while nonexistent_flavor in flavor_ids:
+ nonexistent_flavor += 1
+ else:
+ return nonexistent_flavor
+
+ def _existing_flavor(self):
+ """return first instance type name"""
+ return instance_types.get_all_types().keys()[0]
+
def test_instance_type_create_then_delete(self):
"""Ensure instance types can be created"""
starting_inst_list = instance_types.get_all_types()
@@ -84,10 +107,11 @@ class InstanceTypeTestCase(test.TestCase):
exception.InvalidInput,
instance_types.create, self.name, 256, 1, "aa", self.flavorid)
- def test_non_existant_inst_type_shouldnt_delete(self):
+ def test_non_existent_inst_type_shouldnt_delete(self):
"""Ensures that instance type creation fails with invalid args"""
self.assertRaises(exception.ApiError,
- instance_types.destroy, "sfsfsdfdfs")
+ instance_types.destroy,
+ self._nonexistent_flavor_name())
def test_repeated_inst_types_should_raise_api_error(self):
"""Ensures that instance duplicates raises ApiError"""
@@ -97,3 +121,43 @@ class InstanceTypeTestCase(test.TestCase):
self.assertRaises(
exception.ApiError,
instance_types.create, new_name, 256, 1, 120, self.flavorid)
+
+ def test_will_not_destroy_with_no_name(self):
+        """Ensure destroy with a nonexistent flavor name raises error"""
+ self.assertRaises(exception.ApiError,
+ instance_types.destroy,
+ self._nonexistent_flavor_name())
+
+ def test_will_not_purge_without_name(self):
+ """Ensure purge without a name raises error"""
+ self.assertRaises(exception.InvalidInstanceType,
+ instance_types.purge, None)
+
+ def test_will_not_purge_with_wrong_name(self):
+ """Ensure purge without correct name raises error"""
+ self.assertRaises(exception.ApiError,
+ instance_types.purge,
+ self._nonexistent_flavor_name())
+
+ def test_will_not_get_bad_default_instance_type(self):
+ """ensures error raised on bad default instance type"""
+ FLAGS.default_instance_type = self._nonexistent_flavor_name()
+ self.assertRaises(exception.InstanceTypeNotFoundByName,
+ instance_types.get_default_instance_type)
+
+ def test_will_not_get_instance_type_by_name_with_no_name(self):
+ """Ensure get by name returns default flavor with no name"""
+ self.assertEqual(instance_types.get_default_instance_type(),
+ instance_types.get_instance_type_by_name(None))
+
+ def test_will_not_get_instance_type_with_bad_name(self):
+        """Ensure get raises error for a nonexistent flavor name"""
+ self.assertRaises(exception.InstanceTypeNotFound,
+ instance_types.get_instance_type,
+ self._nonexistent_flavor_name())
+
+ def test_will_not_get_flavor_by_bad_flavor_id(self):
+        """Ensure get by name raises error when given a bad flavor id"""
+ self.assertRaises(exception.InstanceTypeNotFound,
+ instance_types.get_instance_type_by_name,
+ self._nonexistent_flavor_id())
diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py
index d123df6f1..e1ba4aafb 100644
--- a/nova/tests/test_ipv6.py
+++ b/nova/tests/test_ipv6.py
@@ -40,6 +40,25 @@ class IPv6RFC2462TestCase(test.TestCase):
mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455')
self.assertEquals(mac, '00:16:3e:33:44:55')
+ def test_to_global_with_bad_mac(self):
+ bad_mac = '02:16:3e:33:44:5Z'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::', bad_mac, 'test')
+
+ def test_to_global_with_bad_prefix(self):
+ bad_prefix = '82'
+ self.assertRaises(TypeError, ipv6.to_global,
+ bad_prefix,
+ '2001:db8::216:3eff:fe33:4455',
+ 'test')
+
+ def test_to_global_with_bad_project(self):
+ bad_project = 'non-existent-project-name'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::',
+ '2001:db8::a94a:8fe5:ff33:4455',
+ bad_project)
+
class IPv6AccountIdentiferTestCase(test.TestCase):
"""Unit tests for IPv6 account_identifier backend operations."""
@@ -55,3 +74,22 @@ class IPv6AccountIdentiferTestCase(test.TestCase):
def test_to_mac(self):
mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455')
self.assertEquals(mac, '02:16:3e:33:44:55')
+
+ def test_to_global_with_bad_mac(self):
+ bad_mac = '02:16:3e:33:44:5X'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::', bad_mac, 'test')
+
+ def test_to_global_with_bad_prefix(self):
+ bad_prefix = '78'
+ self.assertRaises(TypeError, ipv6.to_global,
+ bad_prefix,
+ '2001:db8::a94a:8fe5:ff33:4455',
+ 'test')
+
+ def test_to_global_with_bad_project(self):
+ bad_project = 'non-existent-project-name'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::',
+ '2001:db8::a94a:8fe5:ff33:4455',
+ bad_project)
diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py
index 64b799a2c..7de3a4a99 100644
--- a/nova/tests/test_notifier.py
+++ b/nova/tests/test_notifier.py
@@ -134,3 +134,24 @@ class NotifierTestCase(test.TestCase):
self.assertEqual(msg['event_type'], 'error_notification')
self.assertEqual(msg['priority'], 'ERROR')
self.assertEqual(msg['payload']['error'], 'foo')
+
+ def test_send_notification_by_decorator(self):
+ self.notify_called = False
+
+ def example_api(arg1, arg2):
+ return arg1 + arg2
+
+ example_api = nova.notifier.api.notify_decorator(
+ 'example_api',
+ example_api)
+
+ def mock_notify(cls, *args):
+ self.notify_called = True
+
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+ mock_notify)
+
+ class Mock(object):
+ pass
+ self.assertEqual(3, example_api(1, 2))
+ self.assertEqual(self.notify_called, True)
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
index f5ea68a03..520bfbea1 100644
--- a/nova/tests/test_nova_manage.py
+++ b/nova/tests/test_nova_manage.py
@@ -31,6 +31,7 @@ sys.dont_write_bytecode = False
import mox
import stubout
+import StringIO
from nova import context
from nova import db
from nova import exception
@@ -70,3 +71,156 @@ class FixedIpCommandsTestCase(test.TestCase):
self.assertRaises(SystemExit,
self.commands.unreserve,
'55.55.55.55')
+
+
+class NetworkCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkCommandsTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.commands = nova_manage.NetworkCommands()
+ self.context = context.get_admin_context()
+ self.net = {'id': 0,
+ 'label': 'fake',
+ 'injected': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': 'dead:beef::/64',
+ 'multi_host': False,
+ 'gateway_v6': 'dead:beef::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '8.8.8.8',
+ 'dns2': '8.8.4.4',
+ 'vlan': 200,
+ 'vpn_public_address': '10.0.0.2',
+ 'vpn_public_port': '2222',
+ 'vpn_private_address': '192.168.0.2',
+ 'dhcp_start': '192.168.0.3',
+ 'project_id': 'fake_project',
+ 'host': 'fake_host',
+ 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
+
+ def fake_network_get_by_cidr(context, cidr):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(cidr, self.fake_net['cidr'])
+ return db_fakes.FakeModel(self.fake_net)
+
+ def fake_network_update(context, network_id, values):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.assertEqual(values, self.fake_update_value)
+ self.fake_network_get_by_cidr = fake_network_get_by_cidr
+ self.fake_network_update = fake_network_update
+
+ def tearDown(self):
+ super(NetworkCommandsTestCase, self).tearDown()
+ self.stubs.UnsetAll()
+
+ def test_create(self):
+
+ def fake_create_networks(obj, context, **kwargs):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(kwargs['label'], 'Test')
+ self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
+ self.assertEqual(kwargs['multi_host'], False)
+ self.assertEqual(kwargs['num_networks'], 1)
+ self.assertEqual(kwargs['network_size'], 256)
+ self.assertEqual(kwargs['vlan_start'], 200)
+ self.assertEqual(kwargs['vpn_start'], 2000)
+ self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
+ self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
+ self.assertEqual(kwargs['bridge'], 'br200')
+ self.assertEqual(kwargs['bridge_interface'], 'eth0')
+ self.assertEqual(kwargs['dns1'], '8.8.8.8')
+ self.assertEqual(kwargs['dns2'], '8.8.4.4')
+ self.flags(network_manager='nova.network.manager.VlanManager')
+ from nova.network import manager as net_manager
+ self.stubs.Set(net_manager.VlanManager, 'create_networks',
+ fake_create_networks)
+ self.commands.create(
+ label='Test',
+ fixed_range_v4='10.2.0.0/24',
+ num_networks=1,
+ network_size=256,
+ multi_host='F',
+ vlan_start=200,
+ vpn_start=2000,
+ fixed_range_v6='fd00:2::/120',
+ gateway_v6='fd00:2::22',
+ bridge='br200',
+ bridge_interface='eth0',
+ dns1='8.8.8.8',
+ dns2='8.8.4.4')
+
+ def test_list(self):
+
+ def fake_network_get_all(context):
+ return [db_fakes.FakeModel(self.net)]
+ self.stubs.Set(db, 'network_get_all', fake_network_get_all)
+ output = StringIO.StringIO()
+ sys.stdout = output
+ self.commands.list()
+ sys.stdout = sys.__stdout__
+ result = output.getvalue()
+ _fmt = "%(id)-5s\t%(cidr)-18s\t%(cidr_v6)-15s\t%(dhcp_start)-15s\t" +\
+ "%(dns1)-15s\t%(dns2)-15s\t%(vlan)-15s\t%(project_id)-15s\t" +\
+ "%(uuid)-15s"
+ head = _fmt % {'id': _('id'),
+ 'cidr': _('IPv4'),
+ 'cidr_v6': _('IPv6'),
+ 'dhcp_start': _('start address'),
+ 'dns1': _('DNS1'),
+ 'dns2': _('DNS2'),
+ 'vlan': _('VlanID'),
+ 'project_id': _('project'),
+ 'uuid': _("uuid")}
+ body = _fmt % {'id': self.net['id'],
+ 'cidr': self.net['cidr'],
+ 'cidr_v6': self.net['cidr_v6'],
+ 'dhcp_start': self.net['dhcp_start'],
+ 'dns1': self.net['dns1'],
+ 'dns2': self.net['dns2'],
+ 'vlan': self.net['vlan'],
+ 'project_id': self.net['project_id'],
+ 'uuid': self.net['uuid']}
+ answer = '%s\n%s\n' % (head, body)
+ self.assertEqual(result, answer)
+
+ def test_delete(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(fixed_range=self.fake_net['cidr'])
+
+ def _test_modify_base(self, update_value, project, host, dis_project=None,
+ dis_host=None):
+ self.fake_net = self.net
+ self.fake_update_value = update_value
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+ self.stubs.Set(db, 'network_update', self.fake_network_update)
+ self.commands.modify(self.fake_net['cidr'], project=project, host=host,
+ dis_project=dis_project, dis_host=dis_host)
+
+ def test_modify_associate(self):
+ self._test_modify_base(update_value={'project_id': 'test_project',
+ 'host': 'test_host'},
+ project='test_project', host='test_host')
+
+ def test_modify_unchanged(self):
+ self._test_modify_base(update_value={}, project=None, host=None)
+
+ def test_modify_disassociate(self):
+ self._test_modify_base(update_value={'project_id': None, 'host': None},
+ project=None, host=None, dis_project=True,
+ dis_host=True)
diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py
new file mode 100644
index 000000000..237339758
--- /dev/null
+++ b/nova/tests/test_test_utils.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova import test
+from nova.tests import utils as test_utils
+
+
+class TestUtilsTestCase(test.TestCase):
+ def test_get_test_admin_context(self):
+ """get_test_admin_context's return value behaves like admin context"""
+ ctxt = test_utils.get_test_admin_context()
+
+ # TODO(soren): This should verify the full interface context
+ # objects expose.
+ self.assertTrue(ctxt.is_admin)
+
+ def test_get_test_instance(self):
+ """get_test_instance's return value looks like an instance_ref"""
+ instance_ref = test_utils.get_test_instance()
+ ctxt = test_utils.get_test_admin_context()
+ db.instance_get(ctxt, instance_ref['id'])
+
+ def _test_get_test_network_info(self):
+ """Does the return value match a real network_info structure"""
+ # The challenge here is to define what exactly such a structure
+ # must look like.
+ pass
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 28e366a8e..1ba794a1a 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -18,6 +18,7 @@ import datetime
import os
import tempfile
+import nova
from nova import exception
from nova import test
from nova import utils
@@ -394,3 +395,47 @@ class ToPrimitiveTestCase(test.TestCase):
self.assertTrue(ret[0].startswith(u"<module 'datetime' from "))
self.assertTrue(ret[1].startswith(u'<function foo at 0x'))
self.assertEquals(ret[2], u'<built-in function dir>')
+
+
+class MonkeyPatchTestCase(test.TestCase):
+ """Unit test for utils.monkey_patch()."""
+ def setUp(self):
+ super(MonkeyPatchTestCase, self).setUp()
+ self.example_package = 'nova.tests.monkey_patch_example.'
+ self.flags(
+ monkey_patch=True,
+ monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ + self.example_package + 'example_decorator'])
+
+ def test_monkey_patch(self):
+ utils.monkey_patch()
+ nova.tests.monkey_patch_example.CALLED_FUNCTION = []
+ from nova.tests.monkey_patch_example import example_a, example_b
+
+ self.assertEqual('Example function', example_a.example_function_a())
+ exampleA = example_a.ExampleClassA()
+ exampleA.example_method()
+ ret_a = exampleA.example_method_add(3, 5)
+ self.assertEqual(ret_a, 8)
+
+ self.assertEqual('Example function', example_b.example_function_b())
+ exampleB = example_b.ExampleClassB()
+ exampleB.example_method()
+ ret_b = exampleB.example_method_add(3, 5)
+
+ self.assertEqual(ret_b, 8)
+ package_a = self.example_package + 'example_a.'
+ self.assertTrue(package_a + 'example_function_a'
+ in nova.tests.monkey_patch_example.CALLED_FUNCTION)
+
+ self.assertTrue(package_a + 'ExampleClassA.example_method'
+ in nova.tests.monkey_patch_example.CALLED_FUNCTION)
+ self.assertTrue(package_a + 'ExampleClassA.example_method_add'
+ in nova.tests.monkey_patch_example.CALLED_FUNCTION)
+ package_b = self.example_package + 'example_b.'
+ self.assertFalse(package_b + 'example_function_b'
+ in nova.tests.monkey_patch_example.CALLED_FUNCTION)
+ self.assertFalse(package_b + 'ExampleClassB.example_method'
+ in nova.tests.monkey_patch_example.CALLED_FUNCTION)
+ self.assertFalse(package_b + 'ExampleClassB.example_method_add'
+ in nova.tests.monkey_patch_example.CALLED_FUNCTION)
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
new file mode 100644
index 000000000..4621b042b
--- /dev/null
+++ b/nova/tests/test_versions.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova import version
+
+
+class VersionTestCase(test.TestCase):
+ """Test cases for Versions code"""
+ def setUp(self):
+ """setup test with unchanging values"""
+ super(VersionTestCase, self).setUp()
+ self.version = version
+ self.version.FINAL = False
+ self.version.NOVA_VERSION = ['2012', '10']
+ self.version.YEAR, self.version.COUNT = self.version.NOVA_VERSION
+ self.version.version_info = {'branch_nick': u'LOCALBRANCH',
+ 'revision_id': 'LOCALREVISION',
+ 'revno': 0}
+
+ def test_version_string_is_good(self):
+ """Ensure version string works"""
+ self.assertEqual("2012.10-dev", self.version.version_string())
+
+ def test_canonical_version_string_is_good(self):
+ """Ensure canonical version works"""
+ self.assertEqual("2012.10", self.version.canonical_version_string())
+
+ def test_final_version_strings_are_identical(self):
+ """Ensure final version strings match only at release"""
+ self.assertNotEqual(self.version.canonical_version_string(),
+ self.version.version_string())
+ self.version.FINAL = True
+ self.assertEqual(self.version.canonical_version_string(),
+ self.version.version_string())
+
+ def test_vcs_version_string_is_good(self):
+        """Ensure uninstalled code generates local version string"""
+ self.assertEqual("LOCALBRANCH:LOCALREVISION",
+ self.version.vcs_version_string())
+
+ def test_version_string_with_vcs_is_good(self):
+ """Ensure uninstalled code get version string"""
+ self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION",
+ self.version.version_string_with_vcs())
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
new file mode 100644
index 000000000..480247c91
--- /dev/null
+++ b/nova/tests/test_virt_drivers.py
@@ -0,0 +1,489 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import netaddr
+import sys
+import traceback
+
+from nova import exception
+from nova import flags
+from nova import image
+from nova import log as logging
+from nova import test
+from nova.tests import utils as test_utils
+
+libvirt = None
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.test_virt_drivers')
+
+
+def catch_notimplementederror(f):
+ """Decorator to simplify catching drivers raising NotImplementedError
+
+ If a particular call makes a driver raise NotImplementedError, we
+ log it so that we can extract this information afterwards to
+ automatically generate a hypervisor/feature support matrix."""
+ def wrapped_func(self, *args, **kwargs):
+ try:
+ return f(self, *args, **kwargs)
+ except NotImplementedError:
+ frame = traceback.extract_tb(sys.exc_info()[2])[-1]
+ LOG.error('%(driver)s does not implement %(method)s' % {
+ 'driver': type(self.connection),
+ 'method': frame[2]})
+
+ wrapped_func.__name__ = f.__name__
+ wrapped_func.__doc__ = f.__doc__
+ return wrapped_func
+
+
+class _VirtDriverTestCase(test.TestCase):
+ def setUp(self):
+ super(_VirtDriverTestCase, self).setUp()
+ self.connection = self.driver_module.get_connection('')
+ self.ctxt = test_utils.get_test_admin_context()
+ self.image_service = image.get_default_image_service()
+
+ @catch_notimplementederror
+ def test_init_host(self):
+ self.connection.init_host('myhostname')
+
+ @catch_notimplementederror
+ def test_list_instances(self):
+ self.connection.list_instances()
+
+ @catch_notimplementederror
+ def test_list_instances_detail(self):
+ self.connection.list_instances_detail()
+
+ @catch_notimplementederror
+ def test_spawn(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+
+ domains = self.connection.list_instances()
+ self.assertIn(instance_ref['name'], domains)
+
+ domains_details = self.connection.list_instances_detail()
+ self.assertIn(instance_ref['name'], [i.name for i in domains_details])
+
+ @catch_notimplementederror
+ def test_snapshot_not_running(self):
+ instance_ref = test_utils.get_test_instance()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.assertRaises(exception.InstanceNotRunning,
+ self.connection.snapshot,
+ self.ctxt, instance_ref, img_ref['id'])
+
+ @catch_notimplementederror
+ def test_snapshot_running(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])
+
+ @catch_notimplementederror
+ def test_reboot(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.reboot(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_get_host_ip_addr(self):
+ host_ip = self.connection.get_host_ip_addr()
+
+ # Will raise an exception if it's not a valid IP at all
+ ip = netaddr.IPAddress(host_ip)
+
+ # For now, assume IPv4.
+ self.assertEquals(ip.version, 4)
+
+ @catch_notimplementederror
+ def test_resize_running(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.resize(instance_ref, 7)
+
+ @catch_notimplementederror
+ def test_set_admin_password(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.set_admin_password(instance_ref, 'p4ssw0rd')
+
+ @catch_notimplementederror
+ def test_inject_file(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.inject_file(instance_ref,
+ base64.b64encode('/testfile'),
+ base64.b64encode('testcontents'))
+
+ @catch_notimplementederror
+ def test_agent_update(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.agent_update(instance_ref, 'http://www.openstack.org/',
+ 'd41d8cd98f00b204e9800998ecf8427e')
+
+ @catch_notimplementederror
+ def test_rescue(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.rescue(self.ctxt, instance_ref,
+ lambda x: None, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_unrescued_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.unrescue(instance_ref, lambda x: None, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_rescued_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.rescue(self.ctxt, instance_ref,
+ lambda x: None, network_info)
+ self.connection.unrescue(instance_ref, lambda x: None, network_info)
+
+ @catch_notimplementederror
+ def test_poll_rescued_instances(self):
+ self.connection.poll_rescued_instances(10)
+
+ @catch_notimplementederror
+ def test_migrate_disk_and_power_off(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.migrate_disk_and_power_off(instance_ref, 'dest_host')
+
+ @catch_notimplementederror
+ def test_pause(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.pause(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_unpause_unpaused_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.unpause(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_unpause_paused_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.pause(instance_ref, None)
+ self.connection.unpause(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_suspend(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.suspend(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_resume_unsuspended_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.resume(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_resume_suspended_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.suspend(instance_ref, None)
+ self.connection.resume(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_destroy_instance_nonexistant(self):
+ fake_instance = {'id': 42, 'name': 'I just made this up!'}
+ network_info = test_utils.get_test_network_info()
+ self.connection.destroy(fake_instance, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.assertIn(instance_ref['name'],
+ self.connection.list_instances())
+ self.connection.destroy(instance_ref, network_info)
+ self.assertNotIn(instance_ref['name'],
+ self.connection.list_instances())
+
+ @catch_notimplementederror
+ def test_attach_detach_volume(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.attach_volume(instance_ref['name'],
+ '/dev/null', '/mnt/nova/something')
+ self.connection.detach_volume(instance_ref['name'],
+ '/mnt/nova/something')
+
+ @catch_notimplementederror
+ def test_get_info(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ info = self.connection.get_info(instance_ref['name'])
+ self.assertIn('state', info)
+ self.assertIn('max_mem', info)
+ self.assertIn('mem', info)
+ self.assertIn('num_cpu', info)
+ self.assertIn('cpu_time', info)
+
+ @catch_notimplementederror
+ def test_get_info_for_unknown_instance(self):
+ self.assertRaises(exception.NotFound,
+ self.connection.get_info, 'I just made this name up')
+
+ @catch_notimplementederror
+ def test_get_diagnostics(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.get_diagnostics(instance_ref['name'])
+
+ @catch_notimplementederror
+ def test_list_disks(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.list_disks(instance_ref['name'])
+
+ @catch_notimplementederror
+ def test_list_interfaces(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.list_interfaces(instance_ref['name'])
+
+ @catch_notimplementederror
+ def test_block_stats(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ stats = self.connection.block_stats(instance_ref['name'], 'someid')
+ self.assertEquals(len(stats), 5)
+
+ @catch_notimplementederror
+ def test_interface_stats(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ stats = self.connection.interface_stats(instance_ref['name'], 'someid')
+ self.assertEquals(len(stats), 8)
+
+ @catch_notimplementederror
+ def test_get_console_output(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ console_output = self.connection.get_console_output(instance_ref)
+ self.assertTrue(isinstance(console_output, basestring))
+
+ @catch_notimplementederror
+ def test_get_ajax_console(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ ajax_console = self.connection.get_ajax_console(instance_ref)
+ self.assertIn('token', ajax_console)
+ self.assertIn('host', ajax_console)
+ self.assertIn('port', ajax_console)
+
+ @catch_notimplementederror
+ def test_get_vnc_console(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ vnc_console = self.connection.get_vnc_console(instance_ref)
+ self.assertIn('token', vnc_console)
+ self.assertIn('host', vnc_console)
+ self.assertIn('port', vnc_console)
+
+ @catch_notimplementederror
+ def test_get_console_pool_info(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ console_pool = self.connection.get_console_pool_info(instance_ref)
+ self.assertIn('address', console_pool)
+ self.assertIn('username', console_pool)
+ self.assertIn('password', console_pool)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_rules(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ # FIXME: Create security group and add the instance to it
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.refresh_security_group_rules(1)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_members(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ # FIXME: Create security group and add the instance to it
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.refresh_security_group_members(1)
+
+ @catch_notimplementederror
+ def test_refresh_provider_fw_rules(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.refresh_provider_fw_rules()
+
+ @catch_notimplementederror
+ def test_update_available_resource(self):
+ self.compute = self.start_service('compute', host='dummy')
+ self.connection.update_available_resource(self.ctxt, 'dummy')
+
+ @catch_notimplementederror
+ def test_compare_cpu(self):
+ cpu_info = '''{ "topology": {
+ "sockets": 1,
+ "cores": 2,
+ "threads": 1 },
+ "features": [
+ "xtpr",
+ "tm2",
+ "est",
+ "vmx",
+ "ds_cpl",
+ "monitor",
+ "pbe",
+ "tm",
+ "ht",
+ "ss",
+ "acpi",
+ "ds",
+ "vme"],
+ "arch": "x86_64",
+ "model": "Penryn",
+ "vendor": "Intel" }'''
+
+ self.connection.compare_cpu(cpu_info)
+
+ @catch_notimplementederror
+ def test_ensure_filtering_for_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_unfilter_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.unfilter_instance(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_live_migration(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
+ None, None)
+
+ @catch_notimplementederror
+ def _check_host_status_fields(self, host_status):
+ self.assertIn('host_name-description', host_status)
+ self.assertIn('host_hostname', host_status)
+ self.assertIn('host_memory_total', host_status)
+ self.assertIn('host_memory_overhead', host_status)
+ self.assertIn('host_memory_free', host_status)
+ self.assertIn('host_memory_free_computed', host_status)
+ self.assertIn('host_other_config', host_status)
+ self.assertIn('host_ip_address', host_status)
+ self.assertIn('host_cpu_info', host_status)
+ self.assertIn('disk_available', host_status)
+ self.assertIn('disk_total', host_status)
+ self.assertIn('disk_used', host_status)
+ self.assertIn('host_uuid', host_status)
+ self.assertIn('host_name_label', host_status)
+
+ @catch_notimplementederror
+ def test_update_host_status(self):
+ host_status = self.connection.update_host_status()
+ self._check_host_status_fields(host_status)
+
+ @catch_notimplementederror
+ def test_get_host_stats(self):
+ host_status = self.connection.get_host_stats()
+ self._check_host_status_fields(host_status)
+
+ @catch_notimplementederror
+ def test_set_host_enabled(self):
+ self.connection.set_host_enabled('a useless argument?', True)
+
+ @catch_notimplementederror
+ def test_host_power_action_reboot(self):
+ self.connection.host_power_action('a useless argument?', 'reboot')
+
+ @catch_notimplementederror
+ def test_host_power_action_shutdown(self):
+ self.connection.host_power_action('a useless argument?', 'shutdown')
+
+ @catch_notimplementederror
+ def test_host_power_action_startup(self):
+ self.connection.host_power_action('a useless argument?', 'startup')
+
+
+class AbstractDriverTestCase(_VirtDriverTestCase):
+ def setUp(self):
+ import nova.virt.driver
+
+ self.driver_module = nova.virt.driver
+
+ def get_driver_connection(_):
+ return nova.virt.driver.ComputeDriver()
+
+ self.driver_module.get_connection = get_driver_connection
+ super(AbstractDriverTestCase, self).setUp()
+
+
+class FakeConnectionTestCase(_VirtDriverTestCase):
+ def setUp(self):
+ import nova.virt.fake
+ self.driver_module = nova.virt.fake
+ super(FakeConnectionTestCase, self).setUp()
+
+# Before long, we'll add the real hypervisor drivers here as well
+# with whatever instrumentation they need to work independently of
+# their hypervisor. This way, we can verify that they all act the
+# same.
diff --git a/nova/tests/test_volume_types.py b/nova/tests/test_volume_types.py
new file mode 100644
index 000000000..1e190805c
--- /dev/null
+++ b/nova/tests/test_volume_types.py
@@ -0,0 +1,207 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for volume types code
+"""
+import time
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.volume import volume_types
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.test_volume_types')
+
+
+class VolumeTypeTestCase(test.TestCase):
+ """Test cases for volume type code"""
+ def setUp(self):
+ super(VolumeTypeTestCase, self).setUp()
+
+ self.ctxt = context.get_admin_context()
+ self.vol_type1_name = str(int(time.time()))
+ self.vol_type1_specs = dict(
+ type="physical drive",
+ drive_type="SAS",
+ size="300",
+ rpm="7200",
+ visible="True")
+ self.vol_type1 = dict(name=self.vol_type1_name,
+ extra_specs=self.vol_type1_specs)
+
+ def test_volume_type_create_then_destroy(self):
+ """Ensure volume types can be created and deleted"""
+ prev_all_vtypes = volume_types.get_all_types(self.ctxt)
+
+ volume_types.create(self.ctxt,
+ self.vol_type1_name,
+ self.vol_type1_specs)
+ new = volume_types.get_volume_type_by_name(self.ctxt,
+ self.vol_type1_name)
+
+ LOG.info(_("Given data: %s"), self.vol_type1_specs)
+ LOG.info(_("Result data: %s"), new)
+
+ for k, v in self.vol_type1_specs.iteritems():
+ self.assertEqual(v, new['extra_specs'][k],
+ 'one of fields doesnt match')
+
+ new_all_vtypes = volume_types.get_all_types(self.ctxt)
+ self.assertEqual(len(prev_all_vtypes) + 1,
+ len(new_all_vtypes),
+ 'drive type was not created')
+
+ volume_types.destroy(self.ctxt, self.vol_type1_name)
+ new_all_vtypes = volume_types.get_all_types(self.ctxt)
+ self.assertEqual(prev_all_vtypes,
+ new_all_vtypes,
+ 'drive type was not deleted')
+
+ def test_volume_type_create_then_purge(self):
+ """Ensure volume types can be created and purged"""
+ prev_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1)
+
+ volume_types.create(self.ctxt,
+ self.vol_type1_name,
+ self.vol_type1_specs)
+ new = volume_types.get_volume_type_by_name(self.ctxt,
+ self.vol_type1_name)
+
+ for k, v in self.vol_type1_specs.iteritems():
+ self.assertEqual(v, new['extra_specs'][k],
+ 'one of fields doesnt match')
+
+ new_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1)
+ self.assertEqual(len(prev_all_vtypes) + 1,
+ len(new_all_vtypes),
+ 'drive type was not created')
+
+ volume_types.destroy(self.ctxt, self.vol_type1_name)
+ new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1)
+ self.assertEqual(len(new_all_vtypes),
+ len(new_all_vtypes2),
+ 'drive type was incorrectly deleted')
+
+ volume_types.purge(self.ctxt, self.vol_type1_name)
+ new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1)
+ self.assertEqual(len(new_all_vtypes) - 1,
+ len(new_all_vtypes2),
+ 'drive type was not purged')
+
+ def test_get_all_volume_types(self):
+ """Ensures that all volume types can be retrieved"""
+ session = get_session()
+ total_volume_types = session.query(models.VolumeTypes).\
+ count()
+ vol_types = volume_types.get_all_types(self.ctxt)
+ self.assertEqual(total_volume_types, len(vol_types))
+
+ def test_non_existant_inst_type_shouldnt_delete(self):
+ """Ensures that deleting a non-existent volume type raises ApiError"""
+ self.assertRaises(exception.ApiError,
+ volume_types.destroy, self.ctxt, "sfsfsdfdfs")
+
+ def test_repeated_vol_types_should_raise_api_error(self):
+ """Ensures that creating a duplicate volume type raises ApiError"""
+ new_name = self.vol_type1_name + "dup"
+ volume_types.create(self.ctxt, new_name)
+ volume_types.destroy(self.ctxt, new_name)
+ self.assertRaises(
+ exception.ApiError,
+ volume_types.create, self.ctxt, new_name)
+
+ def test_invalid_volume_types_params(self):
+ """Ensures that volume type operations fail with invalid args"""
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.destroy, self.ctxt, None)
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.purge, self.ctxt, None)
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.get_volume_type, self.ctxt, None)
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.get_volume_type_by_name,
+ self.ctxt, None)
+
+ def test_volume_type_get_by_id_and_name(self):
+ """Ensure get by id and get by name return the same volume type"""
+ volume_types.create(self.ctxt,
+ self.vol_type1_name,
+ self.vol_type1_specs)
+ new = volume_types.get_volume_type_by_name(self.ctxt,
+ self.vol_type1_name)
+
+ new2 = volume_types.get_volume_type(self.ctxt, new['id'])
+ self.assertEqual(new, new2)
+
+ def test_volume_type_search_by_extra_spec(self):
+ """Ensure volume types get by extra spec returns correct type"""
+ volume_types.create(self.ctxt, "type1", {"key1": "val1",
+ "key2": "val2"})
+ volume_types.create(self.ctxt, "type2", {"key2": "val2",
+ "key3": "val3"})
+ volume_types.create(self.ctxt, "type3", {"key3": "another_value",
+ "key4": "val4"})
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key1": "val1"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 1)
+ self.assertTrue("type1" in vol_types.keys())
+ self.assertEqual(vol_types['type1']['extra_specs'],
+ {"key1": "val1", "key2": "val2"})
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key2": "val2"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 2)
+ self.assertTrue("type1" in vol_types.keys())
+ self.assertTrue("type2" in vol_types.keys())
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key3": "val3"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 1)
+ self.assertTrue("type2" in vol_types.keys())
+
+ def test_volume_type_search_by_extra_spec_multiple(self):
+ """Ensure get by multiple extra specs returns all matching types"""
+ volume_types.create(self.ctxt, "type1", {"key1": "val1",
+ "key2": "val2",
+ "key3": "val3"})
+ volume_types.create(self.ctxt, "type2", {"key2": "val2",
+ "key3": "val3"})
+ volume_types.create(self.ctxt, "type3", {"key1": "val1",
+ "key3": "val3",
+ "key4": "val4"})
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key1": "val1",
+ "key3": "val3"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 2)
+ self.assertTrue("type1" in vol_types.keys())
+ self.assertTrue("type3" in vol_types.keys())
+ self.assertEqual(vol_types['type1']['extra_specs'],
+ {"key1": "val1", "key2": "val2", "key3": "val3"})
+ self.assertEqual(vol_types['type3']['extra_specs'],
+ {"key1": "val1", "key3": "val3", "key4": "val4"})
diff --git a/nova/tests/test_volume_types_extra_specs.py b/nova/tests/test_volume_types_extra_specs.py
new file mode 100644
index 000000000..017b187a1
--- /dev/null
+++ b/nova/tests/test_volume_types_extra_specs.py
@@ -0,0 +1,132 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2011 University of Southern California
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for volume types extra specs code
+"""
+
+from nova import context
+from nova import db
+from nova import test
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+
+class VolumeTypeExtraSpecsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VolumeTypeExtraSpecsTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.vol_type1 = dict(name="TEST: Regular volume test")
+ self.vol_type1_specs = dict(vol_extra1="value1",
+ vol_extra2="value2",
+ vol_extra3=3)
+ self.vol_type1['extra_specs'] = self.vol_type1_specs
+ ref = db.api.volume_type_create(self.context, self.vol_type1)
+ self.volume_type1_id = ref.id
+ for k, v in self.vol_type1_specs.iteritems():
+ self.vol_type1_specs[k] = str(v)
+
+ self.vol_type2_noextra = dict(name="TEST: Volume type without extra")
+ ref = db.api.volume_type_create(self.context, self.vol_type2_noextra)
+ self.vol_type2_id = ref.id
+
+ def tearDown(self):
+ # Remove the instance type from the database
+ db.api.volume_type_purge(context.get_admin_context(),
+ self.vol_type1['name'])
+ db.api.volume_type_purge(context.get_admin_context(),
+ self.vol_type2_noextra['name'])
+ super(VolumeTypeExtraSpecsTestCase, self).tearDown()
+
+ def test_volume_type_specs_get(self):
+ expected_specs = self.vol_type1_specs.copy()
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_extra_specs_delete(self):
+ expected_specs = self.vol_type1_specs.copy()
+ del expected_specs['vol_extra2']
+ db.api.volume_type_extra_specs_delete(context.get_admin_context(),
+ self.volume_type1_id,
+ 'vol_extra2')
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_extra_specs_update(self):
+ expected_specs = self.vol_type1_specs.copy()
+ expected_specs['vol_extra3'] = "4"
+ db.api.volume_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.volume_type1_id,
+ dict(vol_extra3=4))
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_extra_specs_create(self):
+ expected_specs = self.vol_type1_specs.copy()
+ expected_specs['vol_extra4'] = 'value4'
+ expected_specs['vol_extra5'] = 'value5'
+ db.api.volume_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.volume_type1_id,
+ dict(vol_extra4="value4",
+ vol_extra5="value5"))
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_get_with_extra_specs(self):
+ volume_type = db.api.volume_type_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(volume_type['extra_specs'],
+ self.vol_type1_specs)
+
+ volume_type = db.api.volume_type_get(
+ context.get_admin_context(),
+ self.vol_type2_id)
+ self.assertEquals(volume_type['extra_specs'], {})
+
+ def test_volume_type_get_by_name_with_extra_specs(self):
+ volume_type = db.api.volume_type_get_by_name(
+ context.get_admin_context(),
+ self.vol_type1['name'])
+ self.assertEquals(volume_type['extra_specs'],
+ self.vol_type1_specs)
+
+ volume_type = db.api.volume_type_get_by_name(
+ context.get_admin_context(),
+ self.vol_type2_noextra['name'])
+ self.assertEquals(volume_type['extra_specs'], {})
+
+ def test_volume_type_get_all(self):
+ expected_specs = self.vol_type1_specs.copy()
+
+ types = db.api.volume_type_get_all(context.get_admin_context())
+
+ self.assertEquals(
+ types[self.vol_type1['name']]['extra_specs'], expected_specs)
+
+ self.assertEquals(
+ types[self.vol_type2_noextra['name']]['extra_specs'], {})
diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py
new file mode 100644
index 000000000..3d2d2de13
--- /dev/null
+++ b/nova/tests/test_vsa.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import stubout
+
+from xml.etree import ElementTree
+from xml.etree.ElementTree import Element, SubElement
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import vsa
+from nova import volume
+from nova.volume import volume_types
+from nova.vsa import utils as vsa_utils
+
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa')
+
+
+class VsaTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+
+ FLAGS.quota_volumes = 100
+ FLAGS.quota_gigabytes = 10000
+
+ self.context = context.get_admin_context()
+
+ volume_types.create(self.context,
+ 'SATA_500_7200',
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': 'SATA_500_7200',
+ 'drive_type': 'SATA',
+ 'drive_size': '500',
+ 'drive_rpm': '7200'})
+
+ def fake_show_by_name(meh, context, name):
+ if name == 'wrong_image_name':
+ LOG.debug(_("Test: Emulate wrong VSA name. Raise"))
+ raise exception.ImageNotFound
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaTestCase, self).tearDown()
+
+ def test_vsa_create_delete_defaults(self):
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['display_name'], param['display_name'])
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_delete_check_in_db(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ vsa_ref = self.vsa_api.create(self.context)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1) + 1)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+ vsa_list3 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list3), len(vsa_list2) - 1)
+
+ def test_vsa_create_delete_high_vc_count(self):
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_wrong_image_name(self):
+ param = {'image_name': 'wrong_image_name'}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_db_error(self):
+
+ def fake_vsa_create(context, options):
+ LOG.debug(_("Test: Emulate DB error. Raise"))
+ raise exception.Error
+
+ self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create)
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context)
+
+ def test_vsa_create_wrong_storage_params(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ param = {'storage': [{'stub': 1}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1))
+
+ param = {'storage': [{'drive_name': 'wrong name'}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_with_storage(self, multi_vol_creation=True):
+ """Test creation of VSA with BE storage"""
+
+ FLAGS.vsa_multi_vol_creation = multi_vol_creation
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 3)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}],
+ 'shared': True}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 15)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_with_storage_single_volumes(self):
+ self.test_vsa_create_with_storage(multi_vol_creation=False)
+
+ def test_vsa_update(self):
+ vsa_ref = self.vsa_api.create(self.context)
+
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+
+ param = {'vc_count': 2}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], 2)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_generate_user_data(self):
+
+ FLAGS.vsa_multi_vol_creation = False
+ param = {'display_name': 'VSA name test',
+ 'display_description': 'VSA desc test',
+ 'vc_count': 2,
+ 'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ volumes = self.vsa_api.get_all_vsa_drives(self.context,
+ vsa_ref['id'])
+
+ user_data = vsa_utils.generate_user_data(vsa_ref, volumes)
+ user_data = base64.b64decode(user_data)
+
+ LOG.debug(_("Test: user_data = %s"), user_data)
+
+ elem = ElementTree.fromstring(user_data)
+ self.assertEqual(elem.findtext('name'),
+ param['display_name'])
+ self.assertEqual(elem.findtext('description'),
+ param['display_description'])
+ self.assertEqual(elem.findtext('vc_count'),
+ str(param['vc_count']))
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
new file mode 100644
index 000000000..b7cd4e840
--- /dev/null
+++ b/nova/tests/test_vsa_volumes.py
@@ -0,0 +1,136 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+from nova import exception
+from nova import flags
+from nova import vsa
+from nova import volume
+from nova import db
+from nova import context
+from nova import test
+from nova import log as logging
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa.volumes')
+
+
+class VsaVolumesTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaVolumesTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+ self.context = context.get_admin_context()
+
+ self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context)
+
+ def fake_show_by_name(meh, context, name):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.vsa_id = vsa_ref['id']
+
+ def tearDown(self):
+ if self.vsa_id:
+ self.vsa_api.delete(self.context, self.vsa_id)
+ self.stubs.UnsetAll()
+ super(VsaVolumesTestCase, self).tearDown()
+
+ def _default_volume_param(self):
+ return {
+ 'size': 1,
+ 'snapshot_id': None,
+ 'name': 'Test volume name',
+ 'description': 'Test volume desc name',
+ 'volume_type': self.default_vol_type,
+ 'metadata': {'from_vsa_id': self.vsa_id}
+ }
+
+ def _get_all_volumes_by_vsa(self):
+ return self.volume_api.get_all(self.context,
+ search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}})
+
+ def test_vsa_volume_create_delete(self):
+ """ Check if volume properly created and deleted. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(1, len(vols2))
+ volume_ref = vols2[0]
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'available'})
+ self.volume_api.delete(self.context, volume_ref['id'])
+
+ vols3 = self._get_all_volumes_by_vsa()
+ self.assertEqual(1, len(vols3))
+ volume_ref = vols3[0]
+ self.assertEqual(volume_ref['status'],
+ 'deleting')
+
+ def test_vsa_volume_delete_nonavail_volume(self):
+ """ Check volume deleton in different states. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'in-use'})
+ self.assertRaises(exception.ApiError,
+ self.volume_api.delete,
+ self.context, volume_ref['id'])
+
+ def test_vsa_volume_delete_vsa_with_volumes(self):
+ """ Check volume deleton in different states. """
+
+ vols1 = self._get_all_volumes_by_vsa()
+ for i in range(3):
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1) + 3, len(vols2))
+
+ self.vsa_api.delete(self.context, self.vsa_id)
+
+ vols3 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1), len(vols3))
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
new file mode 100644
index 000000000..e0cacadb4
--- /dev/null
+++ b/nova/tests/utils.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+#
+
+import nova.context
+import nova.db
+import nova.flags
+
+FLAGS = nova.flags.FLAGS
+
+
+def get_test_admin_context():
+ return nova.context.get_admin_context()
+
+
+def get_test_instance(context=None):
+ if not context:
+ context = get_test_admin_context()
+
+ test_instance = {'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '1',
+ 'instance_type_id': '5'} # m1.small
+
+ instance_ref = nova.db.instance_create(context, test_instance)
+ return instance_ref
+
+
+def get_test_network_info(count=1):
+ ipv6 = FLAGS.use_ipv6
+ fake = 'fake'
+ fake_ip = '0.0.0.0/0'
+ fake_ip_2 = '0.0.0.1/0'
+ fake_ip_3 = '0.0.0.1/0'
+ fake_vlan = 100
+ fake_bridge_interface = 'eth0'
+ network = {'bridge': fake,
+ 'cidr': fake_ip,
+ 'cidr_v6': fake_ip,
+ 'vlan': fake_vlan,
+ 'bridge_interface': fake_bridge_interface,
+ 'injected': False}
+ mapping = {'mac': fake,
+ 'dhcp_server': fake,
+ 'gateway': fake,
+ 'gateway6': fake,
+ 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
+ if ipv6:
+ mapping['ip6s'] = [{'ip': fake_ip},
+ {'ip': fake_ip_2},
+ {'ip': fake_ip_3}]
+ return [(network, mapping) for x in xrange(0, count)]
diff --git a/nova/utils.py b/nova/utils.py
index fc4bbd53b..21e6221b2 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -35,6 +35,7 @@ import sys
import time
import types
import uuid
+import pyclbr
from xml.sax import saxutils
from eventlet import event
@@ -860,3 +861,43 @@ def is_valid_ipv4(address):
except ValueError:
return False
return True
+
+
+def monkey_patch():
+ """ If the Flags.monkey_patch set as True,
+ this function patches a decorator
+ for all functions in specified modules.
+ You can set decorators for each modules
+ using FLAGS.monkey_patch_modules.
+ The format is "Module path:Decorator function".
+ Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'
+
+ Parameters of the decorator is as follows.
+ (See nova.notifier.api.notify_decorator)
+
+ name - name of the function
+ function - object of the function
+ """
+ # If FLAGS.monkey_patch is not True, this function does nothing.
+ if not FLAGS.monkey_patch:
+ return
+ # Get list of modules and decorators
+ for module_and_decorator in FLAGS.monkey_patch_modules:
+ module, decorator_name = module_and_decorator.split(':')
+ # import decorator function
+ decorator = import_class(decorator_name)
+ __import__(module)
+ # Retrieve module information using pyclbr
+ module_data = pyclbr.readmodule_ex(module)
+ for key in module_data.keys():
+ # set the decorator for the class methods
+ if isinstance(module_data[key], pyclbr.Class):
+ clz = import_class("%s.%s" % (module, key))
+ for method, func in inspect.getmembers(clz, inspect.ismethod):
+ setattr(clz, method,\
+ decorator("%s.%s.%s" % (module, key, method), func))
+ # set the decorator for the function
+ if isinstance(module_data[key], pyclbr.Function):
+ func = import_class("%s.%s" % (module, key))
+ setattr(sys.modules[module], key,\
+ decorator("%s.%s" % (module, key), func))
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index 19f3ec185..52b2881e8 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -2,6 +2,9 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+#
+# Copyright 2011, Piston Cloud Computing, Inc.
+#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -22,6 +25,7 @@ Includes injection of SSH PGP keys into authorized_keys file.
"""
+import json
import os
import tempfile
import time
@@ -60,7 +64,8 @@ def extend(image, size):
utils.execute('resize2fs', image, check_exit_code=False)
-def inject_data(image, key=None, net=None, partition=None, nbd=False):
+def inject_data(image, key=None, net=None, metadata=None,
+ partition=None, nbd=False, tune2fs=True):
"""Injects a ssh key and optionally net data into a disk image.
it will mount the image as a fully partitioned disk and attempt to inject
@@ -89,10 +94,10 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
' only inject raw disk images): %s' %
mapped_device)
- # Configure ext2fs so that it doesn't auto-check every N boots
- out, err = utils.execute('tune2fs', '-c', 0, '-i', 0,
- mapped_device, run_as_root=True)
-
+ if tune2fs:
+ # Configure ext2fs so that it doesn't auto-check every N boots
+ out, err = utils.execute('tune2fs', '-c', 0, '-i', 0,
+ mapped_device, run_as_root=True)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
@@ -103,7 +108,8 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
% err)
try:
- inject_data_into_fs(tmpdir, key, net, utils.execute)
+ inject_data_into_fs(tmpdir, key, net, metadata,
+ utils.execute)
finally:
# unmount device
utils.execute('umount', mapped_device, run_as_root=True)
@@ -155,6 +161,7 @@ def destroy_container(target, instance, nbd=False):
def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
+
if nbd:
device = _allocate_device()
utils.execute('qemu-nbd', '-c', device, image, run_as_root=True)
@@ -190,6 +197,7 @@ def _allocate_device():
# NOTE(vish): This assumes no other processes are allocating nbd devices.
# It may race cause a race condition if multiple
# workers are running on a given machine.
+
while True:
if not _DEVICES:
raise exception.Error(_('No free nbd devices'))
@@ -203,7 +211,7 @@ def _free_device(device):
_DEVICES.append(device)
-def inject_data_into_fs(fs, key, net, execute):
+def inject_data_into_fs(fs, key, net, metadata, execute):
"""Injects data into a filesystem already mounted by the caller.
Virt connections can call this directly if they mount their fs
in a different way to inject_data
@@ -212,6 +220,16 @@ def inject_data_into_fs(fs, key, net, execute):
_inject_key_into_fs(key, fs, execute=execute)
if net:
_inject_net_into_fs(net, fs, execute=execute)
+ if metadata:
+ _inject_metadata_into_fs(metadata, fs, execute=execute)
+
+
+def _inject_metadata_into_fs(metadata, fs, execute=None):
+ metadata_path = os.path.join(fs, "meta.js")
+ metadata = dict([(m.key, m.value) for m in metadata])
+
+ utils.execute('sudo', 'tee', metadata_path,
+ process_input=json.dumps(metadata))
def _inject_key_into_fs(key, fs, execute=None):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 93290aba7..d05b51bd9 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -140,7 +140,7 @@ class ComputeDriver(object):
that it was before this call began.
:param context: security context
- :param instance: Instance of {nova.compute.service.Instance}.
+ :param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param network_info:
@@ -152,14 +152,11 @@ class ComputeDriver(object):
def destroy(self, instance, network_info, cleanup=True):
"""Destroy (shutdown and delete) the specified instance.
- The given parameter is an instance of nova.compute.service.Instance,
-
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
- :param instance: Instance of {nova.compute.service.Instance} and so
- the instance is being specified as instance.name.
+ :param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param cleanup:
@@ -171,8 +168,7 @@ class ComputeDriver(object):
def reboot(self, instance, network_info):
"""Reboot the specified instance.
- :param instance: Instance of {nova.compute.service.Instance} and so
- the instance is being specified as instance.name.
+ :param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
"""
@@ -240,10 +236,10 @@ class ComputeDriver(object):
"""
Snapshots the specified instance.
- The given parameter is an instance of nova.compute.service.Instance,
- and so the instance is being specified as instance.name.
-
- The second parameter is the name of the snapshot.
+ :param context: security context
+ :param instance: Instance object as returned by DB layer.
+ :param image_id: Reference to a pre-created image that will
+ hold the snapshot.
"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 13b7aeab5..d5e2bf31b 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -67,6 +67,7 @@ class FakeConnection(driver.ComputeDriver):
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'fake-mini'}
+ self._mounts = {}
@classmethod
def instance(cls):
@@ -99,7 +100,8 @@ class FakeConnection(driver.ComputeDriver):
self.instances[name] = fake_instance
def snapshot(self, context, instance, name):
- pass
+ if not instance['name'] in self.instances:
+ raise exception.InstanceNotRunning()
def reboot(self, instance, network_info):
pass
@@ -144,7 +146,7 @@ class FakeConnection(driver.ComputeDriver):
pass
def destroy(self, instance, network_info, cleanup=True):
- key = instance.name
+ key = instance['name']
if key in self.instances:
del self.instances[key]
else:
@@ -152,9 +154,16 @@ class FakeConnection(driver.ComputeDriver):
(key, self.instances))
def attach_volume(self, instance_name, device_path, mountpoint):
+ if not instance_name in self._mounts:
+ self._mounts[instance_name] = {}
+ self._mounts[instance_name][mountpoint] = device_path
return True
def detach_volume(self, instance_name, mountpoint):
+ try:
+ del self._mounts[instance_name][mountpoint]
+ except KeyError:
+ pass
return True
def get_info(self, instance_name):
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 210e2b0fb..d3aeadda4 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -106,6 +106,13 @@
</disk>
#end for
#end if
+ #if $getVar('config_drive', False)
+ <disk type='file'>
+ <driver type='raw' />
+ <source file='${basepath}/disk.config' />
+ <target dev='${disk_prefix}z' bus='${disk_bus}' />
+ </disk>
+ #end if
#end if
#for $nic in $nics
@@ -128,7 +135,9 @@
<interface type='bridge'>
<source bridge='${nic.bridge_name}'/>
<mac address='${nic.mac_address}'/>
- <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+#if $getVar('use_virtio_for_bridges', True)
+ <model type='virtio'/>
+#end if
<filterref filter="nova-instance-${name}-${nic.id}">
<parameter name="IP" value="${nic.ip_address}" />
<parameter name="DHCPSERVER" value="${nic.dhcp_server}" />
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index e8a657bac..363a20ed0 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -4,6 +4,7 @@
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright (c) 2011 Piston Cloud Computing, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -130,6 +131,13 @@ flags.DEFINE_string('libvirt_vif_type', 'bridge',
flags.DEFINE_string('libvirt_vif_driver',
'nova.virt.libvirt.vif.LibvirtBridgeDriver',
'The libvirt VIF driver to configure the VIFs.')
+flags.DEFINE_string('default_local_format',
+ None,
+ 'The default format a local_volume will be formatted with '
+ 'on creation.')
+flags.DEFINE_bool('libvirt_use_virtio_for_bridges',
+ False,
+ 'Use virtio for bridge interfaces')
def get_connection(read_only):
@@ -586,6 +594,7 @@ class LibvirtConnection(driver.ComputeDriver):
self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(context, instance, xml, network_info=network_info,
block_device_info=block_device_info)
+
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance, network_info)
@@ -759,10 +768,15 @@ class LibvirtConnection(driver.ComputeDriver):
if size:
disk.extend(target, size)
- def _create_local(self, target, local_gb):
+ def _create_local(self, target, local_size, prefix='G', fs_format=None):
"""Create a blank image of specified size"""
- utils.execute('truncate', target, '-s', "%dG" % local_gb)
- # TODO(vish): should we format disk by default?
+
+ if not fs_format:
+ fs_format = FLAGS.default_local_format
+
+ utils.execute('truncate', target, '-s', "%d%c" % (local_size, prefix))
+ if fs_format:
+ utils.execute('mkfs', '-t', fs_format, target)
def _create_swap(self, target, swap_gb):
"""Create a swap file of specified size"""
@@ -849,14 +863,14 @@ class LibvirtConnection(driver.ComputeDriver):
target=basepath('disk.local'),
fname="local_%s" % local_gb,
cow=FLAGS.use_cow_images,
- local_gb=local_gb)
+ local_size=local_gb)
for eph in driver.block_device_info_get_ephemerals(block_device_info):
self._cache_image(fn=self._create_local,
target=basepath(_get_eph_disk(eph)),
fname="local_%s" % eph['size'],
cow=FLAGS.use_cow_images,
- local_gb=eph['size'])
+ local_size=eph['size'])
swap_gb = 0
@@ -882,9 +896,24 @@ class LibvirtConnection(driver.ComputeDriver):
if not inst['kernel_id']:
target_partition = "1"
- if FLAGS.libvirt_type == 'lxc':
+ config_drive_id = inst.get('config_drive_id')
+ config_drive = inst.get('config_drive')
+
+ if any((FLAGS.libvirt_type == 'lxc', config_drive, config_drive_id)):
target_partition = None
+ if config_drive_id:
+ fname = '%08x' % int(config_drive_id)
+ self._cache_image(fn=self._fetch_image,
+ target=basepath('disk.config'),
+ fname=fname,
+ image_id=config_drive_id,
+ user=user,
+ project=project)
+ elif config_drive:
+ self._create_local(basepath('disk.config'), 64, prefix="M",
+ fs_format='msdos') # 64MB
+
if inst['key_data']:
key = str(inst['key_data'])
else:
@@ -928,19 +957,29 @@ class LibvirtConnection(driver.ComputeDriver):
searchList=[{'interfaces': nets,
'use_ipv6': FLAGS.use_ipv6}]))
- if key or net:
+ metadata = inst.get('metadata')
+ if any((key, net, metadata)):
inst_name = inst['name']
- img_id = inst.image_ref
- if key:
- LOG.info(_('instance %(inst_name)s: injecting key into'
- ' image %(img_id)s') % locals())
- if net:
- LOG.info(_('instance %(inst_name)s: injecting net into'
- ' image %(img_id)s') % locals())
+
+ if config_drive: # Should be True or None by now.
+ injection_path = basepath('disk.config')
+ img_id = 'config-drive'
+ tune2fs = False
+ else:
+ injection_path = basepath('disk')
+ img_id = inst.image_ref
+ tune2fs = True
+
+ for injection in ('metadata', 'key', 'net'):
+ if locals()[injection]:
+ LOG.info(_('instance %(inst_name)s: injecting '
+ '%(injection)s into image %(img_id)s'
+ % locals()))
try:
- disk.inject_data(basepath('disk'), key, net,
+ disk.inject_data(injection_path, key, net, metadata,
partition=target_partition,
- nbd=FLAGS.use_cow_images)
+ nbd=FLAGS.use_cow_images,
+ tune2fs=tune2fs)
if FLAGS.libvirt_type == 'lxc':
disk.setup_container(basepath('disk'),
@@ -1047,6 +1086,8 @@ class LibvirtConnection(driver.ComputeDriver):
'ebs_root': ebs_root,
'local_device': local_device,
'volumes': block_device_mapping,
+ 'use_virtio_for_bridges':
+ FLAGS.libvirt_use_virtio_for_bridges,
'ephemerals': ephemerals}
root_device_name = driver.block_device_info_get_root(block_device_info)
@@ -1070,6 +1111,10 @@ class LibvirtConnection(driver.ComputeDriver):
block_device_info)):
xml_info['swap_device'] = self.default_swap_device
+ config_drive = False
+ if instance.get('config_drive') or instance.get('config_drive_id'):
+ xml_info['config_drive'] = xml_info['basepath'] + "/disk.config"
+
if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
xml_info['vncserver_host'] = FLAGS.vncserver_host
xml_info['vnc_keymap'] = FLAGS.vnc_keymap
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 4a1f07bb1..efbea7076 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Piston Cloud Computing, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -740,13 +741,14 @@ class VMHelper(HelperBase):
# if at all, so determine whether it's required first, and then do
# everything
mount_required = False
- key, net = _prepare_injectables(instance, network_info)
- mount_required = key or net
+ key, net, metadata = _prepare_injectables(instance, network_info)
+ mount_required = key or net or metadata
if not mount_required:
return
with_vdi_attached_here(session, vdi_ref, False,
- lambda dev: _mounted_processing(dev, key, net))
+ lambda dev: _mounted_processing(dev, key, net,
+ metadata))
@classmethod
def lookup_kernel_ramdisk(cls, session, vm):
@@ -1198,7 +1200,7 @@ def _find_guest_agent(base_dir, agent_rel_path):
return False
-def _mounted_processing(device, key, net):
+def _mounted_processing(device, key, net, metadata):
"""Callback which runs with the image VDI attached"""
dev_path = '/dev/' + device + '1' # NB: Partition 1 hardcoded
@@ -1212,7 +1214,7 @@ def _mounted_processing(device, key, net):
if not _find_guest_agent(tmpdir, FLAGS.xenapi_agent_path):
LOG.info(_('Manipulating interface files '
'directly'))
- disk.inject_data_into_fs(tmpdir, key, net,
+ disk.inject_data_into_fs(tmpdir, key, net, metadata,
utils.execute)
finally:
utils.execute('umount', dev_path, run_as_root=True)
@@ -1235,6 +1237,7 @@ def _prepare_injectables(inst, networks_info):
template = t.Template
template_data = open(FLAGS.injected_network_template).read()
+ metadata = inst['metadata']
key = str(inst['key_data'])
net = None
if networks_info:
@@ -1272,4 +1275,4 @@ def _prepare_injectables(inst, networks_info):
net = str(template(template_data,
searchList=[{'interfaces': interfaces_info,
'use_ipv6': FLAGS.use_ipv6}]))
- return key, net
+ return key, net, metadata
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 9a6215f88..c5f105f40 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -239,7 +239,7 @@ class VMOps(object):
self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
vdis)
- # Alter the image before VM start for, e.g. network injection
+ # Alter the image before VM start for network injection.
if FLAGS.flat_injected:
VMHelper.preconfigure_instance(self._session, instance,
first_vdi_ref, network_info)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 52b3a9fed..d9c082514 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,7 +41,8 @@ LOG = logging.getLogger('nova.volume')
class API(base.Base):
"""API for interacting with the volume manager."""
- def create(self, context, size, snapshot_id, name, description):
+ def create(self, context, size, snapshot_id, name, description,
+ volume_type=None, metadata=None, availability_zone=None):
if snapshot_id != None:
snapshot = self.get_snapshot(context, snapshot_id)
if snapshot['status'] != "available":
@@ -57,16 +58,27 @@ class API(base.Base):
raise quota.QuotaError(_("Volume quota exceeded. You cannot "
"create a volume of size %sG") % size)
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if volume_type is None:
+ volume_type_id = None
+ else:
+ volume_type_id = volume_type.get('id', None)
+
options = {
'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
'snapshot_id': snapshot_id,
- 'availability_zone': FLAGS.storage_availability_zone,
+ 'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'display_name': name,
- 'display_description': description}
+ 'display_description': description,
+ 'volume_type_id': volume_type_id,
+ 'metadata': metadata,
+ }
volume = self.db.volume_create(context, options)
rpc.cast(context,
@@ -105,10 +117,45 @@ class API(base.Base):
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
- def get_all(self, context):
+ def get_all(self, context, search_opts={}):
if context.is_admin:
- return self.db.volume_get_all(context)
- return self.db.volume_get_all_by_project(context, context.project_id)
+ volumes = self.db.volume_get_all(context)
+ else:
+ volumes = self.db.volume_get_all_by_project(context,
+ context.project_id)
+
+ if search_opts:
+ LOG.debug(_("Searching by: %s") % str(search_opts))
+
+ def _check_metadata_match(volume, searchdict):
+ volume_metadata = {}
+ for i in volume.get('volume_metadata'):
+ volume_metadata[i['key']] = i['value']
+
+ for k, v in searchdict.iteritems():
+ if k not in volume_metadata.keys()\
+ or volume_metadata[k] != v:
+ return False
+ return True
+
+ # search_option to filter_name mapping.
+ filter_mapping = {'metadata': _check_metadata_match}
+
+ result = []
+ for volume in volumes:
+ # go over all filters in the list
+ for opt, values in search_opts.iteritems():
+ try:
+ filter_func = filter_mapping[opt]
+ except KeyError:
+ # no such filter - ignore it, go to next filter
+ continue
+ else:
+ if filter_func(volume, values):
+ result.append(volume)
+ break
+ volumes = result
+ return volumes
def get_snapshot(self, context, snapshot_id):
rv = self.db.snapshot_get(context, snapshot_id)
@@ -183,3 +230,38 @@ class API(base.Base):
{"method": "delete_snapshot",
"args": {"topic": FLAGS.volume_topic,
"snapshot_id": snapshot_id}})
+
+ def get_volume_metadata(self, context, volume_id):
+ """Get all metadata associated with a volume."""
+ rv = self.db.volume_metadata_get(context, volume_id)
+ return dict(rv.iteritems())
+
+ def delete_volume_metadata(self, context, volume_id, key):
+ """Delete the given metadata item from an volume."""
+ self.db.volume_metadata_delete(context, volume_id, key)
+
+ def update_volume_metadata(self, context, volume_id,
+ metadata, delete=False):
+ """Updates or creates volume metadata.
+
+ If delete is True, metadata items that are not specified in the
+ `metadata` argument will be deleted.
+
+ """
+ if delete:
+ _metadata = metadata
+ else:
+ _metadata = self.get_volume_metadata(context, volume_id)
+ _metadata.update(metadata)
+
+ self.db.volume_metadata_update(context, volume_id, _metadata, True)
+ return _metadata
+
+ def get_volume_metadata_value(self, volume, key):
+ """Get value of particular metadata key."""
+ metadata = volume.get('volume_metadata')
+ if metadata:
+ for i in volume['volume_metadata']:
+ if i['key'] == key:
+ return i['value']
+ return None
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index c99534c07..35e3ea8d0 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -22,11 +22,13 @@ Drivers for volumes.
import time
import os
+from xml.etree import ElementTree
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from nova.volume import volume_types
LOG = logging.getLogger("nova.volume.driver")
@@ -212,6 +214,11 @@ class VolumeDriver(object):
"""Make sure volume is exported."""
raise NotImplementedError()
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+ return None
+
class AOEDriver(VolumeDriver):
"""Implements AOE specific volume commands."""
@@ -495,7 +502,7 @@ class ISCSIDriver(VolumeDriver):
(out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
iscsi_properties['target_iqn'],
'-p', iscsi_properties['target_portal'],
- iscsi_command, run_as_root=True)
+ *iscsi_command, run_as_root=True)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
@@ -523,7 +530,7 @@ class ISCSIDriver(VolumeDriver):
"node.session.auth.password",
iscsi_properties['auth_password'])
- self._run_iscsiadm(iscsi_properties, "--login")
+ self._run_iscsiadm(iscsi_properties, ("--login", ))
self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
@@ -544,7 +551,7 @@ class ISCSIDriver(VolumeDriver):
locals())
# The rescan isn't documented as being necessary(?), but it helps
- self._run_iscsiadm(iscsi_properties, "--rescan")
+ self._run_iscsiadm(iscsi_properties, ("--rescan", ))
tries = tries + 1
if not os.path.exists(mount_device):
@@ -561,7 +568,7 @@ class ISCSIDriver(VolumeDriver):
"""Undiscover volume on a remote host."""
iscsi_properties = self._get_iscsi_properties(volume)
self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
- self._run_iscsiadm(iscsi_properties, "--logout")
+ self._run_iscsiadm(iscsi_properties, ("--logout", ))
self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
def check_for_export(self, context, volume_id):
@@ -802,3 +809,268 @@ class LoggingVolumeDriver(VolumeDriver):
if match:
matches.append(entry)
return matches
+
+
+class ZadaraBEDriver(ISCSIDriver):
+ """Performs actions to configure Zadara BE module."""
+
+ def _is_vsa_volume(self, volume):
+ return volume_types.is_vsa_volume(volume['volume_type_id'])
+
+ def _is_vsa_drive(self, volume):
+ return volume_types.is_vsa_drive(volume['volume_type_id'])
+
+ def _not_vsa_volume_or_drive(self, volume):
+ """Returns True if volume is not VSA BE volume."""
+ if not volume_types.is_vsa_object(volume['volume_type_id']):
+ LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name'])
+ return True
+ else:
+ return False
+
+ def check_for_setup_error(self):
+ """No setup necessary for Zadara BE."""
+ pass
+
+ """ Volume Driver methods """
+ def create_volume(self, volume):
+ """Creates BE volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_volume(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s creation - do nothing"),
+ volume['name'])
+ return
+
+ if int(volume['size']) == 0:
+ sizestr = '0' # indicates full-partition
+ else:
+ sizestr = '%s' % (int(volume['size']) << 30) # size in bytes
+
+ # Set the qos-str to default type sas
+ qosstr = 'SAS_1000'
+ volume_type = volume_types.get_volume_type(None,
+ volume['volume_type_id'])
+ if volume_type is not None:
+ qosstr = volume_type['extra_specs']['drive_type'] + \
+ ("_%s" % volume_type['extra_specs']['drive_size'])
+
+ vsa_id = None
+ for i in volume.get('volume_metadata'):
+ if i['key'] == 'to_vsa_id':
+ vsa_id = i['value']
+ break
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'create_qospart',
+ '--qos', qosstr,
+ '--pname', volume['name'],
+ '--psize', sizestr,
+ '--vsaid', vsa_id,
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE create_volume for %s failed"), volume['name'])
+ raise
+
+ LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
+
+ def delete_volume(self, volume):
+ """Deletes BE volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).delete_volume(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'delete_partition',
+ '--pname', volume['name'],
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
+ return
+
+        LOG.debug(_("VSA BE delete_volume for %s succeeded"), volume['name'])
+
+ def local_path(self, volume):
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).local_path(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s local path call - call discover"),
+ volume['name'])
+ return super(ZadaraBEDriver, self).discover_volume(None, volume)
+
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """ensure BE export for a volume"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).ensure_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping ensure_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ ret = self._common_be_export(context, volume, iscsi_target)
+ except exception.ProcessExecutionError:
+ return
+ return ret
+
+ def create_export(self, context, volume):
+ """create BE export for a volume"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s create export - do nothing"),
+ volume['name'])
+ return
+
+ self._ensure_iscsi_targets(context, volume['host'])
+ iscsi_target = self.db.volume_allocate_iscsi_target(context,
+ volume['id'],
+ volume['host'])
+ try:
+ ret = self._common_be_export(context, volume, iscsi_target)
+        except Exception:
+            raise exception.ProcessExecutionError()
+ return ret
+
+ def remove_export(self, context, volume):
+ """Removes BE export for a volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).remove_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping remove_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'remove_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
+ return
+
+ def create_snapshot(self, snapshot):
+ """Nothing required for snapshot"""
+        if self._not_vsa_volume_or_drive(snapshot):
+            return super(ZadaraBEDriver, self).create_snapshot(snapshot)
+
+ pass
+
+ def delete_snapshot(self, snapshot):
+ """Nothing required to delete a snapshot"""
+        if self._not_vsa_volume_or_drive(snapshot):
+            return super(ZadaraBEDriver, self).delete_snapshot(snapshot)
+
+ pass
+
+ """ Internal BE Volume methods """
+ def _common_be_export(self, context, volume, iscsi_target):
+ """
+ Common logic that asks zadara_sncfg to setup iSCSI target/lun for
+ this volume
+ """
+ (out, err) = self._sync_exec(
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'create_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ run_as_root=True,
+ check_exit_code=0)
+
+ result_xml = ElementTree.fromstring(out)
+ response_node = result_xml.find("Sn")
+ if response_node is None:
+            msg = _("Malformed response from zadara_sncfg")
+ raise exception.Error(msg)
+
+ sn_ip = response_node.findtext("SnIp")
+ sn_iqn = response_node.findtext("IqnName")
+ iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ sn_iqn))
+ return model_update
+
+ def _get_qosgroup_summary(self):
+ """gets the list of qosgroups from Zadara BE"""
+ try:
+ (out, err) = self._sync_exec(
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'get_qosgroups_xml',
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("Failed to retrieve QoS info"))
+ return {}
+
+ qos_groups = {}
+ result_xml = ElementTree.fromstring(out)
+ for element in result_xml.findall('QosGroup'):
+ qos_group = {}
+ # get the name of the group.
+ # If we cannot find it, forget this element
+ group_name = element.findtext("Name")
+ if not group_name:
+ continue
+
+ # loop through all child nodes & fill up attributes of this group
+ for child in element.getchildren():
+ # two types of elements - property of qos-group & sub property
+ # classify them accordingly
+ if child.text:
+ qos_group[child.tag] = int(child.text) \
+ if child.text.isdigit() else child.text
+ else:
+ subelement = {}
+ for subchild in child.getchildren():
+ subelement[subchild.tag] = int(subchild.text) \
+ if subchild.text.isdigit() else subchild.text
+ qos_group[child.tag] = subelement
+
+ # Now add this group to the master qos_groups
+ qos_groups[group_name] = qos_group
+
+ return qos_groups
+
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+
+ drive_info = self._get_qosgroup_summary()
+ return {'drive_qos_info': drive_info}
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 798bd379a..caa5298d4 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -48,7 +48,9 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
+from nova import rpc
from nova import utils
+from nova.volume import volume_types
LOG = logging.getLogger('nova.volume.manager')
@@ -60,6 +62,8 @@ flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
'Driver to use for volume creation')
flags.DEFINE_boolean('use_local_volumes', True,
'if True, will not discover local volumes')
+flags.DEFINE_boolean('volume_force_update_capabilities', False,
+ 'if True will force update capabilities on each check')
class VolumeManager(manager.SchedulerDependentManager):
@@ -74,6 +78,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
+ self._last_volume_stats = []
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -123,6 +128,7 @@ class VolumeManager(manager.SchedulerDependentManager):
except Exception:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
+ self._notify_vsa(context, volume_ref, 'error')
raise
now = utils.utcnow()
@@ -130,8 +136,29 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref['id'], {'status': 'available',
'launched_at': now})
LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
+ self._notify_vsa(context, volume_ref, 'available')
+ self._reset_stats()
return volume_id
+ def _notify_vsa(self, context, volume_ref, status):
+ if volume_ref['volume_type_id'] is None:
+ return
+
+ if volume_types.is_vsa_drive(volume_ref['volume_type_id']):
+ vsa_id = None
+ for i in volume_ref.get('volume_metadata'):
+ if i['key'] == 'to_vsa_id':
+ vsa_id = int(i['value'])
+ break
+
+ if vsa_id:
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "vsa_volume_created",
+ "args": {"vol_id": volume_ref['id'],
+ "vsa_id": vsa_id,
+ "status": status}})
+
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()
@@ -141,6 +168,7 @@ class VolumeManager(manager.SchedulerDependentManager):
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
+ self._reset_stats()
try:
LOG.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
@@ -231,3 +259,53 @@ class VolumeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
for volume in instance_ref['volumes']:
self.driver.check_for_export(context, volume['id'])
+
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+
+ error_list = []
+ try:
+ self._report_driver_status()
+ except Exception as ex:
+ LOG.warning(_("Error during report_driver_status(): %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ super(VolumeManager, self).periodic_tasks(context)
+
+ return error_list
+
+ def _volume_stats_changed(self, stat1, stat2):
+ if FLAGS.volume_force_update_capabilities:
+ return True
+ if len(stat1) != len(stat2):
+ return True
+ for (k, v) in stat1.iteritems():
+            if k not in stat2 or stat2[k] != v:
+ return True
+ return False
+
+ def _report_driver_status(self):
+ volume_stats = self.driver.get_volume_stats(refresh=True)
+ if volume_stats:
+ LOG.info(_("Checking volume capabilities"))
+
+ if self._volume_stats_changed(self._last_volume_stats,
+ volume_stats):
+ LOG.info(_("New capabilities found: %s"), volume_stats)
+ self._last_volume_stats = volume_stats
+
+ # This will grab info about the host and queue it
+ # to be sent to the Schedulers.
+ self.update_service_capabilities(self._last_volume_stats)
+ else:
+ # avoid repeating fanouts
+ self.update_service_capabilities(None)
+
+ def _reset_stats(self):
+ LOG.info(_("Clear capabilities"))
+ self._last_volume_stats = []
+
+ def notification(self, context, event):
+ LOG.info(_("Notification {%s} received"), event)
+ self._reset_stats()
diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py
new file mode 100644
index 000000000..ffa9e6e02
--- /dev/null
+++ b/nova/volume/volume_types.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Ken Pepple
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Built-in volume type properties."""
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.volume.volume_types')
+
+
+def create(context, name, extra_specs={}):
+ """Creates volume types."""
+ try:
+ db.volume_type_create(context,
+ dict(name=name,
+ extra_specs=extra_specs))
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s') % e)
+ raise exception.ApiError(_("Cannot create volume_type with "
+ "name %(name)s and specs %(extra_specs)s")
+ % locals())
+
+
+def destroy(context, name):
+ """Marks volume types as deleted."""
+ if name is None:
+ raise exception.InvalidVolumeType(volume_type=name)
+ else:
+ try:
+ db.volume_type_destroy(context, name)
+ except exception.NotFound:
+ LOG.exception(_('Volume type %s not found for deletion') % name)
+ raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def purge(context, name):
+ """Removes volume types from database."""
+ if name is None:
+ raise exception.InvalidVolumeType(volume_type=name)
+ else:
+ try:
+ db.volume_type_purge(context, name)
+ except exception.NotFound:
+ LOG.exception(_('Volume type %s not found for purge') % name)
+ raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def get_all_types(context, inactive=0, search_opts=None):
+ """Get all non-deleted volume_types.
+
+ Pass true as argument if you want deleted volume types returned also.
+
+ """
+ vol_types = db.volume_type_get_all(context, inactive)
+
+ if search_opts:
+ LOG.debug(_("Searching by: %s") % str(search_opts))
+
+ def _check_extra_specs_match(vol_type, searchdict):
+ for k, v in searchdict.iteritems():
+ if k not in vol_type['extra_specs'].keys()\
+ or vol_type['extra_specs'][k] != v:
+ return False
+ return True
+
+ # search_option to filter_name mapping.
+ filter_mapping = {'extra_specs': _check_extra_specs_match}
+
+ result = {}
+ for type_name, type_args in vol_types.iteritems():
+ # go over all filters in the list
+ for opt, values in search_opts.iteritems():
+ try:
+ filter_func = filter_mapping[opt]
+ except KeyError:
+ # no such filter - ignore it, go to next filter
+ continue
+ else:
+ if filter_func(type_args, values):
+ result[type_name] = type_args
+ break
+ vol_types = result
+ return vol_types
+
+
+def get_volume_type(ctxt, id):
+ """Retrieves single volume type by id."""
+ if id is None:
+ raise exception.InvalidVolumeType(volume_type=id)
+
+ if ctxt is None:
+ ctxt = context.get_admin_context()
+
+ try:
+ return db.volume_type_get(ctxt, id)
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown volume type: %s") % id)
+
+
+def get_volume_type_by_name(context, name):
+ """Retrieves single volume type by name."""
+ if name is None:
+ raise exception.InvalidVolumeType(volume_type=name)
+
+ try:
+ return db.volume_type_get_by_name(context, name)
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def is_key_value_present(volume_type_id, key, value, volume_type=None):
+ if volume_type_id is None:
+ return False
+
+ if volume_type is None:
+ volume_type = get_volume_type(context.get_admin_context(),
+ volume_type_id)
+ if volume_type.get('extra_specs') is None or\
+ volume_type['extra_specs'].get(key) != value:
+ return False
+ else:
+ return True
+
+
+def is_vsa_drive(volume_type_id, volume_type=None):
+ return is_key_value_present(volume_type_id,
+ 'type', 'vsa_drive', volume_type)
+
+
+def is_vsa_volume(volume_type_id, volume_type=None):
+ return is_key_value_present(volume_type_id,
+ 'type', 'vsa_volume', volume_type)
+
+
+def is_vsa_object(volume_type_id):
+ if volume_type_id is None:
+ return False
+
+ volume_type = get_volume_type(context.get_admin_context(),
+ volume_type_id)
+
+ return is_vsa_drive(volume_type_id, volume_type) or\
+ is_vsa_volume(volume_type_id, volume_type)
diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py
new file mode 100644
index 000000000..09162e006
--- /dev/null
+++ b/nova/vsa/__init__.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.vsa.api import API
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
new file mode 100644
index 000000000..18cf13705
--- /dev/null
+++ b/nova/vsa/api.py
@@ -0,0 +1,411 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to Virtual Storage Arrays (VSAs).
+
+Experimental code. Requires special VSA image.
+For assistance and guidelines please contact
+ Zadara Storage Inc & Openstack community
+"""
+
+import sys
+
+from nova import compute
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import volume
+from nova.compute import instance_types
+from nova.db import base
+from nova.volume import volume_types
+
+
+class VsaState:
+ CREATING = 'creating' # VSA creating (not ready yet)
+ LAUNCHING = 'launching' # Launching VCs (all BE volumes were created)
+ CREATED = 'created' # VSA fully created and ready for use
+ PARTIAL = 'partial' # Some BE drives were allocated
+ FAILED = 'failed' # Some BE storage allocations failed
+ DELETING = 'deleting' # VSA started the deletion procedure
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_ec2_access_key', None,
+ 'EC2 access key used by VSA for accessing nova')
+flags.DEFINE_string('vsa_ec2_user_id', None,
+ 'User ID used by VSA for accessing nova')
+flags.DEFINE_boolean('vsa_multi_vol_creation', True,
+ 'Ask scheduler to create multiple volumes in one call')
+flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type',
+ 'Name of volume type associated with FE VSA volumes')
+
+LOG = logging.getLogger('nova.vsa')
+
+
+class API(base.Base):
+ """API for interacting with the VSA manager."""
+
+ def __init__(self, compute_api=None, volume_api=None, **kwargs):
+ self.compute_api = compute_api or compute.API()
+ self.volume_api = volume_api or volume.API()
+ super(API, self).__init__(**kwargs)
+
+ def _check_volume_type_correctness(self, vol_type):
+        if vol_type.get('extra_specs') is None or\
+           vol_type['extra_specs'].get('type') != 'vsa_drive' or\
+           vol_type['extra_specs'].get('drive_type') is None or\
+           vol_type['extra_specs'].get('drive_size') is None:
+
+ raise exception.ApiError(_("Invalid drive type %s")
+ % vol_type['name'])
+
+ def _get_default_vsa_instance_type(self):
+ return instance_types.get_instance_type_by_name(
+ FLAGS.default_vsa_instance_type)
+
+ def _check_storage_parameters(self, context, vsa_name, storage,
+ shared, first_index=0):
+ """
+ Translates storage array of disks to the list of volumes
+ :param storage: List of dictionaries with following keys:
+ disk_name, num_disks, size
+ :param shared: Specifies if storage is dedicated or shared.
+ For shared storage disks split into partitions
+ """
+ volume_params = []
+ for node in storage:
+
+ name = node.get('drive_name', None)
+ num_disks = node.get('num_drives', 1)
+
+ if name is None:
+ raise exception.ApiError(_("No drive_name param found in %s")
+ % node)
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except exception.NotFound:
+ raise exception.ApiError(_("Invalid drive type name %s")
+ % name)
+
+ self._check_volume_type_correctness(vol_type)
+
+ # if size field present - override disk size specified in DB
+ size = int(node.get('size',
+ vol_type['extra_specs'].get('drive_size')))
+
+ if shared:
+ part_size = FLAGS.vsa_part_size_gb
+ total_capacity = num_disks * size
+ num_volumes = total_capacity / part_size
+ size = part_size
+ else:
+ num_volumes = num_disks
+ size = 0 # special handling for full drives
+
+ for i in range(num_volumes):
+ volume_name = "drive-%03d" % first_index
+ first_index += 1
+ volume_desc = 'BE volume for VSA %s type %s' % \
+ (vsa_name, name)
+ volume = {
+ 'size': size,
+ 'name': volume_name,
+ 'description': volume_desc,
+ 'volume_type_id': vol_type['id'],
+ }
+ volume_params.append(volume)
+
+ return volume_params
+
+ def create(self, context, display_name='', display_description='',
+ vc_count=1, instance_type=None, image_name=None,
+ availability_zone=None, storage=[], shared=None):
+ """
+ Provision VSA instance with corresponding compute instances
+ and associated volumes
+ :param storage: List of dictionaries with following keys:
+ disk_name, num_disks, size
+ :param shared: Specifies if storage is dedicated or shared.
+ For shared storage disks split into partitions
+ """
+
+ LOG.info(_("*** Experimental VSA code ***"))
+
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if instance_type is None:
+ instance_type = self._get_default_vsa_instance_type()
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if storage is None:
+ storage = []
+
+ if shared is None or shared == 'False' or shared == False:
+ shared = False
+ else:
+ shared = True
+
+ # check if image is ready before starting any work
+ if image_name is None:
+ image_name = FLAGS.vc_image_name
+ try:
+ image_service = self.compute_api.image_service
+ vc_image = image_service.show_by_name(context, image_name)
+ vc_image_href = vc_image['id']
+ except exception.ImageNotFound:
+ raise exception.ApiError(_("Failed to find configured image %s")
+ % image_name)
+
+ options = {
+ 'display_name': display_name,
+ 'display_description': display_description,
+ 'project_id': context.project_id,
+ 'availability_zone': availability_zone,
+ 'instance_type_id': instance_type['id'],
+ 'image_ref': vc_image_href,
+ 'vc_count': vc_count,
+ 'status': VsaState.CREATING,
+ }
+ LOG.info(_("Creating VSA: %s") % options)
+
+ # create DB entry for VSA instance
+ try:
+ vsa_ref = self.db.vsa_create(context, options)
+ except exception.Error:
+            raise exception.ApiError(str(sys.exc_info()[1]))
+ vsa_id = vsa_ref['id']
+ vsa_name = vsa_ref['name']
+
+ # check storage parameters
+ try:
+ volume_params = self._check_storage_parameters(context, vsa_name,
+ storage, shared)
+ except exception.ApiError:
+ self.db.vsa_destroy(context, vsa_id)
+ raise exception.ApiError(_("Error in storage parameters: %s")
+ % storage)
+
+ # after creating DB entry, re-check and set some defaults
+ updates = {}
+ if (not hasattr(vsa_ref, 'display_name') or
+ vsa_ref.display_name is None or
+ vsa_ref.display_name == ''):
+ updates['display_name'] = display_name = vsa_name
+ updates['vol_count'] = len(volume_params)
+ vsa_ref = self.update(context, vsa_id, **updates)
+
+ # create volumes
+ if FLAGS.vsa_multi_vol_creation:
+ if len(volume_params) > 0:
+ request_spec = {
+ 'num_volumes': len(volume_params),
+ 'vsa_id': str(vsa_id),
+ 'volumes': volume_params,
+ }
+
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volumes",
+ "args": {"topic": FLAGS.volume_topic,
+ "request_spec": request_spec,
+ "availability_zone": availability_zone}})
+ else:
+ # create BE volumes one-by-one
+ for vol in volume_params:
+ try:
+ vol_name = vol['name']
+ vol_size = vol['size']
+ vol_type_id = vol['volume_type_id']
+ LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\
+ "volume %(vol_name)s, %(vol_size)d GB, "\
+ "type %(vol_type_id)s"), locals())
+
+ vol_type = volume_types.get_volume_type(context,
+ vol['volume_type_id'])
+
+ vol_ref = self.volume_api.create(context,
+ vol_size,
+ None,
+ vol_name,
+ vol['description'],
+ volume_type=vol_type,
+ metadata=dict(to_vsa_id=str(vsa_id)),
+ availability_zone=availability_zone)
+ except:
+ self.update_vsa_status(context, vsa_id,
+ status=VsaState.PARTIAL)
+ raise
+
+ if len(volume_params) == 0:
+ # No BE volumes - ask VSA manager to start VCs
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "create_vsa",
+ "args": {"vsa_id": str(vsa_id)}})
+
+ return vsa_ref
+
+ def update_vsa_status(self, context, vsa_id, status):
+ updates = dict(status=status)
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"),
+ locals())
+ return self.update(context, vsa_id, **updates)
+
+ def update(self, context, vsa_id, **kwargs):
+ """Updates the VSA instance in the datastore.
+
+ :param context: The security context
+ :param vsa_id: ID of the VSA instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :returns: None
+ """
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())
+
+ updatable_fields = ['status', 'vc_count', 'vol_count',
+ 'display_name', 'display_description']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+
+ vc_count = kwargs.get('vc_count', None)
+ if vc_count is not None:
+ # VP-TODO: This request may want to update number of VCs
+ # Get number of current VCs and add/delete VCs appropriately
+ vsa = self.get(context, vsa_id)
+ vc_count = int(vc_count)
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if vsa['vc_count'] != vc_count:
+ self.update_num_vcs(context, vsa, vc_count)
+ changes['vc_count'] = vc_count
+
+ return self.db.vsa_update(context, vsa_id, changes)
+
+ def update_num_vcs(self, context, vsa, vc_count):
+ vsa_name = vsa['name']
+ old_vc_count = int(vsa['vc_count'])
+ if vc_count > old_vc_count:
+ add_cnt = vc_count - old_vc_count
+ LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."),
+ locals())
+ # VP-TODO: actual code for adding new VCs
+
+ elif vc_count < old_vc_count:
+ del_cnt = old_vc_count - vc_count
+ LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."),
+ locals())
+ # VP-TODO: actual code for deleting extra VCs
+
+ def _force_volume_delete(self, ctxt, volume):
+ """Delete a volume, bypassing the check that it must be available."""
+ host = volume['host']
+ if not host:
+ # Deleting volume from database and skipping rpc.
+ self.db.volume_destroy(ctxt, volume['id'])
+ return
+
+ rpc.cast(ctxt,
+ self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume['id']}})
+
+ def delete_vsa_volumes(self, context, vsa_id, direction,
+ force_delete=True):
+ if direction == "FE":
+ volumes = self.get_all_vsa_volumes(context, vsa_id)
+ else:
+ volumes = self.get_all_vsa_drives(context, vsa_id)
+
+ for volume in volumes:
+ try:
+ vol_name = volume['name']
+ LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\
+ "volume %(vol_name)s"), locals())
+ self.volume_api.delete(context, volume['id'])
+ except exception.ApiError:
+ LOG.info(_("Unable to delete volume %s"), volume['name'])
+ if force_delete:
+ LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "\
+ "%(direction)s volume %(vol_name)s"), locals())
+ self._force_volume_delete(context, volume)
+
+ def delete(self, context, vsa_id):
+ """Terminate a VSA instance."""
+ LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)
+
+ # Delete all FrontEnd and BackEnd volumes
+ self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True)
+ self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True)
+
+ # Delete all VC instances
+ instances = self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(vsa_id))})
+ for instance in instances:
+ name = instance['name']
+ LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"),
+ locals())
+ self.compute_api.delete(context, instance['id'])
+
+ # Delete VSA instance
+ self.db.vsa_destroy(context, vsa_id)
+
+ def get(self, context, vsa_id):
+ rv = self.db.vsa_get(context, vsa_id)
+ return rv
+
+ def get_all(self, context):
+ if context.is_admin:
+ return self.db.vsa_get_all(context)
+ return self.db.vsa_get_all_by_project(context, context.project_id)
+
+ def get_vsa_volume_type(self, context):
+ name = FLAGS.vsa_volume_type_name
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except exception.NotFound:
+ volume_types.create(context, name,
+ extra_specs=dict(type='vsa_volume'))
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+
+ return vol_type
+
+ def get_all_vsa_instances(self, context, vsa_id):
+ return self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(vsa_id))})
+
+ def get_all_vsa_volumes(self, context, vsa_id):
+ return self.volume_api.get_all(context,
+ search_opts={'metadata': dict(from_vsa_id=str(vsa_id))})
+
+ def get_all_vsa_drives(self, context, vsa_id):
+ return self.volume_api.get_all(context,
+ search_opts={'metadata': dict(to_vsa_id=str(vsa_id))})
diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py
new file mode 100644
index 000000000..8ac8a1dd5
--- /dev/null
+++ b/nova/vsa/connection.py
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Abstraction of the underlying connection to VC."""
+
+from nova.vsa import fake
+
+
+def get_connection():
+ # Return an object that is able to talk to VCs
+ return fake.FakeVcConnection()
diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py
new file mode 100644
index 000000000..d4248ca01
--- /dev/null
+++ b/nova/vsa/fake.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class FakeVcConnection(object):
+
+ def init_host(self, host):
+ pass
diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py
new file mode 100644
index 000000000..d4c414106
--- /dev/null
+++ b/nova/vsa/manager.py
@@ -0,0 +1,179 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all processes relating to Virtual Storage Arrays (VSA).
+
+**Related Flags**
+
+"""
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import volume
+from nova import vsa
+from nova import utils
+from nova.compute import instance_types
+from nova.vsa import utils as vsa_utils
+from nova.vsa.api import VsaState
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection',
+ 'Driver to use for controlling VSAs')
+
+LOG = logging.getLogger('nova.vsa.manager')
+
+
+class VsaManager(manager.SchedulerDependentManager):
+ """Manages Virtual Storage Arrays (VSAs)."""
+
+ def __init__(self, vsa_driver=None, *args, **kwargs):
+ if not vsa_driver:
+ vsa_driver = FLAGS.vsa_driver
+ self.driver = utils.import_object(vsa_driver)
+ self.compute_manager = utils.import_object(FLAGS.compute_manager)
+
+ self.compute_api = compute.API()
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+
+ if FLAGS.vsa_ec2_user_id is None or \
+ FLAGS.vsa_ec2_access_key is None:
+ raise exception.VSANovaAccessParamNotFound()
+
+ super(VsaManager, self).__init__(*args, **kwargs)
+
+ def init_host(self):
+ self.driver.init_host(host=self.host)
+ super(VsaManager, self).init_host()
+
+ @exception.wrap_exception()
+ def create_vsa(self, context, vsa_id):
+ """Called by API if there were no BE volumes assigned"""
+ LOG.debug(_("Create call received for VSA %s"), vsa_id)
+
+ vsa_id = int(vsa_id) # just in case
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ return self._start_vcs(context, vsa)
+
+ @exception.wrap_exception()
+ def vsa_volume_created(self, context, vol_id, vsa_id, status):
+ """Callback for volume creations"""
+ LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "\
+ "Status %(status)s"), locals())
+ vsa_id = int(vsa_id) # just in case
+
+ # Get all volumes for this VSA
+ # check if any of them still in creating phase
+ drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
+ for drive in drives:
+ if drive['status'] == 'creating':
+ vol_name = drive['name']
+ vol_disp_name = drive['display_name']
+ LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\
+ "in creating phase - wait"), locals())
+ return
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ if len(drives) != vsa['vol_count']:
+ cvol_real = len(drives)
+ cvol_exp = vsa['vol_count']
+ LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\
+ "(%(cvol_real)d of %(cvol_exp)d)"), locals())
+ return
+
+ # all volumes created (successfully or not)
+ return self._start_vcs(context, vsa, drives)
+
+ def _start_vcs(self, context, vsa, drives=[]):
+ """Start VCs for VSA """
+
+ vsa_id = vsa['id']
+ if vsa['status'] == VsaState.CREATING:
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.LAUNCHING)
+ else:
+ return
+
+ # in _separate_ loop go over all volumes and mark as "attached"
+ has_failed_volumes = False
+ for drive in drives:
+ vol_name = drive['name']
+ vol_disp_name = drive['display_name']
+ status = drive['status']
+ LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\
+ "(%(vol_disp_name)s) is in %(status)s state"),
+ locals())
+ if status == 'available':
+ try:
+ # self.volume_api.update(context, volume['id'],
+ # dict(attach_status="attached"))
+ pass
+ except Exception as ex:
+ msg = _("Failed to update attach status for volume "
+ "%(vol_name)s. %(ex)s") % locals()
+ LOG.exception(msg)
+ else:
+ has_failed_volumes = True
+
+ if has_failed_volumes:
+ LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
+ self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.FAILED)
+ return
+
+ # create user-data record for VC
+ storage_data = vsa_utils.generate_user_data(vsa, drives)
+
+ instance_type = instance_types.get_instance_type(
+ vsa['instance_type_id'])
+
+ # now start the VC instance
+
+ vc_count = vsa['vc_count']
+ LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
+ locals())
+ vc_instances = self.compute_api.create(context,
+ instance_type, # vsa['vsa_instance_type'],
+ vsa['image_ref'],
+ min_count=1,
+ max_count=vc_count,
+ display_name='vc-' + vsa['display_name'],
+ display_description='VC for VSA ' + vsa['display_name'],
+ availability_zone=vsa['availability_zone'],
+ user_data=storage_data,
+ metadata=dict(vsa_id=str(vsa_id)))
+
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.CREATED)
diff --git a/nova/vsa/utils.py b/nova/vsa/utils.py
new file mode 100644
index 000000000..1de341ac5
--- /dev/null
+++ b/nova/vsa/utils.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+from xml.etree import ElementTree
+
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+
+def generate_user_data(vsa, volumes):
+ SubElement = ElementTree.SubElement
+
+ e_vsa = ElementTree.Element("vsa")
+
+ e_vsa_detail = SubElement(e_vsa, "id")
+ e_vsa_detail.text = str(vsa['id'])
+ e_vsa_detail = SubElement(e_vsa, "name")
+ e_vsa_detail.text = vsa['display_name']
+ e_vsa_detail = SubElement(e_vsa, "description")
+ e_vsa_detail.text = vsa['display_description']
+ e_vsa_detail = SubElement(e_vsa, "vc_count")
+ e_vsa_detail.text = str(vsa['vc_count'])
+
+ e_vsa_detail = SubElement(e_vsa, "auth_user")
+ e_vsa_detail.text = FLAGS.vsa_ec2_user_id
+ e_vsa_detail = SubElement(e_vsa, "auth_access_key")
+ e_vsa_detail.text = FLAGS.vsa_ec2_access_key
+
+ e_volumes = SubElement(e_vsa, "volumes")
+ for volume in volumes:
+
+ loc = volume['provider_location']
+ if loc is None:
+ ip = ''
+ iscsi_iqn = ''
+ iscsi_portal = ''
+ else:
+ (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
+ (ip, iscsi_portal) = iscsi_target.split(":", 1)
+
+ e_vol = SubElement(e_volumes, "volume")
+ e_vol_detail = SubElement(e_vol, "id")
+ e_vol_detail.text = str(volume['id'])
+ e_vol_detail = SubElement(e_vol, "name")
+ e_vol_detail.text = volume['name']
+ e_vol_detail = SubElement(e_vol, "display_name")
+ e_vol_detail.text = volume['display_name']
+ e_vol_detail = SubElement(e_vol, "size_gb")
+ e_vol_detail.text = str(volume['size'])
+ e_vol_detail = SubElement(e_vol, "status")
+ e_vol_detail.text = volume['status']
+ e_vol_detail = SubElement(e_vol, "ip")
+ e_vol_detail.text = ip
+ e_vol_detail = SubElement(e_vol, "iscsi_iqn")
+ e_vol_detail.text = iscsi_iqn
+ e_vol_detail = SubElement(e_vol, "iscsi_portal")
+ e_vol_detail.text = iscsi_portal
+ e_vol_detail = SubElement(e_vol, "lun")
+ e_vol_detail.text = '0'
+ e_vol_detail = SubElement(e_vol, "sn_host")
+ e_vol_detail.text = volume['host']
+
+ _xml = ElementTree.tostring(e_vsa)
+ return base64.b64encode(_xml)
diff --git a/po/ast.po b/po/ast.po
index 449cddb07..48682ec90 100644
--- a/po/ast.po
+++ b/po/ast.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
diff --git a/po/cs.po b/po/cs.po
index 2dc763838..07bdf1928 100644
--- a/po/cs.po
+++ b/po/cs.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
diff --git a/po/da.po b/po/da.po
index 570629119..0b379c9d7 100644
--- a/po/da.po
+++ b/po/da.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
diff --git a/po/de.po b/po/de.po
index 772ae236c..1f652c373 100644
--- a/po/de.po
+++ b/po/de.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2833,3 +2833,21 @@ msgstr ""
#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
#~ msgstr ""
#~ "Datastore %s ist nicht erreichbar. Versuche es erneut in %d Sekunden."
+
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "Alle vorhandenen FLAGS:"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "%s wird gestartet"
+
+#~ msgid "No such process"
+#~ msgstr "Kein passender Prozess gefunden"
+
+#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "Bedient %s"
diff --git a/po/en_AU.po b/po/en_AU.po
index 3fa62c006..a51b9ff2d 100644
--- a/po/en_AU.po
+++ b/po/en_AU.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
diff --git a/po/en_GB.po b/po/en_GB.po
index b204c93a1..59247f4fa 100644
--- a/po/en_GB.po
+++ b/po/en_GB.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2812,3 +2812,24 @@ msgstr ""
#, python-format
msgid "Removing user %(user)s from project %(project)s"
msgstr ""
+
+#~ msgid "Wrong number of arguments."
+#~ msgstr "Wrong number of arguments."
+
+#~ msgid "No such process"
+#~ msgstr "No such process"
+
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "Full set of FLAGS:"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "pidfile %s does not exist. Daemon not running?\n"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "Starting %s"
+
+#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "Serving %s"
diff --git a/po/es.po b/po/es.po
index f97434041..7371eae8c 100644
--- a/po/es.po
+++ b/po/es.po
@@ -8,20 +8,20 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
-"PO-Revision-Date: 2011-06-30 16:42+0000\n"
-"Last-Translator: David Caro <Unknown>\n"
+"PO-Revision-Date: 2011-08-01 03:23+0000\n"
+"Last-Translator: Juan Alfredo Salas Santillana <Unknown>\n"
"Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
#: ../nova/scheduler/simple.py:122
msgid "No hosts found"
-msgstr "No se han encontrado hosts"
+msgstr "No se encontraron anfitriones."
#: ../nova/exception.py:33
msgid "Unexpected error while running command."
@@ -2566,7 +2566,7 @@ msgstr ""
#: ../nova/auth/manager.py:289
#, python-format
msgid "User %(uid)s is not a member of project %(pjid)s"
-msgstr ""
+msgstr "El usuario %(uid)s no es miembro del proyecto %(pjid)s"
#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309
#, python-format
@@ -2584,7 +2584,7 @@ msgstr "Debes especificar un proyecto"
#: ../nova/auth/manager.py:414
#, python-format
msgid "The %s role can not be found"
-msgstr "El rol %s no se ha podido encontrar"
+msgstr ""
#: ../nova/auth/manager.py:416
#, python-format
@@ -2614,27 +2614,27 @@ msgstr ""
#: ../nova/auth/manager.py:515
#, python-format
msgid "Created project %(name)s with manager %(manager_user)s"
-msgstr ""
+msgstr "Creado el proyecto %(name)s con administrador %(manager_user)s"
#: ../nova/auth/manager.py:533
#, python-format
msgid "modifying project %s"
-msgstr "modificando proyecto %s"
+msgstr "Modificando proyecto %s"
#: ../nova/auth/manager.py:545
#, python-format
msgid "Adding user %(uid)s to project %(pid)s"
-msgstr ""
+msgstr "Agregando usuario %(uid)s para el proyecto %(pid)s"
#: ../nova/auth/manager.py:566
#, python-format
msgid "Remove user %(uid)s from project %(pid)s"
-msgstr ""
+msgstr "Borrar usuario %(uid)s del proyecto %(pid)s"
#: ../nova/auth/manager.py:592
#, python-format
msgid "Deleting project %s"
-msgstr "Eliminando proyecto %s"
+msgstr "Borrando proyecto %s"
#: ../nova/auth/manager.py:650
#, python-format
@@ -2644,7 +2644,7 @@ msgstr ""
#: ../nova/auth/manager.py:659
#, python-format
msgid "Deleting user %s"
-msgstr "Eliminando usuario %s"
+msgstr "Borrando usuario %s"
#: ../nova/auth/manager.py:669
#, python-format
@@ -2710,7 +2710,7 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:478
#, python-format
msgid "Group can't be created because user %s doesn't exist"
-msgstr ""
+msgstr "El grupo no se puede crear porque el usuario %s no existe"
#: ../nova/auth/ldapdriver.py:495
#, python-format
@@ -2730,18 +2730,20 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:513
#, python-format
msgid "User %(uid)s is already a member of the group %(group_dn)s"
-msgstr ""
+msgstr "El usuario %(uid)s es actualmente miembro del grupo %(group_dn)s"
#: ../nova/auth/ldapdriver.py:524
#, python-format
msgid ""
"User %s can't be removed from the group because the user doesn't exist"
msgstr ""
+"El usuario %s no se pudo borrar de el grupo a causa de que el usuario no "
+"existe"
#: ../nova/auth/ldapdriver.py:528
#, python-format
msgid "User %s is not a member of the group"
-msgstr ""
+msgstr "El usuario %s no es miembro de el grupo"
#: ../nova/auth/ldapdriver.py:542
#, python-format
@@ -2878,6 +2880,10 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#~ "El almacen de datos %s es inalcanzable. Reintentandolo en %d segundos."
#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "Sirviendo %s"
+
+#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "No puedo obtener IP, usando 127.0.0.1 %s"
@@ -3037,11 +3043,25 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#~ msgid "Detach volume %s from mountpoint %s on instance %s"
#~ msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia %s"
+#~ msgid "unexpected exception getting connection"
+#~ msgstr "excepción inesperada al obtener la conexión"
+
+#~ msgid "unexpected error during update"
+#~ msgstr "error inesperado durante la actualización"
+
#, python-format
#~ msgid "Cannot get blockstats for \"%s\" on \"%s\""
#~ msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\""
#, python-format
+#~ msgid "updating %s..."
+#~ msgstr "actualizando %s..."
+
+#, python-format
+#~ msgid "Found instance: %s"
+#~ msgstr "Encontrada interfaz: %s"
+
+#, python-format
#~ msgid "Cannot get ifstats for \"%s\" on \"%s\""
#~ msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\""
@@ -3319,3 +3339,20 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#, python-format
#~ msgid "Spawning VM %s created %s."
#~ msgstr "Iniciando VM %s creado %s."
+
+#~ msgid "No such process"
+#~ msgstr "No existe el proceso"
+
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "Conjunto completo de opciones (FLAGS):"
+
+#~ msgid "Wrong number of arguments."
+#~ msgstr "Cantidad de argumentos incorrecta"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "El \"pidfile\" %s no existe. Quizás el servicio no esté corriendo.\n"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "Iniciando %s"
diff --git a/po/fr.po b/po/fr.po
index 83e4e7af0..7cb298a94 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2929,3 +2929,51 @@ msgstr "Ajout de l'utilisateur %(user)s au projet %(project)s"
#, python-format
msgid "Removing user %(user)s from project %(project)s"
msgstr "Suppression de l'utilisateur %(user)s du projet %(project)s"
+
+#~ msgid "Wrong number of arguments."
+#~ msgstr "Nombre d'arguments incorrect."
+
+#~ msgid "No such process"
+#~ msgstr "Aucun processus de ce type"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "Démarrage de %s"
+
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "Ensemble de propriétés complet :"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr ""
+#~ "Le fichier pid %s n'existe pas. Est-ce que le processus est en cours "
+#~ "d'exécution ?\n"
+
+#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "En train de servir %s"
+
+#, python-format
+#~ msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
+#~ msgstr "Ne peut pas récupérer blockstats pour \"%(disk)s\" sur \"%(iid)s\""
+
+#, python-format
+#~ msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
+#~ msgstr "Ne peut pas récupérer ifstats pour \"%(interface)s\" sur \"%(iid)s\""
+
+#~ msgid "unexpected error during update"
+#~ msgstr "erreur inopinée pendant la mise à jour"
+
+#, python-format
+#~ msgid "updating %s..."
+#~ msgstr "mise à jour %s..."
+
+#, python-format
+#~ msgid "Found instance: %s"
+#~ msgstr "Instance trouvée : %s"
+
+#~ msgid "unexpected exception getting connection"
+#~ msgstr "erreur inopinée pendant la connexion"
+
+#~ msgid "Starting instance monitor"
+#~ msgstr "Démarrage du superviseur d'instance"
diff --git a/po/it.po b/po/it.po
index 6bfcf1274..e166297f1 100644
--- a/po/it.po
+++ b/po/it.po
@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
-"PO-Revision-Date: 2011-02-22 19:34+0000\n"
-"Last-Translator: Armando Migliaccio <Unknown>\n"
+"PO-Revision-Date: 2011-08-21 22:50+0000\n"
+"Last-Translator: Guido Davide Dall'Olio <Unknown>\n"
"Language-Team: Italian <it@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-22 04:48+0000\n"
+"X-Generator: Launchpad (build 13697)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -449,24 +449,24 @@ msgstr ""
#: ../nova/scheduler/simple.py:53
#, python-format
msgid "Host %s is not alive"
-msgstr ""
+msgstr "L'host %s non è attivo"
#: ../nova/scheduler/simple.py:65
msgid "All hosts have too many cores"
-msgstr ""
+msgstr "Gli host hanno troppi core"
#: ../nova/scheduler/simple.py:87
#, python-format
msgid "Host %s not available"
-msgstr ""
+msgstr "Host %s non disponibile"
#: ../nova/scheduler/simple.py:99
msgid "All hosts have too many gigabytes"
-msgstr ""
+msgstr "Gli host hanno troppi gigabyte"
#: ../nova/scheduler/simple.py:119
msgid "All hosts have too many networks"
-msgstr ""
+msgstr "Gli host hanno troppe reti"
#: ../nova/volume/manager.py:85
#, python-format
@@ -496,7 +496,7 @@ msgstr ""
#: ../nova/volume/manager.py:123
#, python-format
msgid "volume %s: created successfully"
-msgstr ""
+msgstr "volume %s: creato con successo"
#: ../nova/volume/manager.py:131
msgid "Volume is still attached"
@@ -514,12 +514,12 @@ msgstr ""
#: ../nova/volume/manager.py:138
#, python-format
msgid "volume %s: deleting"
-msgstr ""
+msgstr "volume %s: rimuovendo"
#: ../nova/volume/manager.py:147
#, python-format
msgid "volume %s: deleted successfully"
-msgstr ""
+msgstr "volume %s: rimosso con successo"
#: ../nova/virt/xenapi/fake.py:74
#, python-format
@@ -529,7 +529,7 @@ msgstr ""
#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404
#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478
msgid "Raising NotImplemented"
-msgstr ""
+msgstr "Sollevando NotImplemented"
#: ../nova/virt/xenapi/fake.py:306
#, python-format
@@ -539,7 +539,7 @@ msgstr ""
#: ../nova/virt/xenapi/fake.py:341
#, python-format
msgid "Calling %(localname)s %(impl)s"
-msgstr ""
+msgstr "Chiamando %(localname)s %(impl)s"
#: ../nova/virt/xenapi/fake.py:346
#, python-format
@@ -564,17 +564,17 @@ msgstr ""
#: ../nova/virt/connection.py:73
msgid "Failed to open connection to the hypervisor"
-msgstr ""
+msgstr "Fallita l'apertura della connessione verso l'hypervisor"
#: ../nova/network/linux_net.py:187
#, python-format
msgid "Starting VLAN inteface %s"
-msgstr ""
+msgstr "Avviando l'interfaccia VLAN %s"
#: ../nova/network/linux_net.py:208
#, python-format
msgid "Starting Bridge interface for %s"
-msgstr ""
+msgstr "Avviando l'interfaccia Bridge per %s"
#. pylint: disable=W0703
#: ../nova/network/linux_net.py:314
@@ -632,7 +632,7 @@ msgstr "Il risultato é %s"
#: ../nova/utils.py:159
#, python-format
msgid "Running cmd (SSH): %s"
-msgstr ""
+msgstr "Eseguendo cmd (SSH): %s"
#: ../nova/utils.py:217
#, python-format
@@ -642,7 +642,7 @@ msgstr "debug in callback: %s"
#: ../nova/utils.py:222
#, python-format
msgid "Running %s"
-msgstr ""
+msgstr "Eseguendo %s"
#: ../nova/utils.py:262
#, python-format
@@ -697,12 +697,12 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171
#, python-format
msgid "Created VM %s..."
-msgstr ""
+msgstr "Creata VM %s..."
#: ../nova/virt/xenapi/vm_utils.py:138
#, python-format
msgid "Created VM %(instance_name)s as %(vm_ref)s."
-msgstr ""
+msgstr "Creata VM %(instance_name)s come %(vm_ref)s"
#: ../nova/virt/xenapi/vm_utils.py:168
#, python-format
@@ -771,7 +771,7 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:332
#, python-format
msgid "Glance image %s"
-msgstr ""
+msgstr "Immagine Glance %s"
#. we need to invoke a plugin for copying VDI's
#. content into proper path
@@ -783,7 +783,7 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:352
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
-msgstr ""
+msgstr "Kernel/Ramdisk VDI %s distrutti"
#: ../nova/virt/xenapi/vm_utils.py:361
#, python-format
@@ -793,7 +793,7 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402
#, python-format
msgid "Looking up vdi %s for PV kernel"
-msgstr ""
+msgstr "Cercando vdi %s per kernel PV"
#: ../nova/virt/xenapi/vm_utils.py:397
#, python-format
@@ -2802,37 +2802,24 @@ msgstr ""
msgid "Removing user %(user)s from project %(project)s"
msgstr ""
-#, python-format
-#~ msgid ""
-#~ "%s\n"
-#~ "Command: %s\n"
-#~ "Exit code: %s\n"
-#~ "Stdout: %r\n"
-#~ "Stderr: %r"
-#~ msgstr ""
-#~ "%s\n"
-#~ "Comando: %s\n"
-#~ "Exit code: %s\n"
-#~ "Stdout: %r\n"
-#~ "Stderr: %r"
-
-#, python-format
-#~ msgid "(%s) publish (key: %s) %s"
-#~ msgstr "(%s) pubblica (chiave: %s) %s"
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "Insieme di FLAGS:"
#, python-format
-#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr ""
-#~ "Il server AMQP su %s:%d non é raggiungibile. Riprovare in %d secondi."
+#~ "Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n"
#, python-format
-#~ msgid "Binding %s to %s with key %s"
-#~ msgstr "Collegando %s a %s con la chiave %s"
+#~ msgid "Starting %s"
+#~ msgstr "Avvio di %s"
#, python-format
-#~ msgid "Starting %s node"
-#~ msgstr "Avviando il nodo %s"
+#~ msgid "Serving %s"
+#~ msgstr "Servire %s"
-#, python-format
-#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
-#~ msgstr "Datastore %s é irrangiungibile. Riprovare in %d seconds."
+#~ msgid "Wrong number of arguments."
+#~ msgstr "Numero errato di argomenti"
+
+#~ msgid "No such process"
+#~ msgstr "Nessun processo trovato"
diff --git a/po/ja.po b/po/ja.po
index a7906ede8..179302b55 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2879,6 +2879,17 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#~ msgstr "データストア %s に接続できません。 %d 秒後に再接続します。"
#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "%s サービスの開始"
+
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "FLAGSの一覧:"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n"
+
+#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "IPを取得できません。127.0.0.1 を %s として使います。"
@@ -3039,6 +3050,13 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#~ msgstr "Detach volume: ボリューム %s をマウントポイント %s (インスタンス%s)からデタッチします。"
#, python-format
+#~ msgid "updating %s..."
+#~ msgstr "%s の情報の更新…"
+
+#~ msgid "unexpected error during update"
+#~ msgstr "更新の最中に予期しないエラーが発生しました。"
+
+#, python-format
#~ msgid "Cannot get blockstats for \"%s\" on \"%s\""
#~ msgstr "ブロックデバイス \"%s\" の統計を \"%s\" について取得できません。"
@@ -3046,6 +3064,13 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#~ msgid "Cannot get ifstats for \"%s\" on \"%s\""
#~ msgstr "インタフェース \"%s\" の統計を \"%s\" について取得できません。"
+#~ msgid "unexpected exception getting connection"
+#~ msgstr "接続に際し予期しないエラーが発生しました。"
+
+#, python-format
+#~ msgid "Found instance: %s"
+#~ msgstr "インスタンス %s が見つかりました。"
+
#, python-format
#~ msgid "No service for %s, %s"
#~ msgstr "%s, %s のserviceが存在しません。"
@@ -3318,3 +3343,24 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#, python-format
#~ msgid "volume %s: creating lv of size %sG"
#~ msgstr "ボリューム%sの%sGのlv (論理ボリューム) を作成します。"
+
+#~ msgid "Wrong number of arguments."
+#~ msgstr "引数の数が異なります。"
+
+#~ msgid "No such process"
+#~ msgstr "そのようなプロセスはありません"
+
+#, python-format
+#~ msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
+#~ msgstr "\"%(iid)s\" 上の \"%(disk)s\" 用のブロック統計(blockstats)が取得できません"
+
+#, python-format
+#~ msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
+#~ msgstr "\"%(iid)s\" 上の \"%(interface)s\" 用インターフェース統計(ifstats)が取得できません"
+
+#~ msgid "Starting instance monitor"
+#~ msgstr "インスタンスモニタを開始しています"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "%s を起動中"
diff --git a/po/pt_BR.po b/po/pt_BR.po
index b3aefce44..d6d57a9b1 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
-"PO-Revision-Date: 2011-03-24 14:51+0000\n"
+"PO-Revision-Date: 2011-07-25 17:40+0000\n"
"Last-Translator: msinhore <msinhore@gmail.com>\n"
"Language-Team: Brazilian Portuguese <pt_BR@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -36,6 +36,11 @@ msgid ""
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r"
msgstr ""
+"%(description)s\n"
+"Comando: %(cmd)s\n"
+"Código de saída: %(exit_code)s\n"
+"Saída padrão: %(stdout)r\n"
+"Erro: %(stderr)r"
#: ../nova/exception.py:107
msgid "DB exception wrapped"
@@ -392,7 +397,7 @@ msgstr "instância %s: suspendendo"
#: ../nova/compute/manager.py:472
#, python-format
msgid "instance %s: resuming"
-msgstr ""
+msgstr "instância %s: resumindo"
#: ../nova/compute/manager.py:491
#, python-format
@@ -407,12 +412,12 @@ msgstr "instância %s: desbloqueando"
#: ../nova/compute/manager.py:513
#, python-format
msgid "instance %s: getting locked state"
-msgstr ""
+msgstr "instância %s: obtendo estado de bloqueio"
#: ../nova/compute/manager.py:526
#, python-format
msgid "instance %s: reset network"
-msgstr ""
+msgstr "instância %s: reset da rede"
#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515
#, python-format
@@ -429,6 +434,7 @@ msgstr "instância %s: obtendo console ajax"
msgid ""
"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
+"instância %(instance_id)s: anexando volume %(volume_id)s em %(mountpoint)s"
#. pylint: disable=W0702
#. NOTE(vish): The inline callback eats the exception info so we
@@ -438,6 +444,8 @@ msgstr ""
#, python-format
msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing"
msgstr ""
+"instância %(instance_id)s: falha ao anexar ponto de montagem "
+"%(mountpoint)s, removendo"
#: ../nova/compute/manager.py:585
#, python-format
@@ -458,7 +466,7 @@ msgstr "Host %s não está ativo"
#: ../nova/scheduler/simple.py:65
msgid "All hosts have too many cores"
-msgstr ""
+msgstr "Todos os hosts tem muitos núcleos de CPU"
#: ../nova/scheduler/simple.py:87
#, python-format
@@ -783,7 +791,7 @@ msgstr "Tamanho da imagem %(image)s:%(virtual_size)d"
#: ../nova/virt/xenapi/vm_utils.py:332
#, python-format
msgid "Glance image %s"
-msgstr ""
+msgstr "Imagem Glance %s"
#. we need to invoke a plugin for copying VDI's
#. content into proper path
@@ -815,7 +823,7 @@ msgstr "Kernel PV no VDI: %s"
#: ../nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Running pygrub against %s"
-msgstr ""
+msgstr "Rodando pygrub em %s"
#: ../nova/virt/xenapi/vm_utils.py:411
#, python-format
@@ -849,12 +857,12 @@ msgstr "(VM_UTILS) xenapi power_state -> |%s|"
#: ../nova/virt/xenapi/vm_utils.py:525
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
-msgstr ""
+msgstr "O VHD %(vdi_uuid)s tem pai %(parent_ref)s"
#: ../nova/virt/xenapi/vm_utils.py:542
#, python-format
msgid "Re-scanning SR %s"
-msgstr ""
+msgstr "Re-escaneando SR %s"
#: ../nova/virt/xenapi/vm_utils.py:567
#, python-format
@@ -2857,6 +2865,17 @@ msgstr ""
#~ "Repositório de dados %s não pode ser atingido. Tentando novamente em %d "
#~ "segundos."
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "Conjunto completo de FLAGS:"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "Iniciando %s"
+
+#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "Servindo %s"
+
#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "Não foi possível obter IP, usando 127.0.0.1 %s"
@@ -2965,3 +2984,14 @@ msgstr ""
#, python-format
#~ msgid "Created user %s (admin: %r)"
#~ msgstr "Criado usuário %s (administrador: %r)"
+
+#~ msgid "No such process"
+#~ msgstr "Processo inexistente"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr ""
+#~ "Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n"
+
+#~ msgid "Wrong number of arguments."
+#~ msgstr "Número errado de argumentos."
diff --git a/po/ru.po b/po/ru.po
index 1bf672fc3..746db964a 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2790,6 +2790,10 @@ msgid "Removing user %(user)s from project %(project)s"
msgstr ""
#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "Запускается %s"
+
+#, python-format
#~ msgid "arg: %s\t\tval: %s"
#~ msgstr "arg: %s\t\tval: %s"
@@ -2841,6 +2845,13 @@ msgstr ""
#~ msgid "Adding role %s to user %s in project %s"
#~ msgstr "Добавление роли %s для пользователя %s в проект %s"
+#~ msgid "unexpected error during update"
+#~ msgstr "неожиданная ошибка во время обновления"
+
+#, python-format
+#~ msgid "updating %s..."
+#~ msgstr "обновление %s..."
+
#, python-format
#~ msgid "Getting object: %s / %s"
#~ msgstr "Получение объекта: %s / %s"
@@ -2892,6 +2903,10 @@ msgstr ""
#~ msgstr "Не удалось получить IP, используем 127.0.0.1 %s"
#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "pidfile %s не обнаружен. Демон не запущен?\n"
+
+#, python-format
#~ msgid "Getting from %s: %s"
#~ msgstr "Получение из %s: %s"
@@ -2906,3 +2921,6 @@ msgstr ""
#, python-format
#~ msgid "Authenticated Request For %s:%s)"
#~ msgstr "Запрос аутентификации для %s:%s)"
+
+#~ msgid "Wrong number of arguments."
+#~ msgstr "Неверное число аргументов."
diff --git a/po/tl.po b/po/tl.po
index 1ae59330b..84e9d26e6 100644
--- a/po/tl.po
+++ b/po/tl.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
diff --git a/po/uk.po b/po/uk.po
index 481851e1c..bcc53fed3 100644
--- a/po/uk.po
+++ b/po/uk.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2793,6 +2793,14 @@ msgstr ""
#~ msgstr "AMQP сервер %s:%d недоступний. Спроба під'єднання через %d секунд."
#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "Запускається %s"
+
+#, python-format
+#~ msgid "Serving %s"
+#~ msgstr "Обслуговування %s"
+
+#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s"
diff --git a/po/zh_CN.po b/po/zh_CN.po
index d0ddcd2f7..6284ee46c 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -8,14 +8,18 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
-"PO-Revision-Date: 2011-06-14 14:44+0000\n"
-"Last-Translator: chong <Unknown>\n"
+"PO-Revision-Date: 2011-08-19 09:26+0000\n"
+"Last-Translator: zhangjunfeng <Unknown>\n"
"Language-Team: Chinese (Simplified) <zh_CN@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-20 05:06+0000\n"
+"X-Generator: Launchpad (build 13697)\n"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "启动 %s 中"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -44,7 +48,7 @@ msgstr ""
#: ../nova/exception.py:107
msgid "DB exception wrapped"
-msgstr ""
+msgstr "数据库异常"
#. exc_type, exc_value, exc_traceback = sys.exc_info()
#: ../nova/exception.py:120
@@ -84,7 +88,7 @@ msgstr "获取外网IP失败"
#: ../nova/api/openstack/servers.py:152
#, python-format
msgid "%(param)s property not found for image %(_image_id)s"
-msgstr ""
+msgstr "没有找到镜像文件%(_image_id)s 的属性 %(param)s"
#: ../nova/api/openstack/servers.py:168
msgid "No keypairs defined"
@@ -93,55 +97,55 @@ msgstr "未定义密钥对"
#: ../nova/api/openstack/servers.py:238
#, python-format
msgid "Compute.api::lock %s"
-msgstr ""
+msgstr "compute.api::加锁 %s"
#: ../nova/api/openstack/servers.py:253
#, python-format
msgid "Compute.api::unlock %s"
-msgstr ""
+msgstr "compute.api::解锁 %s"
#: ../nova/api/openstack/servers.py:267
#, python-format
msgid "Compute.api::get_lock %s"
-msgstr ""
+msgstr "Compute.api::得到锁 %s"
#: ../nova/api/openstack/servers.py:281
#, python-format
msgid "Compute.api::reset_network %s"
-msgstr ""
+msgstr "Compute.api::重置网络 %s"
#: ../nova/api/openstack/servers.py:292
#, python-format
msgid "Compute.api::pause %s"
-msgstr ""
+msgstr "Compute.api::暂停 %s"
#: ../nova/api/openstack/servers.py:303
#, python-format
msgid "Compute.api::unpause %s"
-msgstr ""
+msgstr "Compute.api::继续 %s"
#: ../nova/api/openstack/servers.py:314
#, python-format
msgid "compute.api::suspend %s"
-msgstr ""
+msgstr "compute.api::挂起 %s"
#: ../nova/api/openstack/servers.py:325
#, python-format
msgid "compute.api::resume %s"
-msgstr ""
+msgstr "compute.api::恢复 %s"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
#, python-format
msgid "Instance %s not found"
-msgstr ""
+msgstr "实例 %s 没有找到"
#. NOTE: No Resource Pool concept so far
#: ../nova/virt/xenapi/volumeops.py:51
#, python-format
msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s"
-msgstr ""
+msgstr "挂载卷:%(instance_name)s, %(device_path)s, %(mountpoint)s"
#: ../nova/virt/xenapi/volumeops.py:69
#, python-format
@@ -2666,12 +2670,12 @@ msgstr "用户 %s 不存在"
#: ../nova/auth/ldapdriver.py:472
#, python-format
msgid "Group can't be created because group %s already exists"
-msgstr ""
+msgstr "组不能被创建,因为组 %s 已经存在"
#: ../nova/auth/ldapdriver.py:478
#, python-format
msgid "Group can't be created because user %s doesn't exist"
-msgstr ""
+msgstr "组不能被创建,因为用户 %s 不存在"
#: ../nova/auth/ldapdriver.py:495
#, python-format
@@ -2686,50 +2690,50 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521
#, python-format
msgid "The group at dn %s doesn't exist"
-msgstr ""
+msgstr "识别名为 %s 的组不存在"
#: ../nova/auth/ldapdriver.py:513
#, python-format
msgid "User %(uid)s is already a member of the group %(group_dn)s"
-msgstr ""
+msgstr "用户 %(uid)s 已经是 组 %(group_dn)s 中的成员"
#: ../nova/auth/ldapdriver.py:524
#, python-format
msgid ""
"User %s can't be removed from the group because the user doesn't exist"
-msgstr ""
+msgstr "用户 %s 不能从组中删除,因为这个用户不存在"
#: ../nova/auth/ldapdriver.py:528
#, python-format
msgid "User %s is not a member of the group"
-msgstr ""
+msgstr "用户 %s 不是这个组的成员"
#: ../nova/auth/ldapdriver.py:542
#, python-format
msgid ""
"Attempted to remove the last member of a group. Deleting the group at %s "
"instead."
-msgstr ""
+msgstr "尝试删除组中最后一个成员,用删除组 %s 来代替。"
#: ../nova/auth/ldapdriver.py:549
#, python-format
msgid "User %s can't be removed from all because the user doesn't exist"
-msgstr ""
+msgstr "用户 %s 不能从系统中删除,因为这个用户不存在"
#: ../nova/auth/ldapdriver.py:564
#, python-format
msgid "Group at dn %s doesn't exist"
-msgstr ""
+msgstr "可识别名为 %s 的组不存在"
#: ../nova/virt/xenapi/network_utils.py:40
#, python-format
msgid "Found non-unique network for bridge %s"
-msgstr ""
+msgstr "发现网桥 %s 的网络不唯一"
#: ../nova/virt/xenapi/network_utils.py:43
#, python-format
msgid "Found no network for bridge %s"
-msgstr ""
+msgstr "发现网桥 %s 没有网络"
#: ../nova/api/ec2/admin.py:97
#, python-format
@@ -2744,22 +2748,22 @@ msgstr "删除用户: %s"
#: ../nova/api/ec2/admin.py:127
#, python-format
msgid "Adding role %(role)s to user %(user)s for project %(project)s"
-msgstr ""
+msgstr "添加角色 %(role)s 给项目 %(project)s 中的用户 %(user)s"
#: ../nova/api/ec2/admin.py:131
#, python-format
msgid "Adding sitewide role %(role)s to user %(user)s"
-msgstr ""
+msgstr "给用户 %(user)s 添加站点角色 %(role)s"
#: ../nova/api/ec2/admin.py:137
#, python-format
msgid "Removing role %(role)s from user %(user)s for project %(project)s"
-msgstr ""
+msgstr "删除项目 %(project)s中用户 %(user)s的角色 %(role)s"
#: ../nova/api/ec2/admin.py:141
#, python-format
msgid "Removing sitewide role %(role)s from user %(user)s"
-msgstr ""
+msgstr "删除用户 %(user)s 的站点角色 %(role)s"
#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223
msgid "operation must be add or remove"
@@ -2768,22 +2772,22 @@ msgstr "操作必须为添加或删除"
#: ../nova/api/ec2/admin.py:159
#, python-format
msgid "Getting x509 for user: %(name)s on project: %(project)s"
-msgstr ""
+msgstr "获得用户: %(name)s 在项目 :%(project)s中的x509"
#: ../nova/api/ec2/admin.py:177
#, python-format
msgid "Create project %(name)s managed by %(manager_user)s"
-msgstr ""
+msgstr "创建被%(manager_user)s 管理的项目 %(name)s"
#: ../nova/api/ec2/admin.py:190
#, python-format
msgid "Modify project: %(name)s managed by %(manager_user)s"
-msgstr ""
+msgstr "更改被 %(manager_user)s 管理的项目: %(name)s"
#: ../nova/api/ec2/admin.py:200
#, python-format
msgid "Delete project: %s"
-msgstr "删除工程 %s"
+msgstr ""
#: ../nova/api/ec2/admin.py:214
#, python-format
@@ -2795,94 +2799,19 @@ msgstr "添加用户 %(user)s 到项目 %(project)s 中"
msgid "Removing user %(user)s from project %(project)s"
msgstr "从项目 %(project)s 中移除用户 %(user)s"
-#, python-format
-#~ msgid ""
-#~ "%s\n"
-#~ "Command: %s\n"
-#~ "Exit code: %s\n"
-#~ "Stdout: %r\n"
-#~ "Stderr: %r"
-#~ msgstr ""
-#~ "%s\n"
-#~ "命令:%s\n"
-#~ "退出代码:%s\n"
-#~ "标准输出(stdout):%r\n"
-#~ "标准错误(stderr):%r"
+#~ msgid "Full set of FLAGS:"
+#~ msgstr "FLAGS全集:"
-#, python-format
-#~ msgid "Binding %s to %s with key %s"
-#~ msgstr "将%s绑定到%s(以%s键值)"
+#~ msgid "No such process"
+#~ msgstr "没有该进程"
#, python-format
-#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
-#~ msgstr "位于%s:%d的AMQP服务器不可用。%d秒后重试。"
+#~ msgid "Serving %s"
+#~ msgstr "正在为 %s 服务"
#, python-format
-#~ msgid "Getting from %s: %s"
-#~ msgstr "从%s获得如下内容:%s"
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "pidfile %s 不存在,守护进程是否运行?\n"
-#, python-format
-#~ msgid "Starting %s node"
-#~ msgstr "启动%s节点"
-
-#, python-format
-#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
-#~ msgstr "数据储存服务%s不可用。%d秒之后继续尝试。"
-
-#, python-format
-#~ msgid "(%s) publish (key: %s) %s"
-#~ msgstr "(%s)发布(键值:%s)%s"
-
-#, python-format
-#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
-#~ msgstr "不能获取IP,将使用 127.0.0.1 %s"
-
-#, python-format
-#~ msgid ""
-#~ "Access key %s has had %d failed authentications and will be locked out for "
-#~ "%d minutes."
-#~ msgstr "访问键 %s时,存在%d个失败的认证,将于%d分钟后解锁"
-
-#, python-format
-#~ msgid "Authenticated Request For %s:%s)"
-#~ msgstr "为%s:%s申请认证"
-
-#, python-format
-#~ msgid "arg: %s\t\tval: %s"
-#~ msgstr "键为: %s\t\t值为: %s"
-
-#, python-format
-#~ msgid "Getting x509 for user: %s on project: %s"
-#~ msgstr "为用户 %s从工程%s中获取 x509"
-
-#, python-format
-#~ msgid "Create project %s managed by %s"
-#~ msgstr "创建工程%s,此工程由%s管理"
-
-#, python-format
-#~ msgid "Unsupported API request: controller = %s,action = %s"
-#~ msgstr "不支持的API请求: 控制器 = %s,执行 = %s"
-
-#, python-format
-#~ msgid "Adding sitewide role %s to user %s"
-#~ msgstr "增加站点范围的 %s角色给用户 %s"
-
-#, python-format
-#~ msgid "Adding user %s to project %s"
-#~ msgstr "增加用户%s到%s工程"
-
-#, python-format
-#~ msgid "Unauthorized request for controller=%s and action=%s"
-#~ msgstr "对控制器=%s及动作=%s未经授权"
-
-#, python-format
-#~ msgid "Removing user %s from project %s"
-#~ msgstr "正将用户%s从工程%s中移除"
-
-#, python-format
-#~ msgid "Adding role %s to user %s for project %s"
-#~ msgstr "正将%s角色赋予用户%s(在工程%s中)"
-
-#, python-format
-#~ msgid "Removing role %s from user %s for project %s"
-#~ msgstr "正将角色%s从用户%s在工程%s中移除"
+#~ msgid "Wrong number of arguments."
+#~ msgstr "错误参数个数。"
diff --git a/po/zh_TW.po b/po/zh_TW.po
index 896e69618..a5a826aa0 100644
--- a/po/zh_TW.po
+++ b/po/zh_TW.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
-"X-Generator: Launchpad (build 13405)\n"
+"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
+"X-Generator: Launchpad (build 13573)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2787,3 +2787,14 @@ msgstr ""
#, python-format
msgid "Removing user %(user)s from project %(project)s"
msgstr ""
+
+#~ msgid "No such process"
+#~ msgstr "沒有此一程序"
+
+#, python-format
+#~ msgid "pidfile %s does not exist. Daemon not running?\n"
+#~ msgstr "pidfile %s 不存在. Daemon未啟動?\n"
+
+#, python-format
+#~ msgid "Starting %s"
+#~ msgstr "正在啟動 %s"
diff --git a/run_tests.sh b/run_tests.sh
index 8f2b51757..c1fda4cf9 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -1,5 +1,7 @@
#!/bin/bash
+set -eu
+
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Nova's test suite(s)"
@@ -24,13 +26,13 @@ function usage {
function process_option {
case "$1" in
-h|--help) usage;;
- -V|--virtual-env) let always_venv=1; let never_venv=0;;
- -N|--no-virtual-env) let always_venv=0; let never_venv=1;;
- -r|--recreate-db) let recreate_db=1;;
- -n|--no-recreate-db) let recreate_db=0;;
- -f|--force) let force=1;;
- -p|--pep8) let just_pep8=1;;
- -c|--coverage) let coverage=1;;
+ -V|--virtual-env) always_venv=1; never_venv=0;;
+ -N|--no-virtual-env) always_venv=0; never_venv=1;;
+ -r|--recreate-db) recreate_db=1;;
+ -n|--no-recreate-db) recreate_db=0;;
+ -f|--force) force=1;;
+ -p|--pep8) just_pep8=1;;
+ -c|--coverage) coverage=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
@@ -67,7 +69,7 @@ function run_tests {
ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
if [ "$ERRSIZE" -lt "40" ];
then
- cat run_tests.log
+ cat run_tests.log
fi
fi
return $RESULT
@@ -130,7 +132,7 @@ if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
-run_tests || exit
+run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to