author    Anthony Young <sleepsonthefloor@gmail.com>  2011-08-31 19:28:57 -0700
committer Anthony Young <sleepsonthefloor@gmail.com>  2011-08-31 19:28:57 -0700
commit    b4b03e0beb873a0f4853f0da6a64b9bb388d73e8 (patch)
tree      71e65f654dde873c4c50e91f29764867d3a55866 /nova
parent    ced3ea3e8d7cf02f988d968d6078182815226719 (diff)
parent    e0e98075fc520428033e7ebd11eb68d37a4ca5c8 (diff)
merge trunk
Diffstat (limited to 'nova')
-rw-r--r--  nova/api/ec2/__init__.py | 27
-rw-r--r--  nova/api/ec2/cloud.py | 50
-rw-r--r--  nova/api/openstack/common.py | 78
-rw-r--r--  nova/api/openstack/contrib/floating_ips.py | 9
-rw-r--r--  nova/api/openstack/contrib/simple_tenant_usage.py | 236
-rw-r--r--  nova/api/openstack/contrib/virtual_storage_arrays.py | 606
-rw-r--r--  nova/api/openstack/contrib/volumes.py | 36
-rw-r--r--  nova/api/openstack/contrib/volumetypes.py | 197
-rw-r--r--  nova/api/openstack/create_instance_helper.py | 13
-rw-r--r--  nova/api/openstack/servers.py | 22
-rw-r--r--  nova/api/openstack/views/servers.py | 13
-rw-r--r--  nova/compute/api.py | 188
-rw-r--r--  nova/compute/manager.py | 465
-rw-r--r--  nova/compute/task_states.py | 59
-rw-r--r--  nova/compute/vm_states.py | 39
-rw-r--r--  nova/context.py | 2
-rw-r--r--  nova/db/api.py | 129
-rw-r--r--  nova/db/sqlalchemy/api.py | 477
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py | 18
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py | 12
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py | 115
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py | 75
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py | 138
-rw-r--r--  nova/db/sqlalchemy/migration.py | 4
-rw-r--r--  nova/db/sqlalchemy/models.py | 92
-rw-r--r--  nova/exception.py | 45
-rw-r--r--  nova/flags.py | 20
-rw-r--r--  nova/image/glance.py | 19
-rw-r--r--  nova/ipv6/account_identifier.py | 3
-rw-r--r--  nova/log.py | 6
-rw-r--r--  nova/network/api.py | 6
-rw-r--r--  nova/network/manager.py | 19
-rw-r--r--  nova/notifier/api.py | 3
-rw-r--r--  nova/notifier/list_notifier.py | 68
-rw-r--r--  nova/quota.py | 5
-rw-r--r--  nova/rpc/__init__.py | 39
-rw-r--r--  nova/rpc/common.py | 6
-rw-r--r--  nova/rpc/impl_carrot.py (renamed from nova/rpc/amqp.py) | 102
-rw-r--r--  nova/rpc/impl_kombu.py | 781
-rw-r--r--  nova/scheduler/abstract_scheduler.py | 1
-rw-r--r--  nova/scheduler/driver.py | 10
-rw-r--r--  nova/scheduler/vsa.py | 535
-rw-r--r--  nova/service.py | 28
-rw-r--r--  nova/test.py | 18
-rw-r--r--  nova/tests/api/openstack/contrib/test_createserverext.py | 44
-rw-r--r--  nova/tests/api/openstack/contrib/test_floating_ips.py | 118
-rw-r--r--  nova/tests/api/openstack/contrib/test_security_groups.py | 72
-rw-r--r--  nova/tests/api/openstack/contrib/test_simple_tenant_usage.py | 172
-rw-r--r--  nova/tests/api/openstack/contrib/test_vsa.py | 450
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 3
-rw-r--r--  nova/tests/api/openstack/test_server_actions.py | 47
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 102
-rw-r--r--  nova/tests/api/openstack/test_volume_types.py | 171
-rw-r--r--  nova/tests/api/openstack/test_volume_types_extra_specs.py | 181
-rw-r--r--  nova/tests/image/test_glance.py | 29
-rw-r--r--  nova/tests/integrated/test_servers.py | 37
-rw-r--r--  nova/tests/integrated/test_volumes.py | 17
-rw-r--r--  nova/tests/notifier/__init__.py | 16
-rw-r--r--  nova/tests/notifier/test_list_notifier.py | 88
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 13
-rw-r--r--  nova/tests/scheduler/test_vsa_scheduler.py | 641
-rw-r--r--  nova/tests/test_adminapi.py | 2
-rw-r--r--  nova/tests/test_cloud.py | 17
-rw-r--r--  nova/tests/test_compute.py | 38
-rw-r--r--  nova/tests/test_context.py | 33
-rw-r--r--  nova/tests/test_instance_types.py | 68
-rw-r--r--  nova/tests/test_ipv6.py | 2
-rw-r--r--  nova/tests/test_network.py | 16
-rw-r--r--  nova/tests/test_nova_manage.py | 154
-rw-r--r--  nova/tests/test_rpc.py | 162
-rw-r--r--  nova/tests/test_rpc_amqp.py | 88
-rw-r--r--  nova/tests/test_rpc_carrot.py | 45
-rw-r--r--  nova/tests/test_rpc_common.py | 189
-rw-r--r--  nova/tests/test_rpc_kombu.py | 110
-rw-r--r--  nova/tests/test_test.py | 5
-rw-r--r--  nova/tests/test_test_utils.py | 41
-rw-r--r--  nova/tests/test_virt_drivers.py | 489
-rw-r--r--  nova/tests/test_volume_types.py | 207
-rw-r--r--  nova/tests/test_volume_types_extra_specs.py | 132
-rw-r--r--  nova/tests/test_vsa.py | 182
-rw-r--r--  nova/tests/test_vsa_volumes.py | 136
-rw-r--r--  nova/tests/test_xenapi.py | 37
-rw-r--r--  nova/tests/utils.py | 68
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py | 5
-rw-r--r--  nova/tests/xenapi/stubs.py | 4
-rw-r--r--  nova/virt/driver.py | 18
-rw-r--r--  nova/virt/fake.py | 13
-rw-r--r--  nova/virt/libvirt.xml.template | 4
-rw-r--r--  nova/virt/libvirt/connection.py | 5
-rw-r--r--  nova/virt/xenapi/fake.py | 3
-rw-r--r--  nova/virt/xenapi/vmops.py | 5
-rw-r--r--  nova/volume/api.py | 94
-rw-r--r--  nova/volume/driver.py | 278
-rw-r--r--  nova/volume/manager.py | 78
-rw-r--r--  nova/volume/volume_types.py | 166
-rw-r--r--  nova/vsa/__init__.py | 18
-rw-r--r--  nova/vsa/api.py | 411
-rw-r--r--  nova/vsa/connection.py | 25
-rw-r--r--  nova/vsa/fake.py | 22
-rw-r--r--  nova/vsa/manager.py | 179
-rw-r--r--  nova/vsa/utils.py | 80
101 files changed, 9671 insertions, 973 deletions
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 5430f443d..ec4743cea 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,7 +20,10 @@ Starting point for routing EC2 requests.
"""
-import httplib2
+from urlparse import urlparse
+
+import eventlet
+from eventlet.green import httplib
import webob
import webob.dec
import webob.exc
@@ -35,7 +38,6 @@ from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.auth import manager
-
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.api")
flags.DEFINE_integer('lockout_attempts', 5,
@@ -158,7 +160,6 @@ class ToToken(wsgi.Middleware):
auth_params.pop('Signature')
# Authenticate the request.
- client = httplib2.Http()
creds = {'ec2Credentials': {'access': access,
'signature': signature,
'host': req.host,
@@ -166,18 +167,24 @@ class ToToken(wsgi.Middleware):
'path': req.path,
'params': auth_params,
}}
- headers = {'Content-Type': 'application/json'},
- resp, content = client.request(FLAGS.keystone_ec2_url,
- 'POST',
- headers=headers,
- body=utils.dumps(creds))
+ creds_json = utils.dumps(creds)
+ headers = {'Content-Type': 'application/json'}
+ o = urlparse(FLAGS.keystone_ec2_url)
+ if o.scheme == "http":
+ conn = httplib.HTTPConnection(o.netloc)
+ else:
+ conn = httplib.HTTPSConnection(o.netloc)
+ conn.request('POST', o.path, body=creds_json, headers=headers)
+ response = conn.getresponse().read()
+ conn.close()
+
# NOTE(vish): We could save a call to keystone by
# having keystone return token, tenant,
# user, and roles from this call.
- result = utils.loads(content)
+ result = utils.loads(response)
# TODO(vish): check for errors
- token_id = result['auth']['token']['id']
+ token_id = result['auth']['token']['id']
# Authenticated!
req.headers['X-Auth-Token'] = token_id
return self.application
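
The interesting change in this hunk is dropping httplib2 for eventlet's green httplib, so the blocking Keystone round trip no longer stalls the whole green-threaded process. A minimal standalone sketch of the same request pattern (Python 2, with eventlet installed; the endpoint URL and payload below are illustrative, not Nova's actual flag values):

    from urlparse import urlparse
    import json

    from eventlet.green import httplib  # cooperative sockets, same API as httplib

    def post_json(url, payload):
        o = urlparse(url)
        # Choose a plain or TLS connection from the URL scheme, as above.
        if o.scheme == 'http':
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=json.dumps(payload),
                     headers={'Content-Type': 'application/json'})
        body = conn.getresponse().read()
        conn.close()
        return json.loads(body)

    # Hypothetical endpoint; FLAGS.keystone_ec2_url plays this role in Nova:
    # post_json('http://127.0.0.1:5000/v2.0/ec2tokens', {'ec2Credentials': {}})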
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9aebf92e3..fe44191c8 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -47,6 +47,7 @@ from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
+from nova.compute import vm_states
from nova.image import s3
@@ -78,6 +79,30 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
+# EC2 API can return the following values as documented in the EC2 API
+# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
+# ApiReference-ItemType-InstanceStateType.html
+# pending | running | shutting-down | terminated | stopping | stopped
+_STATE_DESCRIPTION_MAP = {
+ None: 'pending',
+ vm_states.ACTIVE: 'running',
+ vm_states.BUILDING: 'pending',
+ vm_states.REBUILDING: 'pending',
+ vm_states.DELETED: 'terminated',
+ vm_states.STOPPED: 'stopped',
+ vm_states.MIGRATING: 'migrate',
+ vm_states.RESIZING: 'resize',
+ vm_states.PAUSED: 'pause',
+ vm_states.SUSPENDED: 'suspend',
+ vm_states.RESCUED: 'rescue',
+}
+
+
+def state_description_from_vm_state(vm_state):
+ """Map the vm state to the server status string"""
+ return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
+
+
# TODO(yamahata): hypervisor dependent default device name
_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
@@ -1039,11 +1064,12 @@ class CloudController(object):
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
- state_description = instance['state_description']
- state_to_value = {'stopping': 'stop',
- 'stopped': 'stop',
- 'terminating': 'terminate'}
- value = state_to_value.get(state_description)
+ vm_state = instance['vm_state']
+ state_to_value = {
+ vm_states.STOPPED: 'stopped',
+ vm_states.DELETED: 'terminated',
+ }
+ value = state_to_value.get(vm_state)
if value:
result['instanceInitiatedShutdownBehavior'] = value
@@ -1198,8 +1224,8 @@ class CloudController(object):
self._format_kernel_id(instance, i, 'kernelId')
self._format_ramdisk_id(instance, i, 'ramdiskId')
i['instanceState'] = {
- 'code': instance['state'],
- 'name': instance['state_description']}
+ 'code': instance['power_state'],
+ 'name': state_description_from_vm_state(instance['vm_state'])}
fixed_addr = None
floating_addr = None
if instance['fixed_ips']:
@@ -1618,22 +1644,22 @@ class CloudController(object):
# stop the instance if necessary
restart_instance = False
if not no_reboot:
- state_description = instance['state_description']
+ vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
- if state_description not in ('running', 'stopping', 'stopped'):
+ if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
- if state_description == 'running':
+ if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance_id=instance_id)
# wait instance for really stopped
start_time = time.time()
- while state_description != 'stopped':
+ while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_id)
- state_description = instance['state_description']
+ vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
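
The net effect of this file's changes is that every EC2-visible state string is now derived from the canonical vm_state rather than the old free-form state_description column. A toy version of the lookup, with plain strings standing in for the vm_states constants, shows the passthrough fallback:

    _STATE_DESCRIPTION_MAP = {
        None: 'pending',
        'active': 'running',
        'building': 'pending',
        'deleted': 'terminated',
        'stopped': 'stopped',
    }

    def state_description_from_vm_state(vm_state):
        # Unmapped states fall through unchanged instead of raising KeyError.
        return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)

    assert state_description_from_vm_state('active') == 'running'
    assert state_description_from_vm_state(None) == 'pending'
    assert state_description_from_vm_state('rescued') == 'rescued'  # passthrough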
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index d9eb832f2..d743a66ef 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -27,7 +27,8 @@ from nova import flags
from nova import log as logging
from nova import quota
from nova.api.openstack import wsgi
-from nova.compute import power_state as compute_power_state
+from nova.compute import vm_states
+from nova.compute import task_states
LOG = logging.getLogger('nova.api.openstack.common')
@@ -38,36 +39,61 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
-_STATUS_MAP = {
- None: 'BUILD',
- compute_power_state.NOSTATE: 'BUILD',
- compute_power_state.RUNNING: 'ACTIVE',
- compute_power_state.BLOCKED: 'ACTIVE',
- compute_power_state.SUSPENDED: 'SUSPENDED',
- compute_power_state.PAUSED: 'PAUSED',
- compute_power_state.SHUTDOWN: 'SHUTDOWN',
- compute_power_state.SHUTOFF: 'SHUTOFF',
- compute_power_state.CRASHED: 'ERROR',
- compute_power_state.FAILED: 'ERROR',
- compute_power_state.BUILDING: 'BUILD',
+_STATE_MAP = {
+ vm_states.ACTIVE: {
+ 'default': 'ACTIVE',
+ task_states.REBOOTING: 'REBOOT',
+ task_states.UPDATING_PASSWORD: 'PASSWORD',
+ task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
+ },
+ vm_states.BUILDING: {
+ 'default': 'BUILD',
+ },
+ vm_states.REBUILDING: {
+ 'default': 'REBUILD',
+ },
+ vm_states.STOPPED: {
+ 'default': 'STOPPED',
+ },
+ vm_states.MIGRATING: {
+ 'default': 'MIGRATING',
+ },
+ vm_states.RESIZING: {
+ 'default': 'RESIZE',
+ },
+ vm_states.PAUSED: {
+ 'default': 'PAUSED',
+ },
+ vm_states.SUSPENDED: {
+ 'default': 'SUSPENDED',
+ },
+ vm_states.RESCUED: {
+ 'default': 'RESCUE',
+ },
+ vm_states.ERROR: {
+ 'default': 'ERROR',
+ },
+ vm_states.DELETED: {
+ 'default': 'DELETED',
+ },
}
-def status_from_power_state(power_state):
- """Map the power state to the server status string"""
- return _STATUS_MAP[power_state]
+def status_from_state(vm_state, task_state='default'):
+ """Given vm_state and task_state, return a status string."""
+ task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE'))
+ status = task_map.get(task_state, task_map['default'])
+ LOG.debug("Generated %(status)s from vm_state=%(vm_state)s "
+ "task_state=%(task_state)s." % locals())
+ return status
-def power_states_from_status(status):
- """Map the server status string to a list of power states"""
- power_states = []
- for power_state, status_map in _STATUS_MAP.iteritems():
- # Skip the 'None' state
- if power_state is None:
- continue
- if status.lower() == status_map.lower():
- power_states.append(power_state)
- return power_states
+def vm_state_from_status(status):
+ """Map the server status string to a vm state."""
+ for state, task_map in _STATE_MAP.iteritems():
+ status_string = task_map.get("default")
+ if status.lower() == status_string.lower():
+ return state
def get_pagination_params(request):
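
The replacement map is two-level on purpose: the vm_state selects a dictionary of task-state overrides, and 'default' answers when no task is in flight. A condensed sketch of the lookup (string literals stand in for the vm_states/task_states constants):

    _STATE_MAP = {
        'active': {'default': 'ACTIVE',
                   'rebooting': 'REBOOT',
                   'resize_verify': 'VERIFY_RESIZE'},
        'building': {'default': 'BUILD'},
    }

    def status_from_state(vm_state, task_state='default'):
        task_map = _STATE_MAP.get(vm_state, {'default': 'UNKNOWN_STATE'})
        return task_map.get(task_state, task_map['default'])

    assert status_from_state('active') == 'ACTIVE'
    assert status_from_state('active', 'rebooting') == 'REBOOT'
    assert status_from_state('active', 'migrating') == 'ACTIVE'  # falls back to default
    assert status_from_state('bogus') == 'UNKNOWN_STATE'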
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index 40086f778..6ce531c8f 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -96,7 +96,8 @@ class FloatingIPController(object):
except rpc.RemoteError as ex:
# NOTE(tr3buchet) - why does this block exist?
if ex.exc_type == 'NoMoreFloatingIps':
- raise exception.NoMoreFloatingIps()
+ msg = _("No more floating ips available.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
else:
raise
@@ -138,7 +139,11 @@ class Floating_ips(extensions.ExtensionDescriptor):
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
- self.compute_api.associate_floating_ip(context, instance_id, address)
+ try:
+ self.compute_api.associate_floating_ip(context, instance_id,
+ address)
+ except exception.ApiError, e:
+ raise webob.exc.HTTPBadRequest(explanation=e.message)
return webob.Response(status_int=202)
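
Both hunks apply the same pattern: catch a domain-level failure at the API boundary and re-raise it as a 400 with a readable explanation, instead of letting it escape as a 500. A stripped-down sketch of the first case (the RemoteError shape mirrors what Nova's RPC layer raises for remote exceptions; the names here are illustrative):

    import webob.exc

    class RemoteError(Exception):
        # The RPC layer wraps remote exceptions and records the type name.
        def __init__(self, exc_type, value=''):
            super(RemoteError, self).__init__(value)
            self.exc_type = exc_type

    def allocate_floating_ip(allocate):
        try:
            return allocate()
        except RemoteError as ex:
            if ex.exc_type == 'NoMoreFloatingIps':
                msg = 'No more floating ips available.'
                raise webob.exc.HTTPBadRequest(explanation=msg)
            raise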
diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py
new file mode 100644
index 000000000..69b38e229
--- /dev/null
+++ b/nova/api/openstack/contrib/simple_tenant_usage.py
@@ -0,0 +1,236 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urlparse
+import webob
+
+from datetime import datetime
+from nova import exception
+from nova import flags
+from nova.compute import api
+from nova.api.openstack import extensions
+from nova.api.openstack import views
+from nova.db.sqlalchemy.session import get_session
+from webob import exc
+
+
+FLAGS = flags.FLAGS
+
+
+class SimpleTenantUsageController(object):
+ def _hours_for(self, instance, period_start, period_stop):
+ launched_at = instance['launched_at']
+ terminated_at = instance['terminated_at']
+ if terminated_at is not None:
+ if not isinstance(terminated_at, datetime):
+ terminated_at = datetime.strptime(terminated_at,
+ "%Y-%m-%d %H:%M:%S.%f")
+
+ if launched_at is not None:
+ if not isinstance(launched_at, datetime):
+ launched_at = datetime.strptime(launched_at,
+ "%Y-%m-%d %H:%M:%S.%f")
+
+ if terminated_at and terminated_at < period_start:
+ return 0
+ # nothing if it started after the usage report ended
+ if launched_at and launched_at > period_stop:
+ return 0
+ if launched_at:
+            # if instance launched after period_start, charge only from launch
+ start = max(launched_at, period_start)
+ if terminated_at:
+ # if instance stopped before period_stop, don't charge after
+ stop = min(period_stop, terminated_at)
+ else:
+ # instance is still running, so charge them up to current time
+ stop = period_stop
+ dt = stop - start
+            seconds = dt.days * 3600 * 24 + dt.seconds\
+                + dt.microseconds / 1000000.0
+
+ return seconds / 3600.0
+ else:
+ # instance hasn't launched, so no charge
+ return 0
+
+ def _tenant_usages_for_period(self, context, period_start,
+ period_stop, tenant_id=None, detailed=True):
+
+ compute_api = api.API()
+ instances = compute_api.get_active_by_window(context,
+ period_start,
+ period_stop,
+ tenant_id)
+ from nova import log as logging
+ logging.info(instances)
+ rval = {}
+ flavors = {}
+
+ for instance in instances:
+ info = {}
+ info['hours'] = self._hours_for(instance,
+ period_start,
+ period_stop)
+ flavor_type = instance['instance_type_id']
+
+ if not flavors.get(flavor_type):
+ try:
+ it_ref = compute_api.get_instance_type(context,
+ flavor_type)
+ flavors[flavor_type] = it_ref
+ except exception.InstanceTypeNotFound:
+ # can't bill if there is no instance type
+ continue
+
+ flavor = flavors[flavor_type]
+
+ info['name'] = instance['display_name']
+
+ info['memory_mb'] = flavor['memory_mb']
+ info['local_gb'] = flavor['local_gb']
+ info['vcpus'] = flavor['vcpus']
+
+ info['tenant_id'] = instance['project_id']
+
+ info['flavor'] = flavor['name']
+
+ info['started_at'] = instance['launched_at']
+
+ info['ended_at'] = instance['terminated_at']
+
+ if info['ended_at']:
+ info['state'] = 'terminated'
+ else:
+ info['state'] = instance['state_description']
+
+ now = datetime.utcnow()
+
+ if info['state'] == 'terminated':
+ delta = info['ended_at'] - info['started_at']
+ else:
+ delta = now - info['started_at']
+
+            info['uptime'] = delta.days * 24 * 60 * 60 + delta.seconds
+
+ if not info['tenant_id'] in rval:
+ summary = {}
+ summary['tenant_id'] = info['tenant_id']
+ if detailed:
+ summary['server_usages'] = []
+ summary['total_local_gb_usage'] = 0
+ summary['total_vcpus_usage'] = 0
+ summary['total_memory_mb_usage'] = 0
+ summary['total_hours'] = 0
+ summary['start'] = period_start
+ summary['stop'] = period_stop
+ rval[info['tenant_id']] = summary
+
+ summary = rval[info['tenant_id']]
+ summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
+ summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
+ summary['total_memory_mb_usage'] += info['memory_mb']\
+ * info['hours']
+
+ summary['total_hours'] += info['hours']
+ if detailed:
+ summary['server_usages'].append(info)
+
+ return rval.values()
+
+ def _parse_datetime(self, dtstr):
+ if isinstance(dtstr, datetime):
+ return dtstr
+ try:
+ return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S")
+        except ValueError:
+ try:
+ return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S.%f")
+            except ValueError:
+ return datetime.strptime(dtstr, "%Y-%m-%d %H:%M:%S.%f")
+
+ def _get_datetime_range(self, req):
+ qs = req.environ.get('QUERY_STRING', '')
+ env = urlparse.parse_qs(qs)
+ period_start = self._parse_datetime(env.get('start',
+ [datetime.utcnow().isoformat()])[0])
+ period_stop = self._parse_datetime(env.get('end',
+ [datetime.utcnow().isoformat()])[0])
+
+ detailed = bool(env.get('detailed', False))
+ return (period_start, period_stop, detailed)
+
+ def index(self, req):
+ """Retrive tenant_usage for all tenants"""
+ context = req.environ['nova.context']
+
+ if not context.is_admin and FLAGS.allow_admin_api:
+ return webob.Response(status_int=403)
+
+ (period_start, period_stop, detailed) = self._get_datetime_range(req)
+ usages = self._tenant_usages_for_period(context,
+ period_start,
+ period_stop,
+ detailed=detailed)
+ return {'tenant_usages': usages}
+
+ def show(self, req, id):
+ """Retrive tenant_usage for a specified tenant"""
+ tenant_id = id
+ context = req.environ['nova.context']
+
+ if not context.is_admin and FLAGS.allow_admin_api:
+ if tenant_id != context.project_id:
+ return webob.Response(status_int=403)
+
+ (period_start, period_stop, ignore) = self._get_datetime_range(req)
+ usage = self._tenant_usages_for_period(context,
+ period_start,
+ period_stop,
+ tenant_id=tenant_id,
+ detailed=True)
+ if len(usage):
+ usage = usage[0]
+ else:
+ usage = {}
+ return {'tenant_usage': usage}
+
+
+class Simple_tenant_usage(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "SimpleTenantUsage"
+
+ def get_alias(self):
+ return "os-simple-tenant-usage"
+
+ def get_description(self):
+ return "Simple tenant usage extension"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/os-simple-tenant-usage/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-19T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension('os-simple-tenant-usage',
+ SimpleTenantUsageController())
+ resources.append(res)
+
+ return resources
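
The heart of this extension is _hours_for: clamp the instance's [launched_at, terminated_at] interval to the report window and convert the overlap to fractional hours. The same arithmetic in isolation (a sketch, with the window passed explicitly rather than read from the request):

    from datetime import datetime

    def overlap_hours(launched_at, terminated_at, period_start, period_stop):
        # No charge if the instance never ran inside the window.
        if launched_at is None or launched_at > period_stop:
            return 0
        if terminated_at and terminated_at < period_start:
            return 0
        start = max(launched_at, period_start)
        stop = min(period_stop, terminated_at) if terminated_at else period_stop
        dt = stop - start
        seconds = dt.days * 86400 + dt.seconds + dt.microseconds / 1000000.0
        return seconds / 3600.0

    window = (datetime(2011, 8, 1), datetime(2011, 9, 1))
    print overlap_hours(datetime(2011, 8, 15), None, *window)   # 408.0 hours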
diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py
new file mode 100644
index 000000000..e09736a28
--- /dev/null
+++ b/nova/api/openstack/contrib/virtual_storage_arrays.py
@@ -0,0 +1,606 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The virtul storage array extension"""
+
+
+from webob import exc
+
+from nova import vsa
+from nova import volume
+from nova import compute
+from nova import network
+from nova import db
+from nova import quota
+from nova import exception
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+from nova.api.openstack import servers
+from nova.api.openstack.contrib import volumes
+from nova.compute import instance_types
+
+from nova import flags
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger("nova.api.vsa")
+
+
+def _vsa_view(context, vsa, details=False, instances=None):
+ """Map keys for vsa summary/detailed view."""
+ d = {}
+
+ d['id'] = vsa.get('id')
+ d['name'] = vsa.get('name')
+ d['displayName'] = vsa.get('display_name')
+ d['displayDescription'] = vsa.get('display_description')
+
+ d['createTime'] = vsa.get('created_at')
+ d['status'] = vsa.get('status')
+
+ if 'vsa_instance_type' in vsa:
+ d['vcType'] = vsa['vsa_instance_type'].get('name', None)
+ else:
+ d['vcType'] = vsa['instance_type_id']
+
+ d['vcCount'] = vsa.get('vc_count')
+ d['driveCount'] = vsa.get('vol_count')
+
+ d['ipAddress'] = None
+ for instance in instances:
+ fixed_addr = None
+ floating_addr = None
+ if instance['fixed_ips']:
+ fixed = instance['fixed_ips'][0]
+ fixed_addr = fixed['address']
+ if fixed['floating_ips']:
+ floating_addr = fixed['floating_ips'][0]['address']
+
+ if floating_addr:
+ d['ipAddress'] = floating_addr
+ break
+ else:
+ d['ipAddress'] = d['ipAddress'] or fixed_addr
+
+ return d
+
+
+class VsaController(object):
+ """The Virtual Storage Array API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vsa": [
+ "id",
+ "name",
+ "displayName",
+ "displayDescription",
+ "createTime",
+ "status",
+ "vcType",
+ "vcCount",
+ "driveCount",
+ "ipAddress",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ self.compute_api = compute.API()
+ self.network_api = network.API()
+ super(VsaController, self).__init__()
+
+ def _get_instances_by_vsa_id(self, context, id):
+ return self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(id))})
+
+ def _items(self, req, details):
+ """Return summary or detailed list of VSAs."""
+ context = req.environ['nova.context']
+ vsas = self.vsa_api.get_all(context)
+ limited_list = common.limited(vsas, req)
+
+ vsa_list = []
+ for vsa in limited_list:
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ vsa_list.append(_vsa_view(context, vsa, details, instances))
+ return {'vsaSet': vsa_list}
+
+ def index(self, req):
+ """Return a short list of VSAs."""
+ return self._items(req, details=False)
+
+ def detail(self, req):
+ """Return a detailed list of VSAs."""
+ return self._items(req, details=True)
+
+ def show(self, req, id):
+ """Return data about the given VSA."""
+ context = req.environ['nova.context']
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ return {'vsa': _vsa_view(context, vsa, True, instances)}
+
+ def create(self, req, body):
+ """Create a new VSA."""
+ context = req.environ['nova.context']
+
+ if not body or 'vsa' not in body:
+ LOG.debug(_("No body provided"), context=context)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vsa = body['vsa']
+
+ display_name = vsa.get('displayName')
+ vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
+ try:
+ instance_type = instance_types.get_instance_type_by_name(vc_type)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
+ locals(), context=context)
+
+ args = dict(display_name=display_name,
+ display_description=vsa.get('displayDescription'),
+ instance_type=instance_type,
+ storage=vsa.get('storage'),
+ shared=vsa.get('shared'),
+ availability_zone=vsa.get('placement', {}).\
+ get('AvailabilityZone'))
+
+ vsa = self.vsa_api.create(context, **args)
+
+ instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
+ return {'vsa': _vsa_view(context, vsa, True, instances)}
+
+ def delete(self, req, id):
+ """Delete a VSA."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete VSA with id: %s"), id, context=context)
+
+ try:
+ self.vsa_api.delete(context, vsa_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def associate_address(self, req, id, body):
+ """ /zadr-vsa/{vsa_id}/associate_address
+ auto or manually associate an IP to VSA
+ """
+ context = req.environ['nova.context']
+
+ if body is None:
+ ip = 'auto'
+ else:
+ ip = body.get('ipAddress', 'auto')
+
+ LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
+ locals(), context=context)
+
+ try:
+ instances = self._get_instances_by_vsa_id(context, id)
+ if instances is None or len(instances) == 0:
+ return faults.Fault(exc.HTTPNotFound())
+
+ for instance in instances:
+ self.network_api.allocate_for_instance(context, instance,
+ vpn=False)
+ # Placeholder
+ return
+
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def disassociate_address(self, req, id, body):
+ """ /zadr-vsa/{vsa_id}/disassociate_address
+        auto or manually disassociate an IP from VSA
+ """
+ context = req.environ['nova.context']
+
+ if body is None:
+ ip = 'auto'
+ else:
+ ip = body.get('ipAddress', 'auto')
+
+ LOG.audit(_("Disassociate address from VSA %(id)s"),
+ locals(), context=context)
+ # Placeholder
+
+
+class VsaVolumeDriveController(volumes.VolumeController):
+ """The base class for VSA volumes & drives.
+
+ A child resource of the VSA object. Allows operations with
+ volumes and drives created to/from particular VSA
+
+ """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "volume": [
+ "id",
+ "name",
+ "status",
+ "size",
+ "availabilityZone",
+ "createdAt",
+ "displayName",
+ "displayDescription",
+ "vsaId",
+ ]}}}
+
+ def __init__(self):
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+ super(VsaVolumeDriveController, self).__init__()
+
+ def _translation(self, context, vol, vsa_id, details):
+ if details:
+ translation = volumes._translate_volume_detail_view
+ else:
+ translation = volumes._translate_volume_summary_view
+
+ d = translation(context, vol)
+ d['vsaId'] = vsa_id
+ d['name'] = vol['name']
+ return d
+
+ def _check_volume_ownership(self, context, vsa_id, id):
+ obj = self.object
+ try:
+ volume_ref = self.volume_api.get(context, volume_id=id)
+ except exception.NotFound:
+ LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
+ raise
+
+ own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref,
+ self.direction)
+ if own_vsa_id != vsa_id:
+ LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\
+ " and not to VSA %(vsa_id)s."), locals())
+ raise exception.Invalid()
+
+ def _items(self, req, vsa_id, details):
+ """Return summary or detailed list of volumes for particular VSA."""
+ context = req.environ['nova.context']
+
+ vols = self.volume_api.get_all(context,
+ search_opts={'metadata': {self.direction: str(vsa_id)}})
+ limited_list = common.limited(vols, req)
+
+ res = [self._translation(context, vol, vsa_id, details) \
+ for vol in limited_list]
+
+ return {self.objects: res}
+
+ def index(self, req, vsa_id):
+ """Return a short list of volumes created from particular VSA."""
+ LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=False)
+
+ def detail(self, req, vsa_id):
+ """Return a detailed list of volumes created from particular VSA."""
+ LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
+ return self._items(req, vsa_id, details=True)
+
+ def create(self, req, vsa_id, body):
+ """Create a new volume from VSA."""
+ LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())
+ context = req.environ['nova.context']
+
+ if not body:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol = body[self.object]
+ size = vol['size']
+ LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
+ locals(), context=context)
+ try:
+ # create is supported for volumes only (drives created through VSA)
+ volume_type = self.vsa_api.get_vsa_volume_type(context)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ new_volume = self.volume_api.create(context,
+ size,
+ None,
+ vol.get('displayName'),
+ vol.get('displayDescription'),
+ volume_type=volume_type,
+ metadata=dict(from_vsa_id=str(vsa_id)))
+
+ return {self.object: self._translation(context, new_volume,
+ vsa_id, True)}
+
+ def update(self, req, vsa_id, id, body):
+ """Update a volume."""
+ context = req.environ['nova.context']
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ vol = body[self.object]
+ updatable_fields = [{'displayName': 'display_name'},
+ {'displayDescription': 'display_description'},
+ {'status': 'status'},
+ {'providerLocation': 'provider_location'},
+ {'providerAuth': 'provider_auth'}]
+ changes = {}
+ for field in updatable_fields:
+ key = field.keys()[0]
+ val = field[key]
+ if key in vol:
+ changes[val] = vol[key]
+
+ obj = self.object
+ LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
+ locals(), context=context)
+
+ try:
+ self.volume_api.update(context, volume_id=id, fields=changes)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ def delete(self, req, vsa_id, id):
+ """Delete a volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).delete(req, id)
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
+
+ try:
+ self._check_volume_ownership(context, vsa_id, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ except exception.Invalid:
+ return faults.Fault(exc.HTTPBadRequest())
+
+ return super(VsaVolumeDriveController, self).show(req, id)
+
+
+class VsaVolumeController(VsaVolumeDriveController):
+ """The VSA volume API controller for the Openstack API.
+
+ A child resource of the VSA object. Allows operations with volumes created
+ by particular VSA
+
+ """
+
+ def __init__(self):
+ self.direction = 'from_vsa_id'
+ self.objects = 'volumes'
+ self.object = 'volume'
+ super(VsaVolumeController, self).__init__()
+
+
+class VsaDriveController(VsaVolumeDriveController):
+ """The VSA Drive API controller for the Openstack API.
+
+ A child resource of the VSA object. Allows operations with drives created
+ for particular VSA
+
+ """
+
+ def __init__(self):
+ self.direction = 'to_vsa_id'
+ self.objects = 'drives'
+ self.object = 'drive'
+ super(VsaDriveController, self).__init__()
+
+ def create(self, req, vsa_id, body):
+ """Create a new drive for VSA. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update a drive. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a volume. Should be done through VSA APIs"""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVPoolController(object):
+ """The vPool VSA API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "vpool": [
+ "id",
+ "vsaId",
+ "name",
+ "displayName",
+ "displayDescription",
+ "driveCount",
+ "driveIds",
+ "protection",
+ "stripeSize",
+ "stripeWidth",
+ "createTime",
+ "status",
+ ]}}}
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ super(VsaVPoolController, self).__init__()
+
+ def index(self, req, vsa_id):
+ """Return a short list of vpools created from particular VSA."""
+ return {'vpools': []}
+
+ def create(self, req, vsa_id, body):
+ """Create a new vPool for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update vPool parameters."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete a vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given vPool."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+
+class VsaVCController(servers.ControllerV11):
+ """The VSA Virtual Controller API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.vsa_api = vsa.API()
+ self.compute_api = compute.API()
+ self.vsa_id = None # VP-TODO: temporary ugly hack
+ super(VsaVCController, self).__init__()
+
+ def _get_servers(self, req, is_detail):
+ """Returns a list of servers, taking into account any search
+ options specified.
+ """
+
+ if self.vsa_id is None:
+            return super(VsaVCController, self)._get_servers(req, is_detail)
+
+ context = req.environ['nova.context']
+
+ search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))}
+ instance_list = self.compute_api.get_all(
+ context, search_opts=search_opts)
+
+ limited_list = self._limit_items(instance_list, req)
+ servers = [self._build_view(req, inst, is_detail)['server']
+ for inst in limited_list]
+ return dict(servers=servers)
+
+ def index(self, req, vsa_id):
+ """Return list of instances for particular VSA."""
+
+ LOG.audit(_("Index instances for VSA %s"), vsa_id)
+
+ self.vsa_id = vsa_id # VP-TODO: temporary ugly hack
+ result = super(VsaVCController, self).detail(req)
+ self.vsa_id = None
+ return result
+
+ def create(self, req, vsa_id, body):
+ """Create a new instance for VSA."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def update(self, req, vsa_id, id, body):
+ """Update VSA instance."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, vsa_id, id):
+ """Delete VSA instance."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def show(self, req, vsa_id, id):
+ """Return data about the given instance."""
+ return super(VsaVCController, self).show(req, id)
+
+
+class Virtual_storage_arrays(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "VSAs"
+
+ def get_alias(self):
+ return "zadr-vsa"
+
+ def get_description(self):
+ return "Virtual Storage Arrays support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/vsa/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-25T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'zadr-vsa',
+ VsaController(),
+ collection_actions={'detail': 'GET'},
+ member_actions={'add_capacity': 'POST',
+ 'remove_capacity': 'POST',
+ 'associate_address': 'POST',
+ 'disassociate_address': 'POST'})
+ resources.append(res)
+
+ res = extensions.ResourceExtension('volumes',
+ VsaVolumeController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('drives',
+ VsaDriveController(),
+ collection_actions={'detail': 'GET'},
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('vpools',
+ VsaVPoolController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ res = extensions.ResourceExtension('instances',
+ VsaVCController(),
+ parent=dict(
+ member_name='vsa',
+ collection_name='zadr-vsa'))
+ resources.append(res)
+
+ return resources
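
Taken together, the resource registrations above wire up a small route tree under the extension alias. Assuming the standard /v1.1/<tenant> prefix, the mapping works out roughly as follows (inferred from the parent/collection declarations; add_capacity, remove_capacity, and disassociate_address are analogous member actions):

    GET   /v1.1/<tenant>/zadr-vsa                         -> VsaController.index
    GET   /v1.1/<tenant>/zadr-vsa/detail                  -> VsaController.detail
    POST  /v1.1/<tenant>/zadr-vsa/<id>/associate_address  -> VsaController.associate_address
    GET   /v1.1/<tenant>/zadr-vsa/<vsa_id>/volumes        -> VsaVolumeController.index
    GET   /v1.1/<tenant>/zadr-vsa/<vsa_id>/drives         -> VsaDriveController.index
    GET   /v1.1/<tenant>/zadr-vsa/<vsa_id>/vpools         -> VsaVPoolController.index
    GET   /v1.1/<tenant>/zadr-vsa/<vsa_id>/instances      -> VsaVCController.index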
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index 867fe301e..d62225e58 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -24,6 +24,7 @@ from nova import flags
from nova import log as logging
from nova import quota
from nova import volume
+from nova.volume import volume_types
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import faults
@@ -63,6 +64,22 @@ def _translate_volume_summary_view(context, vol):
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
+
+ if vol['volume_type_id'] and vol.get('volume_type'):
+ d['volumeType'] = vol['volume_type']['name']
+ else:
+ d['volumeType'] = vol['volume_type_id']
+
+ LOG.audit(_("vol=%s"), vol, context=context)
+
+ if vol.get('volume_metadata'):
+ meta_dict = {}
+ for i in vol['volume_metadata']:
+ meta_dict[i['key']] = i['value']
+ d['metadata'] = meta_dict
+ else:
+ d['metadata'] = {}
+
return d
@@ -80,6 +97,8 @@ class VolumeController(object):
"createdAt",
"displayName",
"displayDescription",
+ "volumeType",
+ "metadata",
]}}}
def __init__(self):
@@ -136,12 +155,25 @@ class VolumeController(object):
vol = body['volume']
size = vol['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
+
+ vol_type = vol.get('volume_type', None)
+ if vol_type:
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context,
+ vol_type)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ metadata = vol.get('metadata', None)
+
new_volume = self.volume_api.create(context, size, None,
vol.get('display_name'),
- vol.get('display_description'))
+ vol.get('display_description'),
+ volume_type=vol_type,
+ metadata=metadata)
# Work around problem that instance is lazy-loaded...
- new_volume['instance'] = None
+ new_volume = self.volume_api.get(context, new_volume['id'])
retval = _translate_volume_detail_view(context, new_volume)
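
With volumeType and metadata now in the serialization metadata and honored by create(), a single request body can carry both. A hypothetical create payload, written as the Python dict the controller would see (the type name 'gold' is illustrative and must match an existing volume type):

    body = {'volume': {'size': 10,
                       'display_name': 'db-data',
                       'display_description': 'postgres data volume',
                       'volume_type': 'gold',               # looked up by name
                       'metadata': {'purpose': 'database'}}}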
diff --git a/nova/api/openstack/contrib/volumetypes.py b/nova/api/openstack/contrib/volumetypes.py
new file mode 100644
index 000000000..ed33a8819
--- /dev/null
+++ b/nova/api/openstack/contrib/volumetypes.py
@@ -0,0 +1,197 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The volume type & volume types extra specs extension"""
+
+from webob import exc
+
+from nova import db
+from nova import exception
+from nova import quota
+from nova.volume import volume_types
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
+
+class VolumeTypesController(object):
+ """ The volume types API controller for the Openstack API """
+
+ def index(self, req):
+ """ Returns the list of volume types """
+ context = req.environ['nova.context']
+ return volume_types.get_all_types(context)
+
+ def create(self, req, body):
+ """Creates a new volume type."""
+ context = req.environ['nova.context']
+
+ if not body or body == "":
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol_type = body.get('volume_type', None)
+ if vol_type is None or vol_type == "":
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ name = vol_type.get('name', None)
+ specs = vol_type.get('extra_specs', {})
+
+ if name is None or name == "":
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ try:
+ volume_types.create(context, name, specs)
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volume_type': vol_type}
+
+ def show(self, req, id):
+ """ Return a single volume type item """
+ context = req.environ['nova.context']
+
+ try:
+ vol_type = volume_types.get_volume_type(context, id)
+        except (exception.NotFound, exception.ApiError):
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volume_type': vol_type}
+
+ def delete(self, req, id):
+ """ Deletes an existing volume type """
+ context = req.environ['nova.context']
+
+ try:
+ vol_type = volume_types.get_volume_type(context, id)
+ volume_types.destroy(context, vol_type['name'])
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
+
+
+class VolumeTypeExtraSpecsController(object):
+ """ The volume type extra specs API controller for the Openstack API """
+
+ def _get_extra_specs(self, context, vol_type_id):
+ extra_specs = db.api.volume_type_extra_specs_get(context, vol_type_id)
+ specs_dict = {}
+ for key, value in extra_specs.iteritems():
+ specs_dict[key] = value
+ return dict(extra_specs=specs_dict)
+
+ def _check_body(self, body):
+        if body is None or body == "":
+ expl = _('No Request Body')
+ raise exc.HTTPBadRequest(explanation=expl)
+
+ def index(self, req, vol_type_id):
+ """ Returns the list of extra specs for a given volume type """
+ context = req.environ['nova.context']
+ return self._get_extra_specs(context, vol_type_id)
+
+ def create(self, req, vol_type_id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+ specs = body.get('extra_specs')
+ try:
+ db.api.volume_type_extra_specs_update_or_create(context,
+ vol_type_id,
+ specs)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ return body
+
+ def update(self, req, vol_type_id, id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+        if id not in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ db.api.volume_type_extra_specs_update_or_create(context,
+ vol_type_id,
+ body)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
+ return body
+
+ def show(self, req, vol_type_id, id):
+ """ Return a single extra spec item """
+ context = req.environ['nova.context']
+ specs = self._get_extra_specs(context, vol_type_id)
+ if id in specs['extra_specs']:
+ return {id: specs['extra_specs'][id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def delete(self, req, vol_type_id, id):
+ """ Deletes an existing extra spec """
+ context = req.environ['nova.context']
+ db.api.volume_type_extra_specs_delete(context, vol_type_id, id)
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
+
+
+class Volumetypes(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "VolumeTypes"
+
+ def get_alias(self):
+ return "os-volume-types"
+
+ def get_description(self):
+ return "Volume types support"
+
+ def get_namespace(self):
+ return \
+ "http://docs.openstack.org/ext/volume_types/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-24T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'os-volume-types',
+ VolumeTypesController())
+ resources.append(res)
+
+ res = extensions.ResourceExtension('extra_specs',
+ VolumeTypeExtraSpecsController(),
+ parent=dict(
+ member_name='vol_type',
+ collection_name='os-volume-types'))
+ resources.append(res)
+
+ return resources
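
The extra_specs controller is mounted as a child resource of os-volume-types, so specs are managed per type. A sketch of the request bodies the three write paths expect, inferred from the handlers above (URLs and values are illustrative):

    # POST /v1.1/<tenant>/os-volume-types
    create_body = {'volume_type': {'name': 'gold',
                                   'extra_specs': {'qos': 'high'}}}

    # POST /v1.1/<tenant>/os-volume-types/<vol_type_id>/extra_specs
    specs_body = {'extra_specs': {'replication': '2'}}

    # PUT /v1.1/<tenant>/os-volume-types/<vol_type_id>/extra_specs/replication
    # The single key must match the id in the URI; only one item is allowed.
    update_body = {'replication': '3'}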
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 4acee2211..be5ebd2ab 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -138,7 +138,10 @@ class CreateInstanceHelper(object):
raise exc.HTTPBadRequest(explanation=msg)
zone_blob = server_dict.get('blob')
+
user_data = server_dict.get('user_data')
+ self._validate_user_data(user_data)
+
availability_zone = server_dict.get('availability_zone')
name = server_dict['name']
self._validate_server_name(name)
@@ -371,6 +374,16 @@ class CreateInstanceHelper(object):
return networks
+ def _validate_user_data(self, user_data):
+ """Check if the user_data is encoded properly"""
+ if not user_data:
+ return
+ try:
+ user_data = base64.b64decode(user_data)
+ except TypeError:
+ expl = _('Userdata content cannot be decoded')
+ raise exc.HTTPBadRequest(explanation=expl)
+
class ServerXMLDeserializer(wsgi.XMLDeserializer):
"""
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 5bbb4e52e..c1960a09f 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -95,17 +95,15 @@ class Controller(object):
search_opts['recurse_zones'] = utils.bool_from_str(
search_opts.get('recurse_zones', False))
- # If search by 'status', we need to convert it to 'state'
- # If the status is unknown, bail.
- # Leave 'state' in search_opts so compute can pass it on to
- # child zones..
+ # If search by 'status', we need to convert it to 'vm_state'
+ # to pass on to child zones.
if 'status' in search_opts:
status = search_opts['status']
- search_opts['state'] = common.power_states_from_status(status)
- if len(search_opts['state']) == 0:
+ state = common.vm_state_from_status(status)
+ if state is None:
reason = _('Invalid server status: %(status)s') % locals()
- LOG.error(reason)
raise exception.InvalidInput(reason=reason)
+ search_opts['vm_state'] = state
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
@@ -611,9 +609,8 @@ class ControllerV10(Controller):
try:
self.compute_api.rebuild(context, instance_id, image_id, password)
- except exception.BuildInProgress:
- msg = _("Instance %s is currently being rebuilt.") % instance_id
- LOG.debug(msg)
+ except exception.RebuildRequiresActiveInstance:
+ msg = _("Instance %s must be active to rebuild.") % instance_id
raise exc.HTTPConflict(explanation=msg)
return webob.Response(status_int=202)
@@ -753,9 +750,8 @@ class ControllerV11(Controller):
self.compute_api.rebuild(context, instance_id, image_href,
password, name=name, metadata=metadata,
files_to_inject=personalities)
- except exception.BuildInProgress:
- msg = _("Instance %s is currently being rebuilt.") % instance_id
- LOG.debug(msg)
+ except exception.RebuildRequiresActiveInstance:
+ msg = _("Instance %s must be active to rebuild.") % instance_id
raise exc.HTTPConflict(explanation=msg)
except exception.InstanceNotFound:
msg = _("Instance %s could not be found") % instance_id
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 6fd09aae6..01640fc20 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -21,13 +21,12 @@ import hashlib
import os
from nova import exception
-import nova.compute
-import nova.context
from nova.api.openstack import common
from nova.api.openstack.views import addresses as addresses_view
from nova.api.openstack.views import flavors as flavors_view
from nova.api.openstack.views import images as images_view
from nova import utils
+from nova.compute import vm_states
class ViewBuilder(object):
@@ -61,6 +60,8 @@ class ViewBuilder(object):
def _build_detail(self, inst):
"""Returns a detailed model of a server."""
+ vm_state = inst.get('vm_state', vm_states.BUILDING)
+ task_state = inst.get('task_state')
inst_dict = {
'id': inst['id'],
@@ -68,13 +69,7 @@ class ViewBuilder(object):
'user_id': inst.get('user_id', ''),
'tenant_id': inst.get('project_id', ''),
'description': inst.get('display_description', ''),
- 'status': common.status_from_power_state(inst.get('state'))}
-
- ctxt = nova.context.get_admin_context()
- compute_api = nova.compute.API()
-
- if compute_api.has_finished_migration(ctxt, inst['uuid']):
- inst_dict['status'] = 'RESIZE-CONFIRM'
+ 'status': common.status_from_state(vm_state, task_state)}
# Return the metadata as a dictionary
metadata = {}
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 95d6a901d..6b462bef1 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -19,13 +19,11 @@
"""Handles all requests relating to instances (guest vms)."""
-import eventlet
import novaclient
import re
import time
from nova import block_device
-from nova import db
from nova import exception
from nova import flags
import nova.image
@@ -37,6 +35,8 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -75,12 +75,18 @@ def generate_default_hostname(instance):
def _is_able_to_shutdown(instance, instance_id):
- states = {'terminating': "Instance %s is already being terminated",
- 'migrating': "Instance %s is being migrated",
- 'stopping': "Instance %s is being stopped"}
- msg = states.get(instance['state_description'])
- if msg:
- LOG.warning(_(msg), instance_id)
+ vm_state = instance["vm_state"]
+ task_state = instance["task_state"]
+
+ valid_shutdown_states = [
+ vm_states.ACTIVE,
+ vm_states.REBUILDING,
+ vm_states.BUILDING,
+ ]
+
+ if vm_state not in valid_shutdown_states:
+ LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It "
+ "is currently %(vm_state)s. Shutdown aborted.") % locals())
return False
return True
@@ -237,7 +243,7 @@ class API(base.Base):
self.ensure_default_security_group(context)
if key_data is None and key_name:
- key_pair = db.key_pair_get(context, context.user_id, key_name)
+ key_pair = self.db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
if reservation_id is None:
@@ -251,10 +257,10 @@ class API(base.Base):
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
+ 'power_state': power_state.NOSTATE,
+ 'vm_state': vm_states.BUILDING,
'config_drive_id': config_drive_id or '',
'config_drive': config_drive or '',
- 'state': 0,
- 'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
@@ -389,9 +395,9 @@ class API(base.Base):
security_groups = []
for security_group_name in security_group:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
+ group = self.db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
security_groups.append(group['id'])
for security_group_id in security_groups:
@@ -415,6 +421,8 @@ class API(base.Base):
updates['display_name'] = "Server %s" % instance_id
instance['display_name'] = updates['display_name']
updates['hostname'] = self.hostname_factory(instance)
+ updates['vm_state'] = vm_states.BUILDING
+ updates['task_state'] = task_states.SCHEDULING
instance = self.update(context, instance_id, **updates)
return instance
@@ -551,8 +559,9 @@ class API(base.Base):
def has_finished_migration(self, context, instance_uuid):
"""Returns true if an instance has a finished migration."""
try:
- db.migration_get_by_instance_and_status(context, instance_uuid,
- 'finished')
+ self.db.migration_get_by_instance_and_status(context,
+ instance_uuid,
+ 'finished')
return True
except exception.NotFound:
return False
@@ -566,14 +575,15 @@ class API(base.Base):
:param context: the security context
"""
try:
- db.security_group_get_by_name(context, context.project_id,
- 'default')
+ self.db.security_group_get_by_name(context,
+ context.project_id,
+ 'default')
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
- db.security_group_create(context, values)
+ self.db.security_group_create(context, values)
def trigger_security_group_rules_refresh(self, context, security_group_id):
"""Called when a rule is added to or removed from a security_group."""
@@ -638,7 +648,7 @@ class API(base.Base):
"""Called when a rule is added to or removed from a security_group"""
hosts = [x['host'] for (x, idx)
- in db.service_get_all_compute_sorted(context)]
+ in self.db.service_get_all_compute_sorted(context)]
for host in hosts:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
@@ -666,11 +676,11 @@ class API(base.Base):
def add_security_group(self, context, instance_id, security_group_name):
"""Add security group to the instance"""
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
+ security_group = self.db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
# check if the server exists
- inst = db.instance_get(context, instance_id)
+ inst = self.db.instance_get(context, instance_id)
#check if the security group is associated with the server
if self._is_security_group_associated_with_server(security_group,
instance_id):
@@ -682,21 +692,21 @@ class API(base.Base):
if inst['state'] != power_state.RUNNING:
raise exception.InstanceNotRunning(instance_id=instance_id)
- db.instance_add_security_group(context.elevated(),
- instance_id,
- security_group['id'])
+ self.db.instance_add_security_group(context.elevated(),
+ instance_id,
+ security_group['id'])
rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
+ self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
{"method": "refresh_security_group_rules",
"args": {"security_group_id": security_group['id']}})
def remove_security_group(self, context, instance_id, security_group_name):
"""Remove the security group associated with the instance"""
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
+ security_group = self.db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
# check if the server exists
- inst = db.instance_get(context, instance_id)
+ inst = self.db.instance_get(context, instance_id)
#check if the security group is associated with the server
if not self._is_security_group_associated_with_server(security_group,
instance_id):
@@ -708,11 +718,11 @@ class API(base.Base):
if inst['state'] != power_state.RUNNING:
raise exception.InstanceNotRunning(instance_id=instance_id)
- db.instance_remove_security_group(context.elevated(),
- instance_id,
- security_group['id'])
+ self.db.instance_remove_security_group(context.elevated(),
+ instance_id,
+ security_group['id'])
rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
+ self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']),
{"method": "refresh_security_group_rules",
"args": {"security_group_id": security_group['id']}})
@@ -750,10 +760,8 @@ class API(base.Base):
return
self.update(context,
- instance['id'],
- state_description='terminating',
- state=0,
- terminated_at=utils.utcnow())
+ instance_id,
+ task_state=task_states.DELETING)
host = instance['host']
if host:
@@ -773,9 +781,9 @@ class API(base.Base):
return
self.update(context,
- instance['id'],
- state_description='stopping',
- state=power_state.NOSTATE,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
terminated_at=utils.utcnow())
host = instance['host']
@@ -787,12 +795,18 @@ class API(base.Base):
"""Start an instance."""
LOG.debug(_("Going to try to start %s"), instance_id)
instance = self._get_instance(context, instance_id, 'starting')
- if instance['state_description'] != 'stopped':
- _state_description = instance['state_description']
+ vm_state = instance["vm_state"]
+
+ if vm_state != vm_states.STOPPED:
LOG.warning(_("Instance %(instance_id)s is not "
- "stopped(%(_state_description)s)") % locals())
+ "stopped. (%(vm_state)s)") % locals())
return
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.STOPPED,
+ task_state=task_states.STARTING)
+
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
@@ -802,6 +816,15 @@ class API(base.Base):
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id}})
+ def get_active_by_window(self, context, begin, end=None, project_id=None):
+ """Get instances that were continuously active over a window."""
+ return self.db.instance_get_active_by_window(context, begin, end,
+ project_id)
+
+ def get_instance_type(self, context, instance_type_id):
+ """Get an instance type by instance type id."""
+ return self.db.instance_type_get(context, instance_type_id)
+
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
# NOTE(sirp): id used to be exclusively integer IDs; now we're
@@ -1002,7 +1025,7 @@ class API(base.Base):
:param extra_properties: dict of extra image properties to include
"""
- instance = db.api.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
properties = {'instance_uuid': instance['uuid'],
'user_id': str(context.user_id),
'image_state': 'creating',
@@ -1021,32 +1044,39 @@ class API(base.Base):
@scheduler_api.reroute_compute("reboot")
def reboot(self, context, instance_id):
"""Reboot the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING)
self._cast_compute_message('reboot_instance', context, instance_id)
@scheduler_api.reroute_compute("rebuild")
def rebuild(self, context, instance_id, image_href, admin_password,
name=None, metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
- instance = db.api.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
+ name = name or instance["display_name"]
- if instance["state"] == power_state.BUILDING:
- msg = _("Instance already building")
- raise exception.BuildInProgress(msg)
+ if instance["vm_state"] != vm_states.ACTIVE:
+ msg = _("Instance must be active to rebuild.")
+ raise exception.RebuildRequiresActiveInstance(msg)
files_to_inject = files_to_inject or []
+ metadata = metadata or {}
+
self._check_injected_file_quota(context, files_to_inject)
+ self._check_metadata_properties_quota(context, metadata)
- values = {}
- if metadata is not None:
- self._check_metadata_properties_quota(context, metadata)
- values['metadata'] = metadata
- if name is not None:
- values['display_name'] = name
- self.db.instance_update(context, instance_id, values)
+ self.update(context,
+ instance_id,
+ metadata=metadata,
+ display_name=name,
+ image_ref=image_href,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBUILDING)
rebuild_params = {
"new_pass": admin_password,
- "image_ref": image_href,
"injected_files": files_to_inject,
}
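
The rebuild hunk above changes the guard: instead of refusing only while a build is already in progress, rebuild now requires the instance to be ACTIVE. A standalone sketch of the new guard (plain Python, not Nova code; the constant and exception name mirror the patch):

ACTIVE = 'active'

class RebuildRequiresActiveInstance(Exception):
    pass

def check_rebuild_allowed(instance):
    # instance is assumed to be a dict-like row with a 'vm_state' key
    if instance['vm_state'] != ACTIVE:
        raise RebuildRequiresActiveInstance('Instance must be active to rebuild.')

check_rebuild_allowed({'vm_state': ACTIVE})          # passes silently
try:
    check_rebuild_allowed({'vm_state': 'building'})
except RebuildRequiresActiveInstance as e:
    print('refused: %s' % e)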
@@ -1067,6 +1097,11 @@ class API(base.Base):
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context,
instance_ref['uuid'],
@@ -1087,6 +1122,12 @@ class API(base.Base):
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
+
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context,
instance_ref['uuid'],
@@ -1132,6 +1173,11 @@ class API(base.Base):
if (current_memory_mb == new_memory_mb) and flavor_id:
raise exception.CannotResizeToSameSize()
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.RESIZING,
+ task_state=task_states.RESIZE_PREP)
+
instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
{"method": "prep_resize",
@@ -1165,11 +1211,19 @@ class API(base.Base):
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.PAUSING)
self._cast_compute_message('pause_instance', context, instance_id)
@scheduler_api.reroute_compute("unpause")
def unpause(self, context, instance_id):
"""Unpause the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.PAUSED,
+ task_state=task_states.UNPAUSING)
self._cast_compute_message('unpause_instance', context, instance_id)
def _call_compute_message_for_host(self, action, context, host, params):
@@ -1202,21 +1256,37 @@ class API(base.Base):
@scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance_id):
"""Suspend the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
self._cast_compute_message('suspend_instance', context, instance_id)
@scheduler_api.reroute_compute("resume")
def resume(self, context, instance_id):
"""Resume the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.SUSPENDED,
+ task_state=task_states.RESUMING)
self._cast_compute_message('resume_instance', context, instance_id)
@scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance_id):
"""Rescue the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESCUING)
self._cast_compute_message('rescue_instance', context, instance_id)
@scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.RESCUED,
+ task_state=task_states.UNRESCUING)
self._cast_compute_message('unrescue_instance', context, instance_id)
@scheduler_api.reroute_compute("set_admin_password")
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ade15e310..0477db745 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -56,6 +56,8 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova.notifier import api as notifier
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -146,6 +148,10 @@ class ComputeManager(manager.SchedulerDependentManager):
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
+ def _instance_update(self, context, instance_id, **kwargs):
+ """Update an instance in the database using kwargs as values."""
+ return self.db.instance_update(context, instance_id, kwargs)
+
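
_instance_update() is a thin shim: keyword arguments become the values dict handed to db.instance_update(). A runnable illustration of the equivalence, with a stub in place of the real db layer:

class StubDB(object):
    def instance_update(self, context, instance_id, values):
        print('instance_update(%s, %r)' % (instance_id, values))

db = StubDB()

def _instance_update(context, instance_id, **kwargs):
    # kwargs passes straight through as the update dict
    return db.instance_update(context, instance_id, kwargs)

# These two calls are equivalent:
_instance_update(None, 1, vm_state='active', task_state=None)
db.instance_update(None, 1, {'vm_state': 'active', 'task_state': None})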
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
@@ -153,8 +159,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
inst_name = instance['name']
- db_state = instance['state']
- drv_state = self._update_state(context, instance['id'])
+ db_state = instance['power_state']
+ drv_state = self._get_power_state(context, instance)
expect_running = db_state == power_state.RUNNING \
and drv_state != db_state
@@ -177,34 +183,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
- def _update_state(self, context, instance_id, state=None):
- """Update the state of an instance from the driver info."""
- instance_ref = self.db.instance_get(context, instance_id)
-
- if state is None:
- try:
- LOG.debug(_('Checking state of %s'), instance_ref['name'])
- info = self.driver.get_info(instance_ref['name'])
- except exception.NotFound:
- info = None
-
- if info is not None:
- state = info['state']
- else:
- state = power_state.FAILED
-
- self.db.instance_set_state(context, instance_id, state)
- return state
-
- def _update_launched_at(self, context, instance_id, launched_at=None):
- """Update the launched_at parameter of the given instance."""
- data = {'launched_at': launched_at or utils.utcnow()}
- self.db.instance_update(context, instance_id, data)
-
- def _update_image_ref(self, context, instance_id, image_ref):
- """Update the image_id for the given instance."""
- data = {'image_ref': image_ref}
- self.db.instance_update(context, instance_id, data)
+ def _get_power_state(self, context, instance):
+ """Retrieve the power state for the given instance."""
+ LOG.debug(_('Checking state of %s'), instance['name'])
+ try:
+ return self.driver.get_info(instance['name'])["state"]
+ except exception.NotFound:
+ return power_state.FAILED
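
The replacement helper above treats "unknown to the hypervisor" as FAILED and no longer persists anything itself. A runnable sketch with a stub driver; the string states stand in for the real integer power_state constants:

class NotFound(Exception):
    pass

class StubDriver(object):
    # Pretend hypervisor that only knows about one instance.
    def get_info(self, name):
        if name != 'instance-00000001':
            raise NotFound(name)
        return {'state': 'running'}    # stand-in for power_state.RUNNING

FAILED = 'failed'                       # stand-in for power_state.FAILED

def get_power_state(driver, instance):
    try:
        return driver.get_info(instance['name'])['state']
    except NotFound:
        return FAILED

driver = StubDriver()
print(get_power_state(driver, {'name': 'instance-00000001'}))  # running
print(get_power_state(driver, {'name': 'instance-00000002'}))  # failed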
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -256,11 +241,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'block_device_mapping')
-
volume_api = volume.API()
block_device_mapping = []
swap = None
@@ -394,17 +374,12 @@ class ComputeManager(manager.SchedulerDependentManager):
updates = {}
updates['host'] = self.host
updates['launched_on'] = self.host
- instance = self.db.instance_update(context,
- instance_id,
- updates)
+ updates['vm_state'] = vm_states.BUILDING
+ updates['task_state'] = task_states.NETWORKING
+ instance = self.db.instance_update(context, instance_id, updates)
instance['injected_files'] = kwargs.get('injected_files', [])
instance['admin_pass'] = kwargs.get('admin_password', None)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'networking')
-
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
@@ -423,6 +398,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# all vif creation and network injection, maybe this is correct
network_info = []
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
(swap, ephemerals,
block_device_mapping) = self._setup_block_device_mapping(
context, instance_id)
@@ -432,9 +412,12 @@ class ComputeManager(manager.SchedulerDependentManager):
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
- # TODO(vish) check to make sure the availability zone matches
- self._update_state(context, instance_id, power_state.BUILDING)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING)
+ # TODO(vish) check to make sure the availability zone matches
try:
self.driver.spawn(context, instance,
network_info, block_device_info)
@@ -443,13 +426,21 @@ class ComputeManager(manager.SchedulerDependentManager):
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
+ return
+
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.create',
notifier.INFO, usage_info)
+
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -485,8 +476,7 @@ class ComputeManager(manager.SchedulerDependentManager):
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
- if (instance['state'] == power_state.SHUTOFF and
- instance['state_description'] != 'stopped'):
+ if instance['power_state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
@@ -501,9 +491,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.DELETED,
+ task_state=None,
+ terminated_at=utils.utcnow())
- # TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
+
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.delete',
@@ -514,7 +509,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
- # instance state will be updated to stopped by _poll_instance_states()
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.STOPPED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -526,7 +524,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: `nova.RequestContext` object
:param instance_id: Instance identifier (integer)
- :param image_ref: Image identifier (href or integer)
+ :param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
"""
context = context.elevated()
@@ -534,29 +532,46 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
- self._update_state(context, instance_id, power_state.BUILDING)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.REBUILDING,
+ task_state=None)
network_info = self._get_instance_nw_info(context, instance_ref)
-
self.driver.destroy(instance_ref, network_info)
- image_ref = kwargs.get('image_ref')
- instance_ref.image_ref = image_ref
+
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.REBUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
instance_ref.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
instance_ref)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.REBUILDING,
+ task_state=task_states.SPAWNING)
+
# pull in new password here since the original password isn't in the db
instance_ref.admin_pass = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
self.driver.spawn(context, instance_ref, network_info, bd_mapping)
- self._update_image_ref(context, instance_id, image_ref)
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
- usage_info = utils.usage_from_instance(instance_ref,
- image_ref=image_ref)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
+
+ usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier.INFO,
@@ -566,26 +581,34 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
+ LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
context = context.elevated()
- self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
- if instance_ref['state'] != power_state.RUNNING:
- state = instance_ref['state']
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING)
+
+ if instance_ref['power_state'] != power_state.RUNNING:
+ state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rebooting')
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.reboot(instance_ref, network_info)
- self._update_state(context, instance_id)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def snapshot_instance(self, context, instance_id, image_id,
@@ -601,37 +624,45 @@ class ComputeManager(manager.SchedulerDependentManager):
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
+ if image_type == "snapshot":
+ task_state = task_states.IMAGE_SNAPSHOT
+ elif image_type == "backup":
+ task_state = task_states.IMAGE_BACKUP
+ else:
+ raise Exception(_('Image type not recognized %s') % image_type)
+
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- #NOTE(sirp): update_state currently only refreshes the state field
- # if we add is_snapshotting, we will need this refreshed too,
- # potentially?
- self._update_state(context, instance_id)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_state)
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
- if instance_ref['state'] != power_state.RUNNING:
- state = instance_ref['state']
+
+ if instance_ref['power_state'] != power_state.RUNNING:
+ state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals())
self.driver.snapshot(context, instance_ref, image_id)
+ self._instance_update(context, instance_id, task_state=None)
+
+ if image_type == 'snapshot' and rotation:
+ raise exception.ImageRotationNotAllowed()
+
+ elif image_type == 'backup' and rotation:
+ instance_uuid = instance_ref['uuid']
+ self.rotate_backups(context, instance_uuid, backup_type, rotation)
- if image_type == 'snapshot':
- if rotation:
- raise exception.ImageRotationNotAllowed()
elif image_type == 'backup':
- if rotation:
- instance_uuid = instance_ref['uuid']
- self.rotate_backups(context, instance_uuid, backup_type,
- rotation)
- else:
- raise exception.RotationRequiredForBackup()
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
+ raise exception.RotationRequiredForBackup()
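
The restructured dispatch above encodes four cases: snapshot with rotation is an error, backup with rotation rotates, backup without rotation is an error, and snapshot without rotation does nothing extra. A standalone sketch of that truth table (exception names mirror the patch):

class ImageRotationNotAllowed(Exception):
    pass

class RotationRequiredForBackup(Exception):
    pass

def check_rotation(image_type, rotation):
    if image_type == 'snapshot' and rotation:
        raise ImageRotationNotAllowed()
    elif image_type == 'backup' and rotation:
        return 'rotate'            # rotate_backups() would run here
    elif image_type == 'backup':
        raise RotationRequiredForBackup()
    return 'noop'                  # snapshot without rotation

print(check_rotation('snapshot', None))   # noop
print(check_rotation('backup', 3))        # rotate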
def rotate_backups(self, context, instance_uuid, backup_type, rotation):
"""Delete excess backups associated to an instance.
@@ -699,7 +730,7 @@ class ComputeManager(manager.SchedulerDependentManager):
for i in xrange(max_tries):
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref["id"]
- instance_state = instance_ref["state"]
+ instance_state = instance_ref["power_state"]
expected_state = power_state.RUNNING
if instance_state != expected_state:
@@ -734,7 +765,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
- instance_state = instance_ref['state']
+ instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to inject a file into a non-running '
@@ -752,7 +783,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
- instance_state = instance_ref['state']
+ instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to update agent on a non-running '
@@ -767,40 +798,41 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
+ LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
- _update_state = lambda result: self._update_state_callback(
- self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.rescue(context, instance_ref, _update_state, network_info)
- self._update_state(context, instance_id)
+
+ # NOTE(blamar): None of the virt drivers use the 'callback' param
+ self.driver.rescue(context, instance_ref, None, network_info)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.RESCUED,
+ task_state=None,
+ power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
+ LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- _update_state = lambda result: self._update_state_callback(
- self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.unrescue(instance_ref, _update_state, network_info)
- self._update_state(context, instance_id)
- @staticmethod
- def _update_state_callback(self, context, instance_id, result):
- """Update instance state when async task completes."""
- self._update_state(context, instance_id)
+ # NOTE(blamar): None of the virt drivers use the 'callback' param
+ self.driver.unrescue(instance_ref, None, network_info)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -859,11 +891,12 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self.db.instance_update(context, instance_ref['uuid'],
- dict(memory_mb=instance_type['memory_mb'],
- vcpus=instance_type['vcpus'],
- local_gb=instance_type['local_gb'],
- instance_type_id=instance_type['id']))
+ self._instance_update(context,
+ instance_ref["uuid"],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb'],
+ instance_type_id=instance_type['id'])
self.driver.revert_migration(instance_ref)
self.db.migration_update(context, migration_id,
@@ -890,8 +923,11 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get_by_uuid(context, instance_id)
if instance_ref['host'] == FLAGS.host:
- raise exception.Error(_(
- 'Migration error: destination same as source!'))
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ERROR)
+ msg = _('Migration error: destination same as source!')
+ raise exception.Error(msg)
old_instance_type = self.db.instance_type_get(context,
instance_ref['instance_type_id'])
@@ -985,6 +1021,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.finish_migration(context, instance_ref, disk_info,
network_info, resize_instance)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_VERIFY)
+
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@@ -1016,35 +1057,35 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
+ LOG.audit(_('instance %s: pausing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: pausing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'pausing')
- self.driver.pause(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.pause(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.PAUSED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this host."""
+ LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unpausing')
- self.driver.unpause(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.unpause(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_power_action(self, context, host=None, action=None):
@@ -1060,7 +1101,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref["state"] == power_state.RUNNING:
+ if instance_ref["power_state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
return self.driver.get_diagnostics(instance_ref)
@@ -1069,33 +1110,35 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""Suspend the given instance."""
+ LOG.audit(_('instance %s: suspending'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: suspending'), instance_id, context=context)
- self.db.instance_set_state(context, instance_id,
- power_state.NOSTATE,
- 'suspending')
- self.driver.suspend(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.suspend(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.SUSPENDED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""Resume the given suspended instance."""
+ LOG.audit(_('instance %s: resuming'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: resuming'), instance_id, context=context)
- self.db.instance_set_state(context, instance_id,
- power_state.NOSTATE,
- 'resuming')
- self.driver.resume(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.resume(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def lock_instance(self, context, instance_id):
@@ -1506,11 +1549,14 @@ class ComputeManager(manager.SchedulerDependentManager):
'block_migration': block_migration}})
# Restore instance state
- self.db.instance_update(ctxt,
- instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': dest})
+ current_power_state = self._get_power_state(ctxt, instance_ref)
+ self._instance_update(ctxt,
+ instance_ref["id"],
+ host=dest,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
# Restore volume state
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
@@ -1556,11 +1602,11 @@ class ComputeManager(manager.SchedulerDependentManager):
This param specifies destination host.
"""
host = instance_ref['host']
- self.db.instance_update(context,
- instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': host})
+ self._instance_update(context,
+ instance_ref['id'],
+ host=host,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
@@ -1608,10 +1654,9 @@ class ComputeManager(manager.SchedulerDependentManager):
error_list.append(ex)
try:
- self._poll_instance_states(context)
+ self._sync_power_states(context)
except Exception as ex:
- LOG.warning(_("Error during instance poll: %s"),
- unicode(ex))
+ LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
error_list.append(ex)
return error_list
@@ -1626,68 +1671,40 @@ class ComputeManager(manager.SchedulerDependentManager):
self.update_service_capabilities(
self.driver.get_host_stats(refresh=True))
- def _poll_instance_states(self, context):
+ def _sync_power_states(self, context):
+ """Align power states between the database and the hypervisor.
+
+ The hypervisor is authoritative for the power_state data, so we
+ simply loop over all known instances for this host and update the
+ power_state according to the hypervisor. If the instance is not found
+ then it will be set to power_state.NOSTATE, because it doesn't exist
+ on the hypervisor.
+
+ """
vm_instances = self.driver.list_instances_detail()
vm_instances = dict((vm.name, vm) for vm in vm_instances)
+ db_instances = self.db.instance_get_all_by_host(context, self.host)
- # Keep a list of VMs not in the DB, cross them off as we find them
- vms_not_found_in_db = list(vm_instances.keys())
+ num_vm_instances = len(vm_instances)
+ num_db_instances = len(db_instances)
- db_instances = self.db.instance_get_all_by_host(context, self.host)
+ if num_vm_instances != num_db_instances:
+ LOG.info(_("Found %(num_db_instances)s instances in the database "
+ "and %(num_vm_instances)s on the hypervisor.") % locals())
for db_instance in db_instances:
- name = db_instance['name']
- db_state = db_instance['state']
+ name = db_instance["name"]
+ db_power_state = db_instance['power_state']
vm_instance = vm_instances.get(name)
if vm_instance is None:
- # NOTE(justinsb): We have to be very careful here, because a
- # concurrent operation could be in progress (e.g. a spawn)
- if db_state == power_state.BUILDING:
- # TODO(justinsb): This does mean that if we crash during a
- # spawn, the machine will never leave the spawning state,
- # but this is just the way nova is; this function isn't
- # trying to correct that problem.
- # We could have a separate task to correct this error.
- # TODO(justinsb): What happens during a live migration?
- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
- "State=%(db_state)s, so assuming spawn is in "
- "progress.") % locals())
- vm_state = db_state
- else:
- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
- "State=%(db_state)s, so setting state to "
- "shutoff.") % locals())
- vm_state = power_state.SHUTOFF
- if db_instance['state_description'] == 'stopping':
- self.db.instance_stop(context, db_instance['id'])
- continue
+ vm_power_state = power_state.NOSTATE
else:
- vm_state = vm_instance.state
- vms_not_found_in_db.remove(name)
-
- if (db_instance['state_description'] in ['migrating', 'stopping']):
- # A situation which db record exists, but no instance"
- # sometimes occurs while live-migration at src compute,
- # this case should be ignored.
- LOG.debug(_("Ignoring %(name)s, as it's currently being "
- "migrated.") % locals())
- continue
-
- if vm_state != db_state:
- LOG.info(_("DB/VM state mismatch. Changing state from "
- "'%(db_state)s' to '%(vm_state)s'") % locals())
- self._update_state(context, db_instance['id'], vm_state)
+ vm_power_state = vm_instance.state
- # NOTE(justinsb): We no longer auto-remove SHUTOFF instances
- # It's quite hard to get them back when we do.
-
- # Are there VMs not in the DB?
- for vm_not_found_in_db in vms_not_found_in_db:
- name = vm_not_found_in_db
+ if vm_power_state == db_power_state:
+ continue
- # We only care about instances that compute *should* know about
- if name.startswith("instance-"):
- # TODO(justinsb): What to do here? Adopt it? Shut it down?
- LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
- % locals())
+ self._instance_update(context,
+ db_instance["id"],
+ power_state=vm_power_state)
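
A standalone model of the reconciliation loop above: the hypervisor view wins, and instances the hypervisor has never heard of drop to NOSTATE. The dict-based inputs are simplifications of the driver and db queries:

NOSTATE = 0        # stand-in for power_state.NOSTATE

def sync_power_states(db_instances, vm_instances):
    # db_instances: list of row dicts; vm_instances: {name: power_state}
    updates = {}
    for db_instance in db_instances:
        vm_power_state = vm_instances.get(db_instance['name'], NOSTATE)
        if vm_power_state != db_instance['power_state']:
            updates[db_instance['id']] = vm_power_state
    return updates

db_rows = [{'id': 1, 'name': 'instance-1', 'power_state': 1},
           {'id': 2, 'name': 'instance-2', 'power_state': 1}]
# instance-2 is gone from the hypervisor, so it falls back to NOSTATE:
print(sync_power_states(db_rows, {'instance-1': 1}))   # {2: 0}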
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
new file mode 100644
index 000000000..e3315a542
--- /dev/null
+++ b/nova/compute/task_states.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible task states for instances.
+
+Compute instance task states represent what is happening to the instance at the
+current moment. These tasks can be generic, such as 'spawning', or specific,
+such as 'block_device_mapping'. These task states allow for a better view into
+what an instance is doing and should be displayed to users/administrators as
+necessary.
+
+"""
+
+SCHEDULING = 'scheduling'
+BLOCK_DEVICE_MAPPING = 'block_device_mapping'
+NETWORKING = 'networking'
+SPAWNING = 'spawning'
+
+IMAGE_SNAPSHOT = 'image_snapshot'
+IMAGE_BACKUP = 'image_backup'
+
+UPDATING_PASSWORD = 'updating_password'
+
+RESIZE_PREP = 'resize_prep'
+RESIZE_MIGRATING = 'resize_migrating'
+RESIZE_MIGRATED = 'resize_migrated'
+RESIZE_FINISH = 'resize_finish'
+RESIZE_REVERTING = 'resize_reverting'
+RESIZE_CONFIRMING = 'resize_confirming'
+RESIZE_VERIFY = 'resize_verify'
+
+REBUILDING = 'rebuilding'
+
+REBOOTING = 'rebooting'
+PAUSING = 'pausing'
+UNPAUSING = 'unpausing'
+SUSPENDING = 'suspending'
+RESUMING = 'resuming'
+
+RESCUING = 'rescuing'
+UNRESCUING = 'unrescuing'
+
+DELETING = 'deleting'
+STOPPING = 'stopping'
+STARTING = 'starting'
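
The manager changes earlier in this patch always bracket an operation by hand: set a task state, do the work, clear the task state back to None. A hypothetical convenience (not in this patch) that captures the idiom; fake_update stands in for the manager's _instance_update():

import contextlib

@contextlib.contextmanager
def task(update, context, instance_id, task_state):
    update(context, instance_id, task_state=task_state)
    try:
        yield
    finally:
        # Clear the task marker whether the operation succeeded or not;
        # the real code clears it explicitly on the success path.
        update(context, instance_id, task_state=None)

def fake_update(context, instance_id, **kwargs):
    print('instance %s -> %r' % (instance_id, kwargs))

with task(fake_update, None, 7, 'rebooting'):
    print('... driver.reboot() would run here ...')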
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
new file mode 100644
index 000000000..6f16c1f09
--- /dev/null
+++ b/nova/compute/vm_states.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible vm states for instances.
+
+Compute instance vm states represent the state of an instance as it pertains to
+a user or administrator. When combined with task states (task_states.py), a
+better picture can be formed regarding the instance's health.
+
+"""
+
+ACTIVE = 'active'
+BUILDING = 'building'
+REBUILDING = 'rebuilding'
+
+PAUSED = 'paused'
+SUSPENDED = 'suspended'
+RESCUED = 'rescued'
+DELETED = 'deleted'
+STOPPED = 'stopped'
+
+MIGRATING = 'migrating'
+RESIZING = 'resizing'
+
+ERROR = 'error'
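
Taken together, vm_state answers "what is the instance?" and task_state answers "what is being done to it right now?". A sketch of how a consumer might combine the two axes into one display status (the mapping is illustrative, not from this patch):

def display_status(vm_state, task_state):
    if task_state is not None:
        return '%s (%s)' % (vm_state.upper(), task_state)
    return vm_state.upper()

print(display_status('active', None))          # ACTIVE
print(display_status('active', 'rebooting'))   # ACTIVE (rebooting)
print(display_status('building', 'spawning'))  # BUILDING (spawning)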
diff --git a/nova/context.py b/nova/context.py
index b917a1d81..5c22641a0 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -38,7 +38,7 @@ class RequestContext(object):
self.roles = roles or []
self.is_admin = is_admin
if self.is_admin is None:
- self.admin = 'admin' in self.roles
+ self.is_admin = 'admin' in [x.lower() for x in self.roles]
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
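
The one-line context.py fix corrects two problems at once: the old code assigned self.admin, leaving is_admin as None, and its membership test was case-sensitive. A standalone before/after:

roles = ['Admin', 'member']

old_check = 'admin' in roles                          # False, case-sensitive
new_check = 'admin' in [x.lower() for x in roles]     # True

print('old: %s, new: %s' % (old_check, new_check))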
diff --git a/nova/db/api.py b/nova/db/api.py
index 2d854f24c..148887635 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x',
'Template string to be used to generate volume names')
flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
'Template string to be used to generate snapshot names')
-
+flags.DEFINE_string('vsa_name_template', 'vsa-%08x',
+ 'Template string to be used to generate VSA names')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
sqlalchemy='nova.db.sqlalchemy.api')
@@ -495,9 +496,20 @@ def instance_get_all_by_filters(context, filters):
return IMPL.instance_get_all_by_filters(context, filters)
-def instance_get_active_by_window(context, begin, end=None):
- """Get instances active during a certain time window."""
- return IMPL.instance_get_active_by_window(context, begin, end)
+def instance_get_active_by_window(context, begin, end=None, project_id=None):
+ """Get instances active during a certain time window.
+
+ Specifying a project_id will filter for a certain project."""
+ return IMPL.instance_get_active_by_window(context, begin, end, project_id)
+
+
+def instance_get_active_by_window_joined(context, begin, end=None,
+ project_id=None):
+ """Get instances and joins active during a certain time window.
+
+ Specifying a project_id will filter for a certain project."""
+ return IMPL.instance_get_active_by_window_joined(context, begin, end,
+ project_id)
def instance_get_all_by_user(context, user_id):
@@ -1436,3 +1448,112 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id,
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
extra_specs)
+
+
+##################
+
+
+def volume_metadata_get(context, volume_id):
+ """Get all metadata for a volume."""
+ return IMPL.volume_metadata_get(context, volume_id)
+
+
+def volume_metadata_delete(context, volume_id, key):
+ """Delete the given metadata item."""
+ IMPL.volume_metadata_delete(context, volume_id, key)
+
+
+def volume_metadata_update(context, volume_id, metadata, delete):
+ """Update metadata if it exists, otherwise create it."""
+ IMPL.volume_metadata_update(context, volume_id, metadata, delete)
+
+
+##################
+
+
+def volume_type_create(context, values):
+ """Create a new volume type."""
+ return IMPL.volume_type_create(context, values)
+
+
+def volume_type_get_all(context, inactive=False):
+ """Get all volume types."""
+ return IMPL.volume_type_get_all(context, inactive)
+
+
+def volume_type_get(context, id):
+ """Get volume type by id."""
+ return IMPL.volume_type_get(context, id)
+
+
+def volume_type_get_by_name(context, name):
+ """Get volume type by name."""
+ return IMPL.volume_type_get_by_name(context, name)
+
+
+def volume_type_destroy(context, name):
+ """Delete a volume type."""
+ return IMPL.volume_type_destroy(context, name)
+
+
+def volume_type_purge(context, name):
+ """Purges (removes) a volume type from DB.
+
+ Use volume_type_destroy for most cases.
+
+ """
+ return IMPL.volume_type_purge(context, name)
+
+
+####################
+
+
+def volume_type_extra_specs_get(context, volume_type_id):
+ """Get all extra specs for a volume type."""
+ return IMPL.volume_type_extra_specs_get(context, volume_type_id)
+
+
+def volume_type_extra_specs_delete(context, volume_type_id, key):
+ """Delete the given extra specs item."""
+ IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
+
+
+def volume_type_extra_specs_update_or_create(context, volume_type_id,
+ extra_specs):
+ """Create or update volume type extra specs. This adds or modifies the
+ key/value pairs specified in the extra specs dict argument."""
+ IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
+ extra_specs)
+
+
+####################
+
+
+def vsa_create(context, values):
+ """Creates Virtual Storage Array record."""
+ return IMPL.vsa_create(context, values)
+
+
+def vsa_update(context, vsa_id, values):
+ """Updates Virtual Storage Array record."""
+ return IMPL.vsa_update(context, vsa_id, values)
+
+
+def vsa_destroy(context, vsa_id):
+ """Deletes Virtual Storage Array record."""
+ return IMPL.vsa_destroy(context, vsa_id)
+
+
+def vsa_get(context, vsa_id):
+ """Get Virtual Storage Array record by ID."""
+ return IMPL.vsa_get(context, vsa_id)
+
+
+def vsa_get_all(context):
+ """Get all Virtual Storage Array records."""
+ return IMPL.vsa_get_all(context)
+
+
+def vsa_get_all_by_project(context, project_id):
+ """Get all Virtual Storage Array records by project ID."""
+ return IMPL.vsa_get_all_by_project(context, project_id)
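
Each new db.api entry point above is a one-line delegation to IMPL, which utils.LazyPluggable resolves to the configured backend on first use. A minimal standalone model of that indirection (not the real nova class):

class LazyBackend(object):
    def __init__(self, backends, chosen):
        self._backends = backends
        self._chosen = chosen
        self._impl = None

    def __getattr__(self, name):
        # Only called for attributes not found on the instance, so the
        # backend module is loaded lazily on the first real API call.
        if self._impl is None:
            self._impl = self._backends[self._chosen]()
        return getattr(self._impl, name)

class SqlalchemyStub(object):
    def vsa_get_all(self, context):
        return ['vsa-00000001']

IMPL = LazyBackend({'sqlalchemy': SqlalchemyStub}, 'sqlalchemy')

def vsa_get_all(context):
    """Get all Virtual Storage Array records."""
    return IMPL.vsa_get_all(context)

print(vsa_get_all(None))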
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 04b5405f6..c97ff5070 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -28,6 +28,7 @@ from nova import flags
from nova import ipv6
from nova import utils
from nova import log as logging
+from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
@@ -132,6 +133,20 @@ def require_instance_exists(f):
return wrapper
+def require_volume_exists(f):
+ """Decorator to require the specified volume to exist.
+
+ Requires the wrapped function to take context and volume_id as
+ its first two arguments.
+ """
+
+ def wrapper(context, volume_id, *args, **kwargs):
+ db.api.volume_get(context, volume_id)
+ return f(context, volume_id, *args, **kwargs)
+ wrapper.__name__ = f.__name__
+ return wrapper
+
+
###################
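
The decorator above copies __name__ across by hand; functools.wraps is the stock equivalent and also preserves the docstring. A self-contained sketch of the same "look it up first, then call through" pattern, with an in-memory volume table standing in for the db:

import functools

VOLUMES = {42: {'id': 42}}

class VolumeNotFound(Exception):
    pass

def volume_get(context, volume_id):
    # Stand-in for db.api.volume_get(); raises if the volume is missing.
    if volume_id not in VOLUMES:
        raise VolumeNotFound(volume_id)
    return VOLUMES[volume_id]

def require_volume_exists(f):
    @functools.wraps(f)
    def wrapper(context, volume_id, *args, **kwargs):
        volume_get(context, volume_id)
        return f(context, volume_id, *args, **kwargs)
    return wrapper

@require_volume_exists
def volume_metadata_get(context, volume_id):
    return {}

print(volume_metadata_get(None, 42))    # {}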
@@ -1019,11 +1034,11 @@ def virtual_interface_delete_by_instance(context, instance_id):
###################
-def _metadata_refs(metadata_dict):
+def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
- metadata_ref = models.InstanceMetadata()
+ metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
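
_metadata_refs() now builds rows for whichever model class the caller names (InstanceMetadata, VolumeMetadata, VolumeTypeExtraSpecs). The same shape with plain dicts standing in for the model classes:

def metadata_refs(metadata_dict, meta_class):
    refs = []
    if metadata_dict:
        for key, value in metadata_dict.items():
            ref = meta_class()       # e.g. models.VolumeMetadata()
            ref['key'] = key
            ref['value'] = value
            refs.append(ref)
    return refs

print(metadata_refs({'size': '20'}, dict))
# [{'key': 'size', 'value': '20'}]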
@@ -1037,8 +1052,8 @@ def instance_create(context, values):
context - request context object
values - dict containing column values.
"""
- values['metadata'] = _metadata_refs(values.get('metadata'))
-
+ values['metadata'] = _metadata_refs(values.get('metadata'),
+ models.InstanceMetadata)
instance_ref = models.Instance()
instance_ref['uuid'] = str(utils.gen_uuid())
@@ -1088,12 +1103,11 @@ def instance_destroy(context, instance_id):
def instance_stop(context, instance_id):
session = get_session()
with session.begin():
- from nova.compute import power_state
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'host': None,
- 'state': power_state.SHUTOFF,
- 'state_description': 'stopped',
+ 'vm_state': vm_states.STOPPED,
+ 'task_state': None,
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
@@ -1252,7 +1266,7 @@ def instance_get_all_by_filters(context, filters):
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
- 'state', 'instance_type_id', 'deleted']
+ 'vm_state', 'instance_type_id', 'deleted']
query_filters = [key for key in filters.iterkeys()
if key in exact_match_filter_names]
@@ -1292,21 +1306,40 @@ def instance_get_all_by_filters(context, filters):
return instances
+@require_context
+def instance_get_active_by_window(context, begin, end=None, project_id=None):
+ """Return instances that were continuously active over the window."""
+ session = get_session()
+ query = session.query(models.Instance).\
+ filter(models.Instance.launched_at < begin)
+ if end:
+ query = query.filter(or_(models.Instance.terminated_at == None,
+ models.Instance.terminated_at > end))
+ else:
+ query = query.filter(models.Instance.terminated_at == None)
+ if project_id:
+ query = query.filter_by(project_id=project_id)
+ return query.all()
+
+
@require_admin_context
-def instance_get_active_by_window(context, begin, end=None):
- """Return instances that were continuously active over the given window"""
+def instance_get_active_by_window_joined(context, begin, end=None,
+ project_id=None):
+ """Return instances and joins continuously active over the window."""
session = get_session()
query = session.query(models.Instance).\
- options(joinedload_all('fixed_ips.floating_ips')).\
- options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ips.network')).\
- options(joinedload('instance_type')).\
- filter(models.Instance.launched_at < begin)
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('instance_type')).\
+ filter(models.Instance.launched_at < begin)
if end:
query = query.filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > end))
else:
query = query.filter(models.Instance.terminated_at == None)
+ if project_id:
+ query = query.filter_by(project_id=project_id)
return query.all()
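
Reduced to a plain predicate, the query above says: an instance counts as continuously active over the window if it launched before the window began and was not terminated before the window ended, with NULL terminated_at meaning still running. A sketch using numbers for timestamps:

def active_over_window(launched_at, terminated_at, begin, end=None):
    # Mirrors the SQL: NULL launched_at rows never match 'launched < begin'.
    if launched_at is None or launched_at >= begin:
        return False
    if end is None:
        return terminated_at is None
    return terminated_at is None or terminated_at > end

print(active_over_window(1, None, 5))       # True: still running
print(active_over_window(1, 3, 5, 10))      # False: ended before the window closed
print(active_over_window(1, 12, 5, 10))     # True: outlived the window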
@@ -1470,18 +1503,6 @@ def instance_get_floating_address(context, instance_id):
return fixed_ip_refs[0].floating_ips[0]['address']
-@require_admin_context
-def instance_set_state(context, instance_id, state, description=None):
- # TODO(devcamcar): Move this out of models and into driver
- from nova.compute import power_state
- if not description:
- description = power_state.name(state)
- db.instance_update(context,
- instance_id,
- {'state': state,
- 'state_description': description})
-
-
@require_context
def instance_update(context, instance_id, values):
session = get_session()
@@ -2144,6 +2165,8 @@ def volume_attached(context, volume_id, instance_id, mountpoint):
@require_context
def volume_create(context, values):
+ values['volume_metadata'] = _metadata_refs(values.get('metadata'),
+ models.VolumeMetadata)
volume_ref = models.Volume()
volume_ref.update(values)
@@ -2180,6 +2203,11 @@ def volume_destroy(context, volume_id):
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
+ session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_admin_context
@@ -2203,12 +2231,16 @@ def volume_get(context, volume_id, session=None):
if is_admin_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
elif is_user_context(context):
result = session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(project_id=context.project_id).\
filter_by(id=volume_id).\
filter_by(deleted=False).\
@@ -2224,6 +2256,8 @@ def volume_get_all(context):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2233,6 +2267,8 @@ def volume_get_all_by_host(context, host):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2242,6 +2278,8 @@ def volume_get_all_by_host(context, host):
def volume_get_all_by_instance(context, instance_id):
session = get_session()
result = session.query(models.Volume).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
all()
@@ -2257,6 +2295,8 @@ def volume_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Volume).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -2269,6 +2309,8 @@ def volume_get_instance(context, volume_id):
filter_by(id=volume_id).\
filter_by(deleted=can_read_deleted(context)).\
options(joinedload('instance')).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type')).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
@@ -2303,12 +2345,116 @@ def volume_get_iscsi_target_num(context, volume_id):
@require_context
def volume_update(context, volume_id, values):
session = get_session()
+ metadata = values.get('metadata')
+ if metadata is not None:
+ volume_metadata_update(context,
+ volume_id,
+ values.pop('metadata'),
+ delete=True)
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref.update(values)
volume_ref.save(session=session)
+####################
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_get(context, volume_id):
+ session = get_session()
+
+ meta_results = session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).\
+ all()
+
+ meta_dict = {}
+ for i in meta_results:
+ meta_dict[i['key']] = i['value']
+ return meta_dict
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_delete(context, volume_id, key):
+ session = get_session()
+ session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_delete_all(context, volume_id):
+ session = get_session()
+ session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_get_item(context, volume_id, key, session=None):
+ if not session:
+ session = get_session()
+
+ meta_result = session.query(models.VolumeMetadata).\
+ filter_by(volume_id=volume_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not meta_result:
+ raise exception.VolumeMetadataNotFound(metadata_key=key,
+ volume_id=volume_id)
+ return meta_result
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_update(context, volume_id, metadata, delete):
+ session = get_session()
+
+ # Set existing metadata to deleted if delete argument is True
+ if delete:
+ original_metadata = volume_metadata_get(context, volume_id)
+ for meta_key, meta_value in original_metadata.iteritems():
+ if meta_key not in metadata:
+ meta_ref = volume_metadata_get_item(context, volume_id,
+ meta_key, session)
+ meta_ref.update({'deleted': True})
+ meta_ref.save(session=session)
+
+ meta_ref = None
+
+ # Now update all existing items with new values, or create new meta objects
+ for meta_key, meta_value in metadata.iteritems():
+
+ # update the value whether it exists or not
+ item = {"value": meta_value}
+
+ try:
+ meta_ref = volume_metadata_get_item(context, volume_id,
+ meta_key, session)
+ except exception.VolumeMetadataNotFound, e:
+ meta_ref = models.VolumeMetadata()
+ item.update({"key": meta_key, "volume_id": volume_id})
+
+ meta_ref.update(item)
+ meta_ref.save(session=session)
+
+ return metadata
+
+
###################
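
volume_metadata_update() semantics, over plain dicts: with delete=True the stored metadata is replaced wholesale (missing keys are soft-deleted in the real code), while with delete=False new keys are merged over the old ones:

def metadata_update(stored, metadata, delete):
    if delete:
        for key in list(stored):
            if key not in metadata:
                del stored[key]       # the real code marks rows deleted
    stored.update(metadata)           # update existing items, create new
    return stored

print(metadata_update({'a': '1', 'b': '2'}, {'b': '9'}, delete=True))
# {'b': '9'}
print(metadata_update({'a': '1', 'b': '2'}, {'b': '9'}, delete=False))
# {'a': '1', 'b': '9'}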
@@ -3143,7 +3289,7 @@ def instance_type_create(_context, values):
def _dict_with_extra_specs(inst_type_query):
- """Takes an instance type query returned by sqlalchemy
+ """Takes an instance OR volume type query returned by sqlalchemy
and returns it as a dictionary, converting the extra_specs
entry from a list of dicts:
@@ -3525,3 +3671,278 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
+
+
+##################
+
+
+@require_admin_context
+def volume_type_create(_context, values):
+ """Create a new volume type. In order to pass in extra specs,
+ the values dict should contain an 'extra_specs' key/value pair:
+
+ {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
+
+ """
+ try:
+ specs = values.get('extra_specs')
+
+ values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
+ models.VolumeTypeExtraSpecs)
+ volume_type_ref = models.VolumeTypes()
+ volume_type_ref.update(values)
+ volume_type_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return volume_type_ref
+
+
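+# Shape of the values dict volume_type_create() expects, per its
+# docstring (the name and spec keys here are purely illustrative):
+#
+#     values = {
+#         'name': 'fast',
+#         'extra_specs': {'drive_type': 'SSD', 'qos': 'high'},
+#     }
+#
+# volume_type_create(context, values) stores the type and expands the
+# nested extra_specs dict into VolumeTypeExtraSpecs rows via
+# _metadata_refs(..., models.VolumeTypeExtraSpecs).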
+@require_context
+def volume_type_get_all(context, inactive=False, filters={}):
+ """Returns a dict describing all volume_types with name as key."""
+ session = get_session()
+ if inactive:
+ vol_types = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ order_by("name").\
+ all()
+ else:
+ vol_types = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ filter_by(deleted=False).\
+ order_by("name").\
+ all()
+ vol_dict = {}
+ if vol_types:
+ for i in vol_types:
+ vol_dict[i['name']] = _dict_with_extra_specs(i)
+ return vol_dict
+
+
+@require_context
+def volume_type_get(context, id):
+ """Returns a dict describing specific volume_type"""
+ session = get_session()
+ vol_type = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ filter_by(id=id).\
+ first()
+
+ if not vol_type:
+ raise exception.VolumeTypeNotFound(volume_type_id=id)
+ else:
+ return _dict_with_extra_specs(vol_type)
+
+
+@require_context
+def volume_type_get_by_name(context, name):
+ """Returns a dict describing specific volume_type"""
+ session = get_session()
+ vol_type = session.query(models.VolumeTypes).\
+ options(joinedload('extra_specs')).\
+ filter_by(name=name).\
+ first()
+ if not vol_type:
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ else:
+ return _dict_with_extra_specs(vol_type)
+
+
+@require_admin_context
+def volume_type_destroy(context, name):
+ """ Marks specific volume_type as deleted"""
+ session = get_session()
+ volume_type_ref = session.query(models.VolumeTypes).\
+ filter_by(name=name)
+ records = volume_type_ref.update(dict(deleted=True))
+ if records == 0:
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ else:
+ return volume_type_ref
+
+
+@require_admin_context
+def volume_type_purge(context, name):
+ """ Removes specific volume_type from DB
+ Usually volume_type_destroy should be used
+ """
+ session = get_session()
+ volume_type_ref = session.query(models.VolumeTypes).\
+ filter_by(name=name)
+ records = volume_type_ref.delete()
+ if records == 0:
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ else:
+ return volume_type_ref
+
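
A hedged sketch of the volume-type lifecycle these helpers implement (`admin_ctxt` is an illustrative admin context):

    volume_type_create(admin_ctxt,
                       dict(name='gold', extra_specs={'qos': 'high'}))
    vt = volume_type_get_by_name(admin_ctxt, 'gold')
    assert vt['extra_specs'] == {'qos': 'high'}
    volume_type_destroy(admin_ctxt, 'gold')    # soft delete
    volume_type_purge(admin_ctxt, 'gold')      # hard delete, rarely needed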
+
+####################
+
+
+@require_context
+def volume_type_extra_specs_get(context, volume_type_id):
+ session = get_session()
+
+ spec_results = session.query(models.VolumeTypeExtraSpecs).\
+ filter_by(volume_type_id=volume_type_id).\
+ filter_by(deleted=False).\
+ all()
+
+ spec_dict = {}
+ for i in spec_results:
+ spec_dict[i['key']] = i['value']
+ return spec_dict
+
+
+@require_context
+def volume_type_extra_specs_delete(context, volume_type_id, key):
+ session = get_session()
+ session.query(models.VolumeTypeExtraSpecs).\
+ filter_by(volume_type_id=volume_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def volume_type_extra_specs_get_item(context, volume_type_id, key,
+ session=None):
+
+ if not session:
+ session = get_session()
+
+ spec_result = session.query(models.VolumeTypeExtraSpecs).\
+ filter_by(volume_type_id=volume_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not spec_result:
+ raise exception.\
+ VolumeTypeExtraSpecsNotFound(extra_specs_key=key,
+ volume_type_id=volume_type_id)
+ return spec_result
+
+
+@require_context
+def volume_type_extra_specs_update_or_create(context, volume_type_id,
+ specs):
+ session = get_session()
+ spec_ref = None
+ for key, value in specs.iteritems():
+ try:
+ spec_ref = volume_type_extra_specs_get_item(
+ context, volume_type_id, key, session)
+ except exception.VolumeTypeExtraSpecsNotFound, e:
+ spec_ref = models.VolumeTypeExtraSpecs()
+ spec_ref.update({"key": key, "value": value,
+ "volume_type_id": volume_type_id,
+ "deleted": 0})
+ spec_ref.save(session=session)
+ return specs
+
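
And the extra-specs round trip in isolation (the volume_type_id of 1 is illustrative):

    volume_type_extra_specs_update_or_create(ctxt, 1, {'iops': '1000'})
    print volume_type_extra_specs_get(ctxt, 1)    # {'iops': '1000'}
    volume_type_extra_specs_delete(ctxt, 1, 'iops')
    print volume_type_extra_specs_get(ctxt, 1)    # {}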
+
+####################
+
+
+@require_admin_context
+def vsa_create(context, values):
+ """
+ Creates Virtual Storage Array record.
+ """
+ try:
+ vsa_ref = models.VirtualStorageArray()
+ vsa_ref.update(values)
+ vsa_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_update(context, vsa_id, values):
+ """
+ Updates Virtual Storage Array record.
+ """
+ session = get_session()
+ with session.begin():
+ vsa_ref = vsa_get(context, vsa_id, session=session)
+ vsa_ref.update(values)
+ vsa_ref.save(session=session)
+ return vsa_ref
+
+
+@require_admin_context
+def vsa_destroy(context, vsa_id):
+ """
+ Deletes Virtual Storage Array record.
+ """
+ session = get_session()
+ with session.begin():
+ session.query(models.VirtualStorageArray).\
+ filter_by(id=vsa_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def vsa_get(context, vsa_id, session=None):
+ """
+ Get Virtual Storage Array record by ID.
+ """
+ if not session:
+ session = get_session()
+ result = None
+
+ if is_admin_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=vsa_id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.VirtualStorageArrayNotFound(id=vsa_id)
+
+ return result
+
+
+@require_admin_context
+def vsa_get_all(context):
+ """
+ Get all Virtual Storage Array records.
+ """
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def vsa_get_all_by_project(context, project_id):
+ """
+ Get all Virtual Storage Array records by project ID.
+ """
+ authorize_project_context(context, project_id)
+
+ session = get_session()
+ return session.query(models.VirtualStorageArray).\
+ options(joinedload('vsa_instance_type')).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+####################
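
A hedged sketch of the VSA helpers (contexts and field values are illustrative placeholders; the columns match the 043 migration below):

    vsa = vsa_create(admin_ctxt, dict(display_name='vsa-demo',
                                      project_id='proj1',
                                      instance_type_id=1,
                                      image_ref='vc_image',
                                      vc_count=1,
                                      vol_count=0,
                                      status='creating'))
    vsa_update(admin_ctxt, vsa['id'], dict(status='created'))
    for v in vsa_get_all_by_project(user_ctxt, 'proj1'):
        print v['id'], v['status']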
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
index a4fe3e482..56b287171 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -75,8 +75,8 @@ def new_style_quotas_table(name):
)
-def existing_quotas_table(migrate_engine):
- return Table('quotas', meta, autoload=True, autoload_with=migrate_engine)
+def quotas_table(migrate_engine, name='quotas'):
+ return Table(name, meta, autoload=True, autoload_with=migrate_engine)
def _assert_no_duplicate_project_ids(quotas):
@@ -179,13 +179,18 @@ def upgrade(migrate_engine):
# bind migrate_engine to your metadata
meta.bind = migrate_engine
- old_quotas = existing_quotas_table(migrate_engine)
+ old_quotas = quotas_table(migrate_engine)
assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
new_quotas = new_style_quotas_table('quotas_new')
new_quotas.create()
convert_forward(migrate_engine, old_quotas, new_quotas)
old_quotas.drop()
+
+ # clear metadata to work around this:
+ # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
+ meta.clear()
+ new_quotas = quotas_table(migrate_engine, 'quotas_new')
new_quotas.rename('quotas')
@@ -193,11 +198,16 @@ def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
- new_quotas = existing_quotas_table(migrate_engine)
+ new_quotas = quotas_table(migrate_engine)
assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
old_quotas = old_style_quotas_table('quotas_old')
old_quotas.create()
convert_backward(migrate_engine, old_quotas, new_quotas)
new_quotas.drop()
+
+ # clear metadata to work around this:
+ # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
+ meta.clear()
+ old_quotas = quotas_table(migrate_engine, 'quotas_old')
old_quotas.rename('quotas')
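
The workaround in isolation, as a standalone sketch (engine and table names illustrative): after DDL has run, a Table previously reflected into the shared MetaData is stale and must be forgotten before re-reflecting, or SQLAlchemy hands back the cached definition.

    from sqlalchemy import MetaData, Table

    meta = MetaData()

    def reflect(engine, name):
        return Table(name, meta, autoload=True, autoload_with=engine)

    # after the DDL above has dropped 'quotas':
    meta.clear()                       # forget the cached Table objects
    t = reflect(engine, 'quotas_new')  # re-reflect the live schema
    t.rename('quotas')                 # rename() comes from sqlalchemy-migrate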
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
index f3244033b..dfbd4ba32 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py
@@ -40,13 +40,17 @@ def upgrade(migrate_engine):
migrations.create_column(new_instance_type_id)
# Convert flavor_id to instance_type_id
+ itypes = {}
for instance_type in migrate_engine.execute(instance_types.select()):
+ itypes[instance_type.id] = instance_type.flavorid
+
+ for instance_type_id in itypes.keys():
migrate_engine.execute(migrations.update()\
- .where(migrations.c.old_flavor_id == instance_type.flavorid)\
- .values(old_instance_type_id=instance_type.id))
+ .where(migrations.c.old_flavor_id == itypes[instance_type_id])\
+ .values(old_instance_type_id=instance_type_id))
migrate_engine.execute(migrations.update()\
- .where(migrations.c.new_flavor_id == instance_type.flavorid)\
- .values(new_instance_type_id=instance_type.id))
+ .where(migrations.c.new_flavor_id == itypes[instance_type_id])\
+ .values(new_instance_type_id=instance_type_id))
migrations.c.old_flavor_id.drop()
migrations.c.new_flavor_id.drop()
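
The rewrite first snapshots instance_types into a plain dict and only then issues UPDATEs, so no UPDATE executes while the SELECT result set is still being iterated on the same connection. The pattern in isolation (engine and tables as in this script):

    itypes = dict((row.id, row.flavorid) for row in
                  migrate_engine.execute(instance_types.select()))
    for type_id, flavorid in itypes.iteritems():
        migrate_engine.execute(migrations.update()
                .where(migrations.c.old_flavor_id == flavorid)
                .values(old_instance_type_id=type_id))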
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py
new file mode 100644
index 000000000..dd4cccb9e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py
@@ -0,0 +1,115 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Text, Boolean, ForeignKey
+
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of tables.
+#
+
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+volume_type_id = Column('volume_type_id', Integer(), nullable=True)
+
+
+# New Tables
+#
+
+volume_types = Table('volume_types', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True))
+
+volume_type_extra_specs_table = Table('volume_type_extra_specs', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_type_id',
+ Integer(),
+ ForeignKey('volume_types.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+volume_metadata_table = Table('volume_metadata', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_id',
+ Integer(),
+ ForeignKey('volumes.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+new_tables = (volume_types,
+ volume_type_extra_specs_table,
+ volume_metadata_table)
+
+#
+# Tables to alter
+#
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ for table in new_tables:
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ volumes.create_column(volume_type_id)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ volumes.drop_column(volume_type_id)
+
+ for table in new_tables:
+ table.drop()
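
A hedged smoke test for this migration (assumes this module's names are in scope and sqlalchemy-migrate is installed; the in-memory SQLite URL is illustrative):

    from sqlalchemy import create_engine

    engine = create_engine('sqlite://')
    volumes.create(bind=engine)    # stub parent table for the FK
    upgrade(engine)                # creates the three tables above
    engine.execute(volume_types.insert().values(name='gold',
                                                deleted=False))
    print list(engine.execute(volume_types.select()))
    downgrade(engine)              # drops them again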
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
new file mode 100644
index 000000000..844643704
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Text, Boolean, ForeignKey
+
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+
+virtual_storage_arrays = Table('virtual_storage_arrays', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_type_id', Integer(), nullable=False),
+ Column('image_ref',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vc_count', Integer(), nullable=False),
+ Column('vol_count', Integer(), nullable=False),
+ Column('status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ virtual_storage_arrays.create()
+ except Exception:
+ logging.info(repr(virtual_storage_arrays))
+ logging.exception('Exception while creating table')
+ raise
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ virtual_storage_arrays.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
new file mode 100644
index 000000000..e58ae5362
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
@@ -0,0 +1,138 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+from sqlalchemy import MetaData, Table, Column, String
+
+from nova.compute import task_states
+from nova.compute import vm_states
+
+
+meta = MetaData()
+
+
+c_task_state = Column('task_state',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+_upgrade_translations = {
+ "stopping": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": task_states.STOPPING,
+ },
+ "stopped": {
+ "state_description": vm_states.STOPPED,
+ "task_state": None,
+ },
+ "terminated": {
+ "state_description": vm_states.DELETED,
+ "task_state": None,
+ },
+ "terminating": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": task_states.DELETING,
+ },
+ "running": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": None,
+ },
+ "scheduling": {
+ "state_description": vm_states.BUILDING,
+ "task_state": task_states.SCHEDULING,
+ },
+ "migrating": {
+ "state_description": vm_states.MIGRATING,
+ "task_state": None,
+ },
+ "pending": {
+ "state_description": vm_states.BUILDING,
+ "task_state": task_states.SCHEDULING,
+ },
+}
+
+
+_downgrade_translations = {
+ vm_states.ACTIVE: {
+ None: "running",
+ task_states.DELETING: "terminating",
+ task_states.STOPPING: "stopping",
+ },
+ vm_states.BUILDING: {
+ None: "pending",
+ task_states.SCHEDULING: "scheduling",
+ },
+ vm_states.STOPPED: {
+ None: "stopped",
+ },
+ vm_states.REBUILDING: {
+ None: "pending",
+ },
+ vm_states.DELETED: {
+ None: "terminated",
+ },
+ vm_states.MIGRATING: {
+ None: "migrating",
+ },
+}
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instance_table = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ c_state = instance_table.c.state
+ c_state.alter(name='power_state')
+
+ c_vm_state = instance_table.c.state_description
+ c_vm_state.alter(name='vm_state')
+
+ instance_table.create_column(c_task_state)
+
+ for old_state, values in _upgrade_translations.iteritems():
+ instance_table.update().\
+ values(**values).\
+ where(c_vm_state == old_state).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instance_table = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ c_task_state = instance_table.c.task_state
+
+ c_state = instance_table.c.power_state
+ c_state.alter(name='state')
+
+ c_vm_state = instance_table.c.vm_state
+ c_vm_state.alter(name='state_description')
+
+ for old_vm_state, old_task_states in _downgrade_translations.iteritems():
+ for old_task_state, new_state_desc in old_task_states.iteritems():
+ instance_table.update().\
+ where(c_task_state == old_task_state).\
+ where(c_vm_state == old_vm_state).\
+ values(vm_state=new_state_desc).\
+ execute()
+
+ instance_table.drop_column('task_state')
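
The translation tables above are pure data; a sketch of the round trip they encode (values taken straight from the dicts in this file):

    old = 'terminating'
    new = _upgrade_translations[old]
    # new == {'state_description': vm_states.ACTIVE,
    #         'task_state': task_states.DELETING}
    back = _downgrade_translations[new['state_description']]\
                                  [new['task_state']]
    assert back == old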
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index d9e303599..bb05986c9 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -64,7 +64,9 @@ def db_version():
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
- 'volumes'):
+ 'virtual_storage_arrays',
+ 'volumes', 'volume_metadata',
+ 'volume_types', 'volume_type_extra_specs'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 0680501e9..854034f12 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -193,8 +193,9 @@ class Instance(BASE, NovaBase):
key_name = Column(String(255))
key_data = Column(Text)
- state = Column(Integer)
- state_description = Column(String(255))
+ power_state = Column(Integer)
+ vm_state = Column(String(255))
+ task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
@@ -238,16 +239,31 @@ class Instance(BASE, NovaBase):
access_ip_v4 = Column(String(255))
access_ip_v6 = Column(String(255))
- # TODO(vish): see Ewan's email about state improvements, probably
- # should be in a driver base class or some such
- # vmstate_state = running, halted, suspended, paused
- # power_state = what we have
- # task_state = transitory and may trigger power state transition
- #@validates('state')
- #def validate_state(self, key, state):
- # assert(state in ['nostate', 'running', 'blocked', 'paused',
- # 'shutdown', 'shutoff', 'crashed'])
+class VirtualStorageArray(BASE, NovaBase):
+ """
+ Represents a virtual storage array supplying block storage to instances.
+ """
+ __tablename__ = 'virtual_storage_arrays'
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return FLAGS.vsa_name_template % self.id
+
+ # User editable field for display in user-facing UIs
+ display_name = Column(String(255))
+ display_description = Column(String(255))
+
+ project_id = Column(String(255))
+ availability_zone = Column(String(255))
+
+ instance_type_id = Column(Integer, ForeignKey('instance_types.id'))
+ image_ref = Column(String(255))
+ vc_count = Column(Integer, default=0) # number of requested VC instances
+ vol_count = Column(Integer, default=0) # total number of BE volumes
+ status = Column(String(255))
class InstanceActions(BASE, NovaBase):
@@ -279,6 +295,12 @@ class InstanceTypes(BASE, NovaBase):
primaryjoin='and_(Instance.instance_type_id == '
'InstanceTypes.id)')
+ vsas = relationship(VirtualStorageArray,
+ backref=backref('vsa_instance_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(VirtualStorageArray.instance_type_id'
+ ' == InstanceTypes.id)')
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
@@ -318,6 +340,50 @@ class Volume(BASE, NovaBase):
provider_location = Column(String(255))
provider_auth = Column(String(255))
+ volume_type_id = Column(Integer)
+
+
+class VolumeMetadata(BASE, NovaBase):
+ """Represents a metadata key/value pair for a volume"""
+ __tablename__ = 'volume_metadata'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=False)
+ volume = relationship(Volume, backref="volume_metadata",
+ foreign_keys=volume_id,
+ primaryjoin='and_('
+ 'VolumeMetadata.volume_id == Volume.id,'
+ 'VolumeMetadata.deleted == False)')
+
+
+class VolumeTypes(BASE, NovaBase):
+ """Represent possible volume_types of volumes offered"""
+ __tablename__ = "volume_types"
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255), unique=True)
+
+ volumes = relationship(Volume,
+ backref=backref('volume_type', uselist=False),
+ foreign_keys=id,
+ primaryjoin='and_(Volume.volume_type_id == '
+ 'VolumeTypes.id)')
+
+
+class VolumeTypeExtraSpecs(BASE, NovaBase):
+ """Represents additional specs as key/value pairs for a volume_type"""
+ __tablename__ = 'volume_type_extra_specs'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
+ nullable=False)
+ volume_type = relationship(VolumeTypes, backref="extra_specs",
+ foreign_keys=volume_type_id,
+ primaryjoin='and_('
+ 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
+ 'VolumeTypeExtraSpecs.deleted == False)')
+
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
@@ -803,7 +869,9 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
+ VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs,
+ AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration,
+ VirtualStorageArray)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
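
A hedged sketch of the new model relationships (assumes a working SQLAlchemy session `sess`; setup omitted):

    vt = VolumeTypes(name='gold')
    vt.extra_specs.append(VolumeTypeExtraSpecs(key='qos', value='high'))
    sess.add(vt)
    sess.commit()
    print vt.extra_specs[0].key, vt.extra_specs[0].value    # qos high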
diff --git a/nova/exception.py b/nova/exception.py
index 66740019b..fca4586c3 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -61,7 +61,7 @@ class ApiError(Error):
super(ApiError, self).__init__(outstr)
-class BuildInProgress(Error):
+class RebuildRequiresActiveInstance(Error):
pass
@@ -197,6 +197,10 @@ class InvalidInstanceType(Invalid):
message = _("Invalid instance type %(instance_type)s.")
+class InvalidVolumeType(Invalid):
+ message = _("Invalid volume type %(volume_type)s.")
+
+
class InvalidPortRange(Invalid):
message = _("Invalid port range %(from_port)s:%(to_port)s.")
@@ -338,6 +342,29 @@ class VolumeNotFoundForInstance(VolumeNotFound):
message = _("Volume not found for instance %(instance_id)s.")
+class VolumeMetadataNotFound(NotFound):
+ message = _("Volume %(volume_id)s has no metadata with "
+ "key %(metadata_key)s.")
+
+
+class NoVolumeTypesFound(NotFound):
+ message = _("Zero volume types found.")
+
+
+class VolumeTypeNotFound(NotFound):
+ message = _("Volume type %(volume_type_id)s could not be found.")
+
+
+class VolumeTypeNotFoundByName(VolumeTypeNotFound):
+ message = _("Volume type with name %(volume_type_name)s "
+ "could not be found.")
+
+
+class VolumeTypeExtraSpecsNotFound(NotFound):
+ message = _("Volume Type %(volume_type_id)s has no extra specs with "
+ "key %(extra_specs_key)s.")
+
+
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
@@ -506,6 +533,10 @@ class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
+class FloatingIpAlreadyInUse(NovaException):
+ message = _("Floating ip %(address)s already in use by %(fixed_ip)s.")
+
+
class NoFloatingIpsDefined(NotFound):
message = _("Zero floating ips exist.")
@@ -756,6 +787,18 @@ class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+class VSANovaAccessParamNotFound(Invalid):
+ message = _("Nova access parameters were not specified.")
+
+
+class VirtualStorageArrayNotFound(NotFound):
+ message = _("Virtual Storage Array %(id)d could not be found.")
+
+
+class VirtualStorageArrayNotFoundByName(NotFound):
+ message = _("Virtual Storage Array %(name)s could not be found.")
+
+
class CannotResizeToSameSize(NovaException):
message = _("When resizing, instances must change size!")
diff --git a/nova/flags.py b/nova/flags.py
index 95000df1b..aa76defe5 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url',
in the form "http://127.0.0.1:8000"')
DEFINE_string('ajax_console_proxy_port',
8000, 'port that ajax_console_proxy binds')
+DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,
@@ -302,8 +303,12 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
-DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
-DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
+DEFINE_integer('rabbit_retry_interval', 1,
+ 'rabbit connection retry interval to start')
+DEFINE_integer('rabbit_retry_backoff', 2,
+ 'rabbit connection retry backoff in seconds')
+DEFINE_integer('rabbit_max_retries', 0,
+ 'maximum rabbit connection attempts (0=try forever)')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues')
DEFINE_list('enabled_apis', ['ec2', 'osapi'],
@@ -371,6 +376,17 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
+DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
+ 'Manager for vsa')
+DEFINE_string('vc_image_name', 'vc_image',
+ 'the VC image ID (for a VC image that exists in the Glance DB)')
+# VSA constants and enums
+DEFINE_string('default_vsa_instance_type', 'm1.small',
+ 'default instance type for VSA instances')
+DEFINE_integer('max_vcs_in_vsa', 32,
+ 'maximum VCs in a VSA')
+DEFINE_integer('vsa_part_size_gb', 100,
+ 'default partition size for shared capacity')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
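
The rabbit retry flags now describe a linear backoff rather than a fixed interval. A sketch of the resulting schedule (arithmetic only; the 30-second cap comes from the rpc implementations below):

    interval, backoff, cap = 1, 2, 30
    waits = []
    for i in range(6):
        waits.append(interval)
        interval = min(interval + backoff, cap)
    print waits    # [1, 3, 5, 7, 9, 11]; with rabbit_max_retries=0
                   # retries continue forever, capped at 30 seconds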
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 9060f6a91..7233eb18d 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -141,19 +141,30 @@ class GlanceImageService(service.BaseImageService):
"""Paginate through results from glance server"""
images = fetch_func(**kwargs)
- for image in images:
- yield image
- else:
+ if not images:
# break out of recursive loop to end pagination
return
+ for image in images:
+ yield image
+
try:
# attempt to advance the marker in order to fetch next page
kwargs['marker'] = images[-1]['id']
except KeyError:
raise exception.ImagePaginationFailed()
- self._fetch_images(fetch_func, **kwargs)
+ try:
+ kwargs['limit'] = kwargs['limit'] - len(images)
+ # break if we have reached a provided limit
+ if kwargs['limit'] <= 0:
+ return
+ except KeyError:
+ # ignore missing limit, just proceed without it
+ pass
+
+ for image in self._fetch_images(fetch_func, **kwargs):
+ yield image
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
index 27bb01988..8a08510ac 100644
--- a/nova/ipv6/account_identifier.py
+++ b/nova/ipv6/account_identifier.py
@@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id):
except TypeError:
raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
except NameError:
- raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id)
+ raise TypeError(_('Bad project_id for to_global_ipv6: %s') %
+ project_id)
def to_mac(ipv6_address):
diff --git a/nova/log.py b/nova/log.py
index 222b8c5fb..eb0b6020f 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -32,6 +32,7 @@ import json
import logging
import logging.handlers
import os
+import stat
import sys
import traceback
@@ -257,7 +258,10 @@ class NovaRootLogger(NovaLogger):
self.filelog = WatchedFileHandler(logpath)
self.addHandler(self.filelog)
self.logpath = logpath
- os.chmod(self.logpath, FLAGS.logfile_mode)
+
+ st = os.stat(self.logpath)
+ if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode):
+ os.chmod(self.logpath, FLAGS.logfile_mode)
else:
self.removeHandler(self.filelog)
self.addHandler(self.streamlog)
diff --git a/nova/network/api.py b/nova/network/api.py
index d04474df3..78580d360 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -111,6 +111,12 @@ class API(base.Base):
'(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
+
+ # If this address has been previously associated to a
+ # different instance, disassociate the floating_ip
+ if (floating_ip['fixed_ip'] and
+ floating_ip['fixed_ip']['address'] != fixed_ip['address']):
+ self.disassociate_floating_ip(context, floating_ip['address'])
+
# NOTE(vish): if we are multi_host, send to the instances host
if fixed_ip['network']['multi_host']:
host = fixed_ip['instance']['host']
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 404a3180e..e6b30d1a0 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -280,6 +280,13 @@ class FloatingIP(object):
def associate_floating_ip(self, context, floating_address, fixed_address):
"""Associates an floating ip to a fixed ip."""
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ floating_address)
+ if floating_ip['fixed_ip']:
+ raise exception.FloatingIpAlreadyInUse(
+ address=floating_ip['address'],
+ fixed_ip=floating_ip['fixed_ip']['address'])
+
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address)
@@ -484,17 +491,17 @@ class NetworkManager(manager.SchedulerDependentManager):
# TODO(tr3buchet) eventually "enabled" should be determined
def ip_dict(ip):
return {
- "ip": ip,
- "netmask": network["netmask"],
- "enabled": "1"}
+ 'ip': ip,
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
def ip6_dict():
return {
- "ip": ipv6.to_global(network['cidr_v6'],
+ 'ip': ipv6.to_global(network['cidr_v6'],
vif['address'],
network['project_id']),
- "netmask": network['netmask_v6'],
- "enabled": "1"}
+ 'netmask': network['netmask_v6'],
+ 'enabled': '1'}
network_dict = {
'bridge': network['bridge'],
'id': network['id'],
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index 6ef4a050e..043838536 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -122,4 +122,5 @@ def notify(publisher_id, event_type, priority, payload):
driver.notify(msg)
except Exception, e:
LOG.exception(_("Problem '%(e)s' attempting to "
- "send to notification system." % locals()))
+ "send to notification system. Payload=%(payload)s" %
+ locals()))
diff --git a/nova/notifier/list_notifier.py b/nova/notifier/list_notifier.py
new file mode 100644
index 000000000..955ae1b57
--- /dev/null
+++ b/nova/notifier/list_notifier.py
@@ -0,0 +1,68 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.exception import ClassNotFound
+
+flags.DEFINE_multistring('list_notifier_drivers',
+ ['nova.notifier.no_op_notifier'],
+ 'List of drivers to send notifications')
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.notifier.list_notifier')
+
+drivers = None
+
+
+class ImportFailureNotifier(object):
+ """Noisily re-raises some exception over-and-over when notify is called."""
+
+ def __init__(self, exception):
+ self.exception = exception
+
+ def notify(self, message):
+ raise self.exception
+
+
+def _get_drivers():
+ """Instantiates and returns drivers based on the flag values."""
+ global drivers
+ if not drivers:
+ drivers = []
+ for notification_driver in FLAGS.list_notifier_drivers:
+ try:
+ drivers.append(utils.import_object(notification_driver))
+ except ClassNotFound as e:
+ drivers.append(ImportFailureNotifier(e))
+ return drivers
+
+
+def notify(message):
+ """Passes notification to mulitple notifiers in a list."""
+ for driver in _get_drivers():
+ try:
+ driver.notify(message)
+ except Exception as e:
+ LOG.exception(_("Problem '%(e)s' attempting to send to "
+ "notification driver %(driver)s." % locals()))
+
+
+def _reset_drivers():
+ """Used by unit tests to reset the drivers."""
+ global drivers
+ drivers = None
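
A hedged usage sketch (the second driver path and the message shape are illustrative; list_notifier fans out whatever dict it is given):

    FLAGS.list_notifier_drivers = ['nova.notifier.no_op_notifier',
                                   'nova.notifier.log_notifier']
    notify({'event_type': 'compute.instance.create',
            'payload': {'instance_id': 1}})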
diff --git a/nova/quota.py b/nova/quota.py
index 48e598659..771477747 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -116,8 +116,9 @@ def allowed_volumes(context, requested_volumes, size):
allowed_gigabytes = _get_request_allotment(requested_gigabytes,
used_gigabytes,
quota['gigabytes'])
- allowed_volumes = min(allowed_volumes,
- int(allowed_gigabytes // size))
+ if size != 0:
+ allowed_volumes = min(allowed_volumes,
+ int(allowed_gigabytes // size))
return min(requested_volumes, allowed_volumes)
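
The new guard sidesteps a ZeroDivisionError for size-0 volume requests. Worked numbers, illustrative (900 GB of quota headroom, 8 volumes still allowed, 3 requested):

    size = 0
    allowed_volumes = 8
    if size != 0:
        allowed_volumes = min(allowed_volumes, int(900 // size))
    print min(3, allowed_volumes)    # 3, instead of a crash on 900 // 0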
diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py
index bdf7f705b..c0cfdd5ce 100644
--- a/nova/rpc/__init__.py
+++ b/nova/rpc/__init__.py
@@ -23,44 +23,35 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('rpc_backend',
- 'nova.rpc.amqp',
- "The messaging module to use, defaults to AMQP.")
+ 'nova.rpc.impl_kombu',
+ "The messaging module to use, defaults to kombu.")
-RPCIMPL = import_object(FLAGS.rpc_backend)
+_RPCIMPL = None
-def create_connection(new=True):
- return RPCIMPL.Connection.instance(new=True)
-
+def get_impl():
+ """Delay import of rpc_backend until FLAGS are loaded."""
+ global _RPCIMPL
+ if _RPCIMPL is None:
+ _RPCIMPL = import_object(FLAGS.rpc_backend)
+ return _RPCIMPL
-def create_consumer(conn, topic, proxy, fanout=False):
- if fanout:
- return RPCIMPL.FanoutAdapterConsumer(
- connection=conn,
- topic=topic,
- proxy=proxy)
- else:
- return RPCIMPL.TopicAdapterConsumer(
- connection=conn,
- topic=topic,
- proxy=proxy)
-
-def create_consumer_set(conn, consumers):
- return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers)
+def create_connection(new=True):
+ return get_impl().create_connection(new=new)
def call(context, topic, msg):
- return RPCIMPL.call(context, topic, msg)
+ return get_impl().call(context, topic, msg)
def cast(context, topic, msg):
- return RPCIMPL.cast(context, topic, msg)
+ return get_impl().cast(context, topic, msg)
def fanout_cast(context, topic, msg):
- return RPCIMPL.fanout_cast(context, topic, msg)
+ return get_impl().fanout_cast(context, topic, msg)
def multicall(context, topic, msg):
- return RPCIMPL.multicall(context, topic, msg)
+ return get_impl().multicall(context, topic, msg)
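
Flags are parsed at service startup, after this module has been imported, so resolving FLAGS.rpc_backend eagerly would always pin the default backend. A hedged sketch of what the lazy lookup enables (argv handling illustrative):

    from nova import flags, rpc
    flags.FLAGS(['prog', '--rpc_backend=nova.rpc.impl_carrot'])
    conn = rpc.create_connection()    # impl_carrot is imported here,
                                      # not at import time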
diff --git a/nova/rpc/common.py b/nova/rpc/common.py
index 1d3065a83..b8c280630 100644
--- a/nova/rpc/common.py
+++ b/nova/rpc/common.py
@@ -1,8 +1,14 @@
from nova import exception
+from nova import flags
from nova import log as logging
LOG = logging.getLogger('nova.rpc')
+flags.DEFINE_integer('rpc_thread_pool_size', 1024,
+ 'Size of RPC thread pool')
+flags.DEFINE_integer('rpc_conn_pool_size', 30,
+ 'Size of RPC connection pool')
+
class RemoteError(exception.Error):
"""Signifies that a remote class has raised an exception.
diff --git a/nova/rpc/amqp.py b/nova/rpc/impl_carrot.py
index fe429b266..303a4ff88 100644
--- a/nova/rpc/amqp.py
+++ b/nova/rpc/impl_carrot.py
@@ -33,6 +33,7 @@ import uuid
from carrot import connection as carrot_connection
from carrot import messaging
+import eventlet
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
@@ -42,21 +43,22 @@ from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
-from nova import log as logging
-from nova import utils
from nova.rpc.common import RemoteError, LOG
+# Needed for tests
+eventlet.monkey_patch()
FLAGS = flags.FLAGS
-flags.DEFINE_integer('rpc_thread_pool_size', 1024,
- 'Size of RPC thread pool')
-flags.DEFINE_integer('rpc_conn_pool_size', 30,
- 'Size of RPC connection pool')
class Connection(carrot_connection.BrokerConnection):
"""Connection instance object."""
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+ self._rpc_consumers = []
+ self._rpc_consumer_thread = None
+
@classmethod
def instance(cls, new=True):
"""Returns the instance."""
@@ -94,13 +96,63 @@ class Connection(carrot_connection.BrokerConnection):
pass
return cls.instance()
+ def close(self):
+ self.cancel_consumer_thread()
+ for consumer in self._rpc_consumers:
+ try:
+ consumer.close()
+ except Exception:
+ # ignore all errors
+ pass
+ self._rpc_consumers = []
+ super(Connection, self).close()
+
+ def consume_in_thread(self):
+ """Consumer from all queues/consumers in a greenthread"""
+
+ consumer_set = ConsumerSet(connection=self,
+ consumer_list=self._rpc_consumers)
+
+ def _consumer_thread():
+ try:
+ consumer_set.wait()
+ except greenlet.GreenletExit:
+ return
+ if self._rpc_consumer_thread is None:
+ self._rpc_consumer_thread = eventlet.spawn(_consumer_thread)
+ return self._rpc_consumer_thread
+
+ def cancel_consumer_thread(self):
+ """Cancel a consumer thread"""
+ if self._rpc_consumer_thread is not None:
+ self._rpc_consumer_thread.kill()
+ try:
+ self._rpc_consumer_thread.wait()
+ except greenlet.GreenletExit:
+ pass
+ self._rpc_consumer_thread = None
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer that calls methods in the proxy"""
+ if fanout:
+ consumer = FanoutAdapterConsumer(
+ connection=self,
+ topic=topic,
+ proxy=proxy)
+ else:
+ consumer = TopicAdapterConsumer(
+ connection=self,
+ topic=topic,
+ proxy=proxy)
+ self._rpc_consumers.append(consumer)
+
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
# TODO(comstud): Timeout connections not used in a while
def create(self):
- LOG.debug('Creating new connection')
+ LOG.debug('Pool creating new connection')
return Connection.instance(new=True)
# Create a ConnectionPool to use for RPC calls. We'll order the
@@ -119,25 +171,34 @@ class Consumer(messaging.Consumer):
"""
def __init__(self, *args, **kwargs):
- for i in xrange(FLAGS.rabbit_max_retries):
- if i > 0:
- time.sleep(FLAGS.rabbit_retry_interval)
+ max_retries = FLAGS.rabbit_max_retries
+ sleep_time = FLAGS.rabbit_retry_interval
+ tries = 0
+ while True:
+ tries += 1
+ if tries > 1:
+ time.sleep(sleep_time)
+ # back off for the next retry attempt, if there is one
+ sleep_time += FLAGS.rabbit_retry_backoff
+ if sleep_time > 30:
+ sleep_time = 30
try:
super(Consumer, self).__init__(*args, **kwargs)
self.failed_connection = False
break
except Exception as e: # Catching all because carrot sucks
+ self.failed_connection = True
+ if max_retries > 0 and tries == max_retries:
+ break
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
- fl_intv = FLAGS.rabbit_retry_interval
+ fl_intv = sleep_time
LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is'
' unreachable: %(e)s. Trying again in %(fl_intv)d'
' seconds.') % locals())
- self.failed_connection = True
if self.failed_connection:
LOG.error(_('Unable to connect to AMQP server '
- 'after %d tries. Shutting down.'),
- FLAGS.rabbit_max_retries)
+ 'after %(tries)d tries. Shutting down.') % locals())
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -166,12 +227,6 @@ class Consumer(messaging.Consumer):
LOG.exception(_('Failed to fetch message from queue: %s' % e))
self.failed_connection = True
- def attach_to_eventlet(self):
- """Only needed for unit tests!"""
- timer = utils.LoopingCall(self.fetch, enable_callbacks=True)
- timer.start(0.1)
- return timer
-
class AdapterConsumer(Consumer):
"""Calls methods on a proxy object based on method and args."""
@@ -242,7 +297,7 @@ class AdapterConsumer(Consumer):
# NOTE(vish): this iterates through the generator
list(rval)
except Exception as e:
- logging.exception('Exception during message handling')
+ LOG.exception('Exception during message handling')
if msg_id:
msg_reply(msg_id, None, sys.exc_info())
return
@@ -520,6 +575,11 @@ class MulticallWaiter(object):
yield result
+def create_connection(new=True):
+ """Create a connection"""
+ return Connection.instance(new=new)
+
+
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response."""
rv = multicall(context, topic, msg)
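
A hedged sketch of the connection-owned consumer API added above (volume_manager is an illustrative proxy; any object whose methods match incoming RPC messages works):

    conn = create_connection(new=True)
    conn.create_consumer('volume', proxy=volume_manager)
    conn.consume_in_thread()    # spawns a greenthread and returns
    # later, on shutdown:
    conn.close()                # kills the thread, closes the consumers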
diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
new file mode 100644
index 000000000..b994a6a10
--- /dev/null
+++ b/nova/rpc/impl_kombu.py
@@ -0,0 +1,781 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import kombu
+import kombu.entity
+import kombu.messaging
+import kombu.connection
+import itertools
+import sys
+import time
+import traceback
+import types
+import uuid
+
+import eventlet
+from eventlet import greenpool
+from eventlet import pools
+import greenlet
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova.rpc.common import RemoteError, LOG
+
+# Needed for tests
+eventlet.monkey_patch()
+
+FLAGS = flags.FLAGS
+
+
+class ConsumerBase(object):
+ """Consumer base class."""
+
+ def __init__(self, channel, callback, tag, **kwargs):
+ """Declare a queue on an amqp channel.
+
+ 'channel' is the amqp channel to use
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ queue name, exchange name, and other kombu options are
+ passed in here as a dictionary.
+ """
+ self.callback = callback
+ self.tag = str(tag)
+ self.kwargs = kwargs
+ self.queue = None
+ self.reconnect(channel)
+
+ def reconnect(self, channel):
+ """Re-declare the queue after a rabbit reconnect"""
+ self.channel = channel
+ self.kwargs['channel'] = channel
+ self.queue = kombu.entity.Queue(**self.kwargs)
+ self.queue.declare()
+
+ def consume(self, *args, **kwargs):
+ """Actually declare the consumer on the amqp channel. This will
+ start the flow of messages from the queue. Using the
+ Connection.iterconsume() iterator will process the messages,
+ calling the appropriate callback.
+
+ If a callback is specified in kwargs, use that. Otherwise,
+ use the callback passed during __init__()
+
+ If kwargs['nowait'] is False, the declaration waits for the
+ broker to confirm the consumer before this call returns.
+
+ Messages will automatically be acked if the callback doesn't
+ raise an exception
+ """
+
+ options = {'consumer_tag': self.tag}
+ options['nowait'] = kwargs.get('nowait', False)
+ callback = kwargs.get('callback', self.callback)
+ if not callback:
+ raise ValueError("No callback defined")
+
+ def _callback(raw_message):
+ message = self.channel.message_to_python(raw_message)
+ callback(message.payload)
+ message.ack()
+
+ self.queue.consume(*args, callback=_callback, **options)
+
+ def cancel(self):
+ """Cancel the consuming from the queue, if it has started"""
+ try:
+ self.queue.cancel(self.tag)
+ except KeyError, e:
+ # NOTE(comstud): Kludge to get around a amqplib bug
+ if str(e) != "u'%s'" % self.tag:
+ raise
+ self.queue = None
+
+
+class DirectConsumer(ConsumerBase):
+ """Queue/consumer class for 'direct'"""
+
+ def __init__(self, channel, msg_id, callback, tag, **kwargs):
+ """Init a 'direct' queue.
+
+ 'channel' is the amqp channel to use
+ 'msg_id' is the msg_id to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ # Default options
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(
+ name=msg_id,
+ type='direct',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(DirectConsumer, self).__init__(
+ channel,
+ callback,
+ tag,
+ name=msg_id,
+ exchange=exchange,
+ routing_key=msg_id,
+ **options)
+
+
+class TopicConsumer(ConsumerBase):
+ """Consumer class for 'topic'"""
+
+ def __init__(self, channel, topic, callback, tag, **kwargs):
+ """Init a 'topic' queue.
+
+ 'channel' is the amqp channel to use
+ 'topic' is the topic to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ # Default options
+ options = {'durable': FLAGS.rabbit_durable_queues,
+ 'auto_delete': False,
+ 'exclusive': False}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(
+ name=FLAGS.control_exchange,
+ type='topic',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(TopicConsumer, self).__init__(
+ channel,
+ callback,
+ tag,
+ name=topic,
+ exchange=exchange,
+ routing_key=topic,
+ **options)
+
+
+class FanoutConsumer(ConsumerBase):
+ """Consumer class for 'fanout'"""
+
+ def __init__(self, channel, topic, callback, tag, **kwargs):
+ """Init a 'fanout' queue.
+
+ 'channel' is the amqp channel to use
+ 'topic' is the topic to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ unique = uuid.uuid4().hex
+ exchange_name = '%s_fanout' % topic
+ queue_name = '%s_fanout_%s' % (topic, unique)
+
+ # Default options
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(
+ name=exchange_name,
+ type='fanout',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(FanoutConsumer, self).__init__(
+ channel,
+ callback,
+ tag,
+ name=queue_name,
+ exchange=exchange,
+ routing_key=topic,
+ **options)
+
+
+class Publisher(object):
+ """Base Publisher class"""
+
+ def __init__(self, channel, exchange_name, routing_key, **kwargs):
+ """Init the Publisher class with the exchange_name, routing_key,
+ and other options
+ """
+ self.exchange_name = exchange_name
+ self.routing_key = routing_key
+ self.kwargs = kwargs
+ self.reconnect(channel)
+
+ def reconnect(self, channel):
+ """Re-establish the Producer after a rabbit reconnection"""
+ self.exchange = kombu.entity.Exchange(name=self.exchange_name,
+ **self.kwargs)
+ self.producer = kombu.messaging.Producer(exchange=self.exchange,
+ channel=channel, routing_key=self.routing_key)
+
+ def send(self, msg):
+ """Send a message"""
+ self.producer.publish(msg)
+
+
+class DirectPublisher(Publisher):
+ """Publisher class for 'direct'"""
+ def __init__(self, channel, msg_id, **kwargs):
+ """init a 'direct' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ super(DirectPublisher, self).__init__(channel,
+ msg_id,
+ msg_id,
+ type='direct',
+ **options)
+
+
+class TopicPublisher(Publisher):
+ """Publisher class for 'topic'"""
+ def __init__(self, channel, topic, **kwargs):
+ """init a 'topic' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+ options = {'durable': FLAGS.rabbit_durable_queues,
+ 'auto_delete': False,
+ 'exclusive': False}
+ options.update(kwargs)
+ super(TopicPublisher, self).__init__(channel,
+ FLAGS.control_exchange,
+ topic,
+ type='topic',
+ **options)
+
+
+class FanoutPublisher(Publisher):
+ """Publisher class for 'fanout'"""
+ def __init__(self, channel, topic, **kwargs):
+ """init a 'fanout' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ super(FanoutPublisher, self).__init__(channel,
+ '%s_fanout' % topic,
+ None,
+ type='fanout',
+ **options)
+
+
+class Connection(object):
+ """Connection object."""
+
+ def __init__(self):
+ self.consumers = []
+ self.consumer_thread = None
+ self.max_retries = FLAGS.rabbit_max_retries
+ # Try forever?
+ if self.max_retries <= 0:
+ self.max_retries = None
+ self.interval_start = FLAGS.rabbit_retry_interval
+ self.interval_stepping = FLAGS.rabbit_retry_backoff
+ # max retry-interval = 30 seconds
+ self.interval_max = 30
+ self.memory_transport = False
+
+ self.params = dict(hostname=FLAGS.rabbit_host,
+ port=FLAGS.rabbit_port,
+ userid=FLAGS.rabbit_userid,
+ password=FLAGS.rabbit_password,
+ virtual_host=FLAGS.rabbit_virtual_host)
+ if FLAGS.fake_rabbit:
+ self.params['transport'] = 'memory'
+ self.memory_transport = True
+ else:
+ self.memory_transport = False
+ self.connection = None
+ self.reconnect()
+
+ def reconnect(self):
+ """Handles reconnecting and re-estblishing queues"""
+ if self.connection:
+ try:
+ self.connection.close()
+ except self.connection.connection_errors:
+ pass
+ time.sleep(1)
+ self.connection = kombu.connection.BrokerConnection(**self.params)
+ if self.memory_transport:
+ # Kludge to speed up tests.
+ self.connection.transport.polling_interval = 0.0
+ self.consumer_num = itertools.count(1)
+
+ try:
+ self.connection.ensure_connection(errback=self.connect_error,
+ max_retries=self.max_retries,
+ interval_start=self.interval_start,
+ interval_step=self.interval_stepping,
+ interval_max=self.interval_max)
+ except self.connection.connection_errors, e:
+ # We should only get here if max_retries is set. We'll go
+ # ahead and exit in this case.
+ err_str = str(e)
+ max_retries = self.max_retries
+ LOG.error(_('Unable to connect to AMQP server '
+ 'after %(max_retries)d tries: %(err_str)s') % locals())
+ sys.exit(1)
+ LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
+ self.params)
+ self.channel = self.connection.channel()
+ # work around 'memory' transport bug in 1.1.3
+ if self.memory_transport:
+ self.channel._new_queue('ae.undeliver')
+ for consumer in self.consumers:
+ consumer.reconnect(self.channel)
+ if self.consumers:
+ LOG.debug(_("Re-established AMQP queues"))
+
+ def get_channel(self):
+ """Convenience call for bin/clear_rabbit_queues"""
+ return self.channel
+
+ def connect_error(self, exc, interval):
+ """Callback when there are connection re-tries by kombu"""
+ info = self.params.copy()
+ info['intv'] = interval
+ info['e'] = exc
+ LOG.error(_('AMQP server on %(hostname)s:%(port)d is'
+ ' unreachable: %(e)s. Trying again in %(intv)d'
+ ' seconds.') % info)
+
+ def close(self):
+ """Close/release this connection"""
+ self.cancel_consumer_thread()
+ self.connection.release()
+ self.connection = None
+
+ def reset(self):
+ """Reset a connection so it can be used again"""
+ self.cancel_consumer_thread()
+ self.channel.close()
+ self.channel = self.connection.channel()
+ # work around 'memory' transport bug in 1.1.3
+ if self.memory_transport:
+ self.channel._new_queue('ae.undeliver')
+ self.consumers = []
+
+ def declare_consumer(self, consumer_cls, topic, callback):
+ """Create a Consumer using the class that was passed in and
+ add it to our list of consumers
+ """
+ consumer = consumer_cls(self.channel, topic, callback,
+ self.consumer_num.next())
+ self.consumers.append(consumer)
+ return consumer
+
+ def iterconsume(self, limit=None):
+ """Return an iterator that will consume from all queues/consumers"""
+ while True:
+ try:
+ queues_head = self.consumers[:-1]
+ queues_tail = self.consumers[-1]
+ for queue in queues_head:
+ queue.consume(nowait=True)
+ queues_tail.consume(nowait=False)
+
+ for iteration in itertools.count(0):
+ if limit and iteration >= limit:
+ raise StopIteration
+ yield self.connection.drain_events()
+ except self.connection.connection_errors, e:
+ LOG.exception(_('Failed to consume message from queue: '
+ '%s') % str(e))
+ self.reconnect()
+
+ def cancel_consumer_thread(self):
+ """Cancel a consumer thread"""
+ if self.consumer_thread is not None:
+ self.consumer_thread.kill()
+ try:
+ self.consumer_thread.wait()
+ except greenlet.GreenletExit:
+ pass
+ self.consumer_thread = None
+
+ def publisher_send(self, cls, topic, msg):
+ """Send to a publisher based on the publisher class"""
+ while True:
+ publisher = None
+ try:
+ publisher = cls(self.channel, topic)
+ publisher.send(msg)
+ return
+ except self.connection.connection_errors, e:
+ LOG.exception(_('Failed to publish message %s') % str(e))
+ try:
+ self.reconnect()
+ if publisher:
+ publisher.reconnect(self.channel)
+ except self.connection.connection_errors, e:
+ pass
+
+ def declare_direct_consumer(self, topic, callback):
+ """Create a 'direct' queue.
+ In nova's use, this is generally a msg_id queue used for
+ responses for call/multicall
+ """
+ self.declare_consumer(DirectConsumer, topic, callback)
+
+ def declare_topic_consumer(self, topic, callback=None):
+ """Create a 'topic' consumer."""
+ self.declare_consumer(TopicConsumer, topic, callback)
+
+ def declare_fanout_consumer(self, topic, callback):
+ """Create a 'fanout' consumer"""
+ self.declare_consumer(FanoutConsumer, topic, callback)
+
+ def direct_send(self, msg_id, msg):
+ """Send a 'direct' message"""
+ self.publisher_send(DirectPublisher, msg_id, msg)
+
+ def topic_send(self, topic, msg):
+ """Send a 'topic' message"""
+ self.publisher_send(TopicPublisher, topic, msg)
+
+ def fanout_send(self, topic, msg):
+ """Send a 'fanout' message"""
+ self.publisher_send(FanoutPublisher, topic, msg)
+
+ def consume(self, limit=None):
+ """Consume from all queues/consumers"""
+ it = self.iterconsume(limit=limit)
+ while True:
+ try:
+ it.next()
+ except StopIteration:
+ return
+
+ def consume_in_thread(self):
+ """Consumer from all queues/consumers in a greenthread"""
+ def _consumer_thread():
+ try:
+ self.consume()
+ except greenlet.GreenletExit:
+ return
+ if self.consumer_thread is None:
+ self.consumer_thread = eventlet.spawn(_consumer_thread)
+ return self.consumer_thread
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer that calls a method in a proxy object"""
+ if fanout:
+ self.declare_fanout_consumer(topic, ProxyCallback(proxy))
+ else:
+ self.declare_topic_consumer(topic, ProxyCallback(proxy))
+
+
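Taken together, the consumer side of this class mirrors how nova/service.py (further below) uses it. A minimal sketch, assuming a reachable AMQP server configured through the usual FLAGS; EchoProxy and the topic name are illustrative stand-ins:

    class EchoProxy(object):
        """Hypothetical proxy: any object whose methods accept a context
        plus keyword arguments can sit behind a ProxyCallback."""
        def echo(self, context, value):
            return value

    conn = Connection()
    # One round-robin topic consumer plus one fanout consumer.
    conn.create_consumer('testtopic', EchoProxy(), fanout=False)
    conn.create_consumer('testtopic', EchoProxy(), fanout=True)
    thread = conn.consume_in_thread()   # drain all consumers in a greenthread
    # ... do other work ...
    conn.close()   # cancels the consumer thread and releases the connection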
+class Pool(pools.Pool):
+ """Class that implements a Pool of Connections."""
+
+ # TODO(comstud): Timeout connections not used in a while
+ def create(self):
+ LOG.debug(_('Pool creating new connection'))
+ return Connection()
+
+# Create a ConnectionPool to use for RPC calls. We'll order the
+# pool as a stack (LIFO), so that we can potentially loop through and
+# timeout old unused connections at some point
+ConnectionPool = Pool(
+ max_size=FLAGS.rpc_conn_pool_size,
+ order_as_stack=True)
+
+
+class ConnectionContext(object):
+ """The class that is actually returned to the caller of
+ create_connection(). This is essentially a wrapper around
+ Connection that supports 'with' and can return a new Connection or
+ one from a pool. It also catches deletion of an instance of this
+ class, so that Connections are returned to the pool on exceptions
+ and so forth without making the caller responsible for catching
+ all exceptions and remembering to return the connection to the
+ pool.
+ """
+
+ def __init__(self, pooled=True):
+ """Create a new connection, or get one from the pool"""
+ self.connection = None
+ if pooled:
+ self.connection = ConnectionPool.get()
+ else:
+ self.connection = Connection()
+ self.pooled = pooled
+
+ def __enter__(self):
+ """with ConnectionContext() should return self"""
+ return self
+
+ def _done(self):
+ """If the connection came from a pool, clean it up and put it back.
+ If it did not come from a pool, close it.
+ """
+ if self.connection:
+ if self.pooled:
+ # Reset the connection so it's ready for the next caller
+ # to grab from the pool
+ self.connection.reset()
+ ConnectionPool.put(self.connection)
+ else:
+ try:
+ self.connection.close()
+ except Exception:
+ # There's apparently a bug in kombu 'memory' transport
+ # which causes an assert failure.
+ # But, we probably want to ignore all exceptions when
+ # trying to close a connection, anyway...
+ pass
+ self.connection = None
+
+ def __exit__(self, t, v, tb):
+ """end of 'with' statement. We're done here."""
+ self._done()
+
+ def __del__(self):
+ """Caller is done with this connection. Make sure we cleaned up."""
+ self._done()
+
+ def close(self):
+ """Caller is done with this connection."""
+ self._done()
+
+ def __getattr__(self, key):
+ """Proxy all other calls to the Connection instance"""
+ if self.connection:
+ return getattr(self.connection, key)
+ else:
+ raise exception.InvalidRPCConnectionReuse()
+
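Callers normally reach a Connection through this wrapper rather than directly, so pooled connections always find their way back. A minimal sketch (topic and payload are illustrative):

    # Pooled (the default): takes a Connection from ConnectionPool and,
    # on exit, resets it and puts it back.
    with ConnectionContext() as conn:
        conn.topic_send('volume', {'method': 'ping', 'args': {}})

    # Unpooled: a brand-new Connection that is closed, not returned.
    with ConnectionContext(pooled=False) as conn:
        conn.fanout_send('volume', {'method': 'ping', 'args': {}})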
+
+class ProxyCallback(object):
+ """Calls methods on a proxy object based on method and args."""
+
+ def __init__(self, proxy):
+ self.proxy = proxy
+ self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
+
+ def __call__(self, message_data):
+ """Consumer callback to call a method on a proxy object.
+
+ Parses the message for validity and fires off a thread to call the
+ proxy object method.
+
+ Message data should be a dictionary with two keys:
+ method: string representing the method to call
+ args: dictionary of arg: value
+
+ Example: {'method': 'echo', 'args': {'value': 42}}
+
+ """
+ LOG.debug(_('received %s') % message_data)
+ ctxt = _unpack_context(message_data)
+ method = message_data.get('method')
+ args = message_data.get('args', {})
+ if not method:
+ LOG.warn(_('no method for message: %s') % message_data)
+ ctxt.reply(_('No method for message: %s') % message_data)
+ return
+ self.pool.spawn_n(self._process_data, ctxt, method, args)
+
+ @exception.wrap_exception()
+ def _process_data(self, ctxt, method, args):
+ """Thread that maigcally looks for a method on the proxy
+ object and calls it.
+ """
+
+ node_func = getattr(self.proxy, str(method))
+ node_args = dict((str(k), v) for k, v in args.iteritems())
+ # NOTE(vish): magic is fun!
+ try:
+ rval = node_func(context=ctxt, **node_args)
+ # Check if the result was a generator
+ if isinstance(rval, types.GeneratorType):
+ for x in rval:
+ ctxt.reply(x, None)
+ else:
+ ctxt.reply(rval, None)
+ # This final None tells multicall that it is done.
+ ctxt.reply(None, None)
+ except Exception as e:
+ LOG.exception(_('Exception during message handling'))
+ ctxt.reply(None, sys.exc_info())
+ return
+
+
+def _unpack_context(msg):
+ """Unpack context from msg."""
+ context_dict = {}
+ for key in list(msg.keys()):
+ # NOTE(vish): Some versions of python don't like unicode keys
+ # in kwargs.
+ key = str(key)
+ if key.startswith('_context_'):
+ value = msg.pop(key)
+ context_dict[key[9:]] = value
+ context_dict['msg_id'] = msg.pop('_msg_id', None)
+ LOG.debug(_('unpacked context: %s'), context_dict)
+ return RpcContext.from_dict(context_dict)
+
+
+def _pack_context(msg, context):
+ """Pack context into msg.
+
+ Values for message keys need to be less than 255 chars, so we pull
+ context out into a bunch of separate keys. If we want to support
+ more arguments in rabbit messages, we may want to do the same
+ for args at some point.
+
+ """
+ context_d = dict([('_context_%s' % key, value)
+ for (key, value) in context.to_dict().iteritems()])
+ msg.update(context_d)
+
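A round trip through these two helpers, assuming nova's usual RequestContext (all values illustrative):

    ctxt = context.RequestContext('fake-user', 'fake-project')
    msg = {'method': 'echo', 'args': {'value': 42}, '_msg_id': 'abc123'}
    _pack_context(msg, ctxt)
    # msg now also carries flattened keys such as '_context_user_id'
    # and '_context_project_id' alongside 'method' and 'args'.
    unpacked = _unpack_context(msg)
    # unpacked.msg_id == 'abc123'; all '_context_*' keys were popped off.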
+
+class RpcContext(context.RequestContext):
+ """Context that supports replying to a rpc.call"""
+ def __init__(self, *args, **kwargs):
+ msg_id = kwargs.pop('msg_id', None)
+ self.msg_id = msg_id
+ super(RpcContext, self).__init__(*args, **kwargs)
+
+ def reply(self, *args, **kwargs):
+ if self.msg_id:
+ msg_reply(self.msg_id, *args, **kwargs)
+
+
+class MulticallWaiter(object):
+ def __init__(self, connection):
+ self._connection = connection
+ self._iterator = connection.iterconsume()
+ self._result = None
+ self._done = False
+
+ def done(self):
+ self._done = True
+ self._connection.close()
+
+ def __call__(self, data):
+ """The consume() callback will call this. Store the result."""
+ if data['failure']:
+ self._result = RemoteError(*data['failure'])
+ else:
+ self._result = data['result']
+
+ def __iter__(self):
+ """Return a result until we get a 'None' response from consumer"""
+ if self._done:
+ raise StopIteration
+ while True:
+ self._iterator.next()
+ result = self._result
+ if isinstance(result, Exception):
+ self.done()
+ raise result
+ if result is None:
+ self.done()
+ raise StopIteration
+ yield result
+
+
+def create_connection(new=True):
+ """Create a connection"""
+ return ConnectionContext(pooled=not new)
+
+
+def multicall(context, topic, msg):
+ """Make a call that returns multiple times."""
+ # Can't use 'with' for multicall, as it returns an iterator
+ # that will continue to use the connection. When it's done,
+ # connection.close() will get called which will put it back into
+ # the pool
+ LOG.debug(_('Making asynchronous call on %s ...'), topic)
+ msg_id = uuid.uuid4().hex
+ msg.update({'_msg_id': msg_id})
+ LOG.debug(_('MSG_ID is %s') % (msg_id))
+ _pack_context(msg, context)
+
+ conn = ConnectionContext()
+ wait_msg = MulticallWaiter(conn)
+ conn.declare_direct_consumer(msg_id, wait_msg)
+ conn.topic_send(topic, msg)
+
+ return wait_msg
+
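Each ctxt.reply() on the worker side becomes one item from the returned waiter, and the final (None, None) reply ends the iteration. A hedged usage sketch, given some RequestContext ctxt ('worker' topic and method are illustrative):

    for item in multicall(ctxt, 'worker', {'method': 'list_items',
                                           'args': {}}):
        LOG.debug('got one result: %s', item)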
+
+def call(context, topic, msg):
+ """Sends a message on a topic and wait for a response."""
+ rv = multicall(context, topic, msg)
+ # NOTE(vish): return the last result from the multicall
+ rv = list(rv)
+ if not rv:
+ return
+ return rv[-1]
+
+
+def cast(context, topic, msg):
+ """Sends a message on a topic without waiting for a response."""
+ LOG.debug(_('Making asynchronous cast on %s...'), topic)
+ _pack_context(msg, context)
+ with ConnectionContext() as conn:
+ conn.topic_send(topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+ """Sends a message on a fanout exchange without waiting for a response."""
+ LOG.debug(_('Making asynchronous fanout cast...'))
+ _pack_context(msg, context)
+ with ConnectionContext() as conn:
+ conn.fanout_send(topic, msg)
+
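The three send-side entry points differ only in whether they wait and how widely they deliver. A sketch, given some RequestContext ctxt (topics and payloads illustrative):

    # Blocks and returns the remote method's last reply.
    status = call(ctxt, 'volume', {'method': 'get_status', 'args': {}})

    # Fire-and-forget to a single worker on the topic.
    cast(ctxt, 'volume', {'method': 'refresh', 'args': {}})

    # Fire-and-forget to every worker bound to the fanout exchange,
    # e.g. the VSA scheduler's startup notification further below.
    fanout_cast(ctxt, 'volume', {'method': 'notification',
                                 'args': {'event': 'startup'}})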
+
+def msg_reply(msg_id, reply=None, failure=None):
+ """Sends a reply or an error on the channel signified by msg_id.
+
+ Failure should be a sys.exc_info() tuple.
+
+ """
+ with ConnectionContext() as conn:
+ if failure:
+ message = str(failure[1])
+ tb = traceback.format_exception(*failure)
+ LOG.error(_("Returning exception %s to caller"), message)
+ LOG.error(tb)
+ failure = (failure[0].__name__, str(failure[1]), tb)
+
+ try:
+ msg = {'result': reply, 'failure': failure}
+ conn.direct_send(msg_id, msg)
+ except TypeError:
+ # The reply was not serializable as-is; fall back to
+ # repr()s of its attributes.
+ msg = {'result': dict((k, repr(v))
+ for k, v in reply.__dict__.iteritems()),
+ 'failure': failure}
+ conn.direct_send(msg_id, msg)
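End to end, a failure raised inside _process_data is serialized here as an (exception name, message, traceback) triple and resurfaces on the calling side as the RemoteError that MulticallWaiter raises. What the caller sees, sketched with an illustrative topic and context:

    try:
        call(ctxt, 'volume', {'method': 'explode', 'args': {}})
    except RemoteError, exc:
        # exc carries the remote exception's class name, its message,
        # and the formatted remote traceback from msg_reply() above.
        LOG.error('remote call failed: %s', exc)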
diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py
index e8c343a4b..7f17b642f 100644
--- a/nova/scheduler/abstract_scheduler.py
+++ b/nova/scheduler/abstract_scheduler.py
@@ -180,7 +180,6 @@ class AbstractScheduler(driver.Scheduler):
for zone_id, result in child_results:
if not result:
continue
- assert isinstance(zone_id, int)
for zone_rec in zones:
if zone_rec['id'] != zone_id:
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f28353f05..22f4e14f9 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.compute import vm_states
from nova.api.ec2 import ec2utils
@@ -104,10 +105,8 @@ class Scheduler(object):
dest, block_migration)
# Changing instance_state.
- db.instance_set_state(context,
- instance_id,
- power_state.PAUSED,
- 'migrating')
+ values = {"vm_state": vm_states.MIGRATING}
+ db.instance_update(context, instance_id, values)
# Changing volume state
for volume_ref in instance_ref['volumes']:
@@ -129,8 +128,7 @@ class Scheduler(object):
"""
# Checking instance is running.
- if (power_state.RUNNING != instance_ref['state'] or \
- 'running' != instance_ref['state_description']):
+ if instance_ref['power_state'] != power_state.RUNNING:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.InstanceNotRunning(instance_id=instance_id)
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
new file mode 100644
index 000000000..6962dd86b
--- /dev/null
+++ b/nova/scheduler/vsa.py
@@ -0,0 +1,535 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VSA Simple Scheduler
+"""
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+from nova.scheduler import driver
+from nova.scheduler import simple
+from nova.vsa.api import VsaState
+from nova.volume import volume_types
+
+LOG = logging.getLogger('nova.scheduler.vsa')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
+ 'Tolerance, in percent, when matching drive size to capacity')
+flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
+ 'The number of unique hosts per storage allocation')
+flags.DEFINE_boolean('vsa_select_unique_drives', True,
+ 'Require placing drives for an allocation on distinct hosts')
+
+
+def BYTES_TO_GB(byte_count):
+ return byte_count >> 30
+
+
+def GB_TO_BYTES(gb):
+ return gb << 30
+
+
+class VsaScheduler(simple.SimpleScheduler):
+ """Implements Scheduler for volume placement."""
+
+ def __init__(self, *args, **kwargs):
+ super(VsaScheduler, self).__init__(*args, **kwargs)
+ self._notify_all_volume_hosts("startup")
+
+ def _notify_all_volume_hosts(self, event):
+ rpc.fanout_cast(context.get_admin_context(),
+ FLAGS.volume_topic,
+ {"method": "notification",
+ "args": {"event": event}})
+
+ def _qosgrp_match(self, drive_type, qos_values):
+
+ def _compare_names(str1, str2):
+ return str1.lower() == str2.lower()
+
+ def _compare_sizes_approxim(cap_capacity, size):
+ cap_capacity = BYTES_TO_GB(int(cap_capacity))
+ size = int(size)
+ size_perc = size * \
+ FLAGS.drive_type_approx_capacity_percent / 100
+
+ return cap_capacity >= size - size_perc and \
+ cap_capacity <= size + size_perc
+
+ # Add more entries for additional comparisons
+ compare_list = [{'cap1': 'DriveType',
+ 'cap2': 'type',
+ 'cmp_func': _compare_names},
+ {'cap1': 'DriveCapacity',
+ 'cap2': 'size',
+ 'cmp_func': _compare_sizes_approxim}]
+
+ for cap in compare_list:
+ if not (cap['cap1'] in qos_values and
+ cap['cap2'] in drive_type and
+ cap['cmp_func'] is not None and
+ cap['cmp_func'](qos_values[cap['cap1']],
+ drive_type[cap['cap2']])):
+ return False
+ return True
+
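With the default drive_type_approx_capacity_percent of 10, a requested size matches any advertised capacity within a ±10% window. Worked through with the helpers above (numbers illustrative):

    cap_capacity = GB_TO_BYTES(95)    # host advertises 95 GB drives
    size = 100                        # requested drive size in GB
    size_perc = size * 10 / 100       # 10 GB tolerance either way
    # 90 <= 95 <= 110, so this capacity matches the request.
    matches = (size - size_perc <= BYTES_TO_GB(cap_capacity)
               <= size + size_perc)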
+ def _get_service_states(self):
+ return self.zone_manager.service_states
+
+ def _filter_hosts(self, topic, request_spec, host_list=None):
+
+ LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())
+
+ drive_type = request_spec['drive_type']
+ LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])
+
+ if host_list is None:
+ host_list = self._get_service_states().iteritems()
+
+ filtered_hosts = [] # returns list of (hostname, capability_dict)
+ for host, host_dict in host_list:
+ for service_name, service_dict in host_dict.iteritems():
+ if service_name != topic:
+ continue
+
+ qos_info = service_dict.get('drive_qos_info', {})
+ for qosgrp, qos_values in qos_info.iteritems():
+ if self._qosgrp_match(drive_type, qos_values):
+ if qos_values['AvailableCapacity'] > 0:
+ filtered_hosts.append((host, qos_info))
+ else:
+ LOG.debug(_("Host %s has no free capacity. Skip"),
+ host)
+ break
+
+ host_names = [item[0] for item in filtered_hosts]
+ LOG.debug(_("Filter hosts: %s"), host_names)
+ return filtered_hosts
+
+ def _allowed_to_use_host(self, host, selected_hosts, unique):
+ return (not unique or
+ host not in [item[0] for item in selected_hosts])
+
+ def _add_hostcap_to_list(self, selected_hosts, host, cap):
+ if host not in [item[0] for item in selected_hosts]:
+ selected_hosts.append((host, cap))
+
+ def host_selection_algorithm(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ """Must override this method for VSA scheduler to work."""
+ raise NotImplementedError(_("Must implement host selection mechanism"))
+
+ def _select_hosts(self, request_spec, all_hosts, selected_hosts=None):
+
+ if selected_hosts is None:
+ selected_hosts = []
+
+ host = None
+ if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc:
+ # try to select from already selected hosts only
+ LOG.debug(_("Maximum number of hosts selected (%d)"),
+ len(selected_hosts))
+ unique = False
+ (host, qos_cap) = self.host_selection_algorithm(request_spec,
+ selected_hosts,
+ selected_hosts,
+ unique)
+
+ LOG.debug(_("Selected excessive host %(host)s"), locals())
+ else:
+ unique = FLAGS.vsa_select_unique_drives
+
+ if host is None:
+ # Either we have not hit the unique-host limit yet (unique=True),
+ # or selection from the already-selected hosts failed
+ # (unique=False). Either way, select from all hosts.
+ (host, qos_cap) = self.host_selection_algorithm(request_spec,
+ all_hosts,
+ selected_hosts,
+ unique)
+ if host is None:
+ raise driver.WillNotSchedule(_("No available hosts"))
+
+ return (host, qos_cap)
+
+ def _provision_volume(self, context, vol, vsa_id, availability_zone):
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ now = utils.utcnow()
+ options = {
+ 'size': vol['size'],
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'snapshot_id': None,
+ 'availability_zone': availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': vol['name'],
+ 'display_description': vol['description'],
+ 'volume_type_id': vol['volume_type_id'],
+ 'metadata': dict(to_vsa_id=vsa_id),
+ 'host': vol['host'],
+ 'scheduled_at': now
+ }
+
+ size = vol['size']
+ host = vol['host']
+ name = vol['name']
+ LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
+ "host %(host)s"), locals())
+
+ volume_ref = db.volume_create(context, options)
+ rpc.cast(context,
+ db.queue_get_for(context, "volume", vol['host']),
+ {"method": "create_volume",
+ "args": {"volume_id": volume_ref['id'],
+ "snapshot_id": None}})
+
+ def _check_host_enforcement(self, context, availability_zone):
+ if (availability_zone
+ and ':' in availability_zone
+ and context.is_admin):
+ zone, _x, host = availability_zone.partition(':')
+ service = db.service_get_by_args(context.elevated(), host,
+ 'nova-volume')
+ if not self.service_is_up(service):
+ raise driver.WillNotSchedule(_("Host %s not available") % host)
+
+ return host
+ else:
+ return None
+
+ def _assign_hosts_to_volumes(self, context, volume_params, forced_host):
+
+ prev_volume_type_id = None
+ request_spec = {}
+ selected_hosts = []
+
+ LOG.debug(_("volume_params %(volume_params)s") % locals())
+
+ for i, vol in enumerate(volume_params, start=1):
+ name = vol['name']
+ LOG.debug(_("%(i)d: Volume %(name)s"), locals())
+
+ if forced_host:
+ vol['host'] = forced_host
+ vol['capabilities'] = None
+ continue
+
+ volume_type_id = vol['volume_type_id']
+ request_spec['size'] = vol['size']
+
+ if prev_volume_type_id is None or\
+ prev_volume_type_id != volume_type_id:
+ # generate list of hosts for this drive type
+
+ volume_type = volume_types.get_volume_type(context,
+ volume_type_id)
+ drive_type = {
+ 'name': volume_type['extra_specs'].get('drive_name'),
+ 'type': volume_type['extra_specs'].get('drive_type'),
+ 'size': int(volume_type['extra_specs'].get('drive_size')),
+ 'rpm': volume_type['extra_specs'].get('drive_rpm'),
+ }
+ request_spec['drive_type'] = drive_type
+
+ all_hosts = self._filter_hosts("volume", request_spec)
+ prev_volume_type_id = volume_type_id
+
+ (host, qos_cap) = self._select_hosts(request_spec,
+ all_hosts, selected_hosts)
+ vol['host'] = host
+ vol['capabilities'] = qos_cap
+ self._consume_resource(qos_cap, vol['size'], -1)
+
+ def schedule_create_volumes(self, context, request_spec,
+ availability_zone=None, *_args, **_kwargs):
+ """Picks hosts for hosting multiple volumes."""
+
+ num_volumes = request_spec.get('num_volumes')
+ LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
+ locals())
+
+ vsa_id = request_spec.get('vsa_id')
+ volume_params = request_spec.get('volumes')
+
+ host = self._check_host_enforcement(context, availability_zone)
+
+ try:
+ self._print_capabilities_info()
+
+ self._assign_hosts_to_volumes(context, volume_params, host)
+
+ for vol in volume_params:
+ self._provision_volume(context, vol, vsa_id, availability_zone)
+ except Exception:
+ if vsa_id:
+ db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))
+
+ for vol in volume_params:
+ if 'capabilities' in vol:
+ self._consume_resource(vol['capabilities'],
+ vol['size'], 1)
+ raise
+
+ return None
+
+ def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
+ """Picks the best host based on requested drive type capability."""
+ volume_ref = db.volume_get(context, volume_id)
+
+ host = self._check_host_enforcement(context,
+ volume_ref['availability_zone'])
+ if host:
+ now = utils.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ return host
+
+ volume_type_id = volume_ref['volume_type_id']
+ if volume_type_id:
+ volume_type = volume_types.get_volume_type(context, volume_type_id)
+
+ if volume_type_id is None or\
+ volume_types.is_vsa_volume(volume_type_id, volume_type):
+
+ LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
+ return super(VsaScheduler, self).schedule_create_volume(context,
+ volume_id, *_args, **_kwargs)
+
+ self._print_capabilities_info()
+
+ drive_type = {
+ 'name': volume_type['extra_specs'].get('drive_name'),
+ 'type': volume_type['extra_specs'].get('drive_type'),
+ 'size': int(volume_type['extra_specs'].get('drive_size')),
+ 'rpm': volume_type['extra_specs'].get('drive_rpm'),
+ }
+
+ LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
+ "%(drive_type)s"), locals())
+
+ request_spec = {'size': volume_ref['size'],
+ 'drive_type': drive_type}
+ hosts = self._filter_hosts("volume", request_spec)
+
+ try:
+ (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
+ except Exception:
+ if volume_ref['to_vsa_id']:
+ db.vsa_update(context, volume_ref['to_vsa_id'],
+ dict(status=VsaState.FAILED))
+ raise
+
+ if host:
+ now = utils.utcnow()
+ db.volume_update(context, volume_id, {'host': host,
+ 'scheduled_at': now})
+ self._consume_resource(qos_cap, volume_ref['size'], -1)
+ return host
+
+ def _consume_full_drive(self, qos_values, direction):
+ qos_values['FullDrive']['NumFreeDrives'] += direction
+ qos_values['FullDrive']['NumOccupiedDrives'] -= direction
+
+ def _consume_partition(self, qos_values, size, direction):
+
+ if qos_values['PartitionDrive']['PartitionSize'] != 0:
+ partition_size = qos_values['PartitionDrive']['PartitionSize']
+ else:
+ partition_size = size
+ part_per_drive = qos_values['DriveCapacity'] / partition_size
+
+ if direction == -1 and \
+ qos_values['PartitionDrive']['NumFreePartitions'] == 0:
+
+ self._consume_full_drive(qos_values, direction)
+ qos_values['PartitionDrive']['NumFreePartitions'] += \
+ part_per_drive
+
+ qos_values['PartitionDrive']['NumFreePartitions'] += direction
+ qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction
+
+ if direction == 1 and \
+ qos_values['PartitionDrive']['NumFreePartitions'] >= \
+ part_per_drive:
+
+ self._consume_full_drive(qos_values, direction)
+ qos_values['PartitionDrive']['NumFreePartitions'] -= \
+ part_per_drive
+
+ def _consume_resource(self, qos_values, size, direction):
+ if qos_values is None:
+ LOG.debug(_("No capability selected for volume of size %(size)s"),
+ locals())
+ return
+
+ if size == 0: # full drive match
+ qos_values['AvailableCapacity'] += direction * \
+ qos_values['DriveCapacity']
+ self._consume_full_drive(qos_values, direction)
+ else:
+ qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size)
+ self._consume_partition(qos_values, GB_TO_BYTES(size), direction)
+ return
+
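A numeric walkthrough makes the direction convention concrete: direction is -1 when capacity is consumed and +1 when it is released. A hedged sketch, given some VsaScheduler instance `scheduler` and a QoS-group dict shaped like the ones consumed above (all starting values illustrative):

    qos = {'AvailableCapacity': GB_TO_BYTES(5000),
           'DriveCapacity': GB_TO_BYTES(1000),
           'FullDrive': {'NumFreeDrives': 5, 'NumOccupiedDrives': 0},
           'PartitionDrive': {'PartitionSize': GB_TO_BYTES(100),
                              'NumFreePartitions': 0,
                              'NumOccupiedPartitions': 0}}
    scheduler._consume_resource(qos, 100, -1)   # allocate one 100 GB volume
    # AvailableCapacity drops by 100 GB; one 1000 GB drive is broken into
    # ten 100 GB partitions (NumFreeDrives 5 -> 4, NumOccupiedDrives
    # 0 -> 1), and one of them is taken: NumFreePartitions == 9,
    # NumOccupiedPartitions == 1.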
+ def _print_capabilities_info(self):
+ host_list = self._get_service_states().iteritems()
+ for host, host_dict in host_list:
+ for service_name, service_dict in host_dict.iteritems():
+ if service_name != "volume":
+ continue
+
+ LOG.info(_("Host %s:"), host)
+
+ qos_info = service_dict.get('drive_qos_info', {})
+ for qosgrp, qos_values in qos_info.iteritems():
+ total = qos_values['TotalDrives']
+ used = qos_values['FullDrive']['NumOccupiedDrives']
+ free = qos_values['FullDrive']['NumFreeDrives']
+ avail = BYTES_TO_GB(qos_values['AvailableCapacity'])
+
+ LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\
+ "used %(used)2s, free %(free)2s. Available "\
+ "capacity %(avail)-5s"), locals())
+
+
+class VsaSchedulerLeastUsedHost(VsaScheduler):
+ """
+ Implements a VSA scheduler that selects the host with the least
+ used capacity of a particular drive type.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs)
+
+ def host_selection_algorithm(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ size = request_spec['size']
+ drive_type = request_spec['drive_type']
+ best_host = None
+ best_qoscap = None
+ best_cap = None
+ min_used = 0
+
+ for (host, capabilities) in all_hosts:
+
+ has_enough_capacity = False
+ used_capacity = 0
+ for qosgrp, qos_values in capabilities.iteritems():
+
+ used_capacity = used_capacity + qos_values['TotalCapacity'] \
+ - qos_values['AvailableCapacity']
+
+ if self._qosgrp_match(drive_type, qos_values):
+ # we found required qosgroup
+
+ if size == 0: # full drive match
+ if qos_values['FullDrive']['NumFreeDrives'] > 0:
+ has_enough_capacity = True
+ matched_qos = qos_values
+ else:
+ break
+ else:
+ if qos_values['AvailableCapacity'] >= size and \
+ (qos_values['PartitionDrive'][
+ 'NumFreePartitions'] > 0 or \
+ qos_values['FullDrive']['NumFreeDrives'] > 0):
+ has_enough_capacity = True
+ matched_qos = qos_values
+ else:
+ break
+
+ if has_enough_capacity and \
+ self._allowed_to_use_host(host,
+ selected_hosts,
+ unique) and \
+ (best_host is None or used_capacity < min_used):
+
+ min_used = used_capacity
+ best_host = host
+ best_qoscap = matched_qos
+ best_cap = capabilities
+
+ if best_host:
+ self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
+ min_used = BYTES_TO_GB(min_used)
+ LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\
+ "(used capacity %(min_used)s)"), locals())
+ return (best_host, best_qoscap)
+
+
+class VsaSchedulerMostAvailCapacity(VsaScheduler):
+ """
+ Implements a VSA scheduler that selects the host with the most
+ available capacity of a particular drive type.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs)
+
+ def host_selection_algorithm(self, request_spec, all_hosts,
+ selected_hosts, unique):
+ size = request_spec['size']
+ drive_type = request_spec['drive_type']
+ best_host = None
+ best_qoscap = None
+ best_cap = None
+ max_avail = 0
+
+ for (host, capabilities) in all_hosts:
+ for qosgrp, qos_values in capabilities.iteritems():
+ if self._qosgrp_match(drive_type, qos_values):
+ # we found required qosgroup
+
+ if size == 0: # full drive match
+ available = qos_values['FullDrive']['NumFreeDrives']
+ else:
+ available = qos_values['AvailableCapacity']
+
+ if available > max_avail and \
+ self._allowed_to_use_host(host,
+ selected_hosts,
+ unique):
+ max_avail = available
+ best_host = host
+ best_qoscap = qos_values
+ best_cap = capabilities
+ break # go to the next host
+
+ if best_host:
+ self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
+ type_str = "drives" if size == 0 else "bytes"
+ LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "\
+ "(available %(max_avail)s %(type_str)s)"), locals())
+
+ return (best_host, best_qoscap)
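Both concrete schedulers above differ only in host_selection_algorithm, which is the intended extension point. A minimal, hypothetical first-fit variant (not part of this change) could look like:

    class VsaSchedulerFirstFit(VsaScheduler):
        """Pick the first allowed host whose matching QoS group has room."""

        def host_selection_algorithm(self, request_spec, all_hosts,
                                     selected_hosts, unique):
            size = request_spec['size']
            drive_type = request_spec['drive_type']
            for (host, capabilities) in all_hosts:
                for qosgrp, qos_values in capabilities.iteritems():
                    if not self._qosgrp_match(drive_type, qos_values):
                        continue
                    if size == 0:   # full drive match
                        has_room = \
                            qos_values['FullDrive']['NumFreeDrives'] > 0
                    else:
                        has_room = qos_values['AvailableCapacity'] >= size
                    if has_room and self._allowed_to_use_host(
                            host, selected_hosts, unique):
                        self._add_hostcap_to_list(selected_hosts,
                                                  host, capabilities)
                        return (host, qos_values)
            return (None, None)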
diff --git a/nova/service.py b/nova/service.py
index 959e79052..247eb4fb1 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -153,26 +153,15 @@ class Service(object):
self.topic)
# Share this same connection for these Consumers
- consumer_all = rpc.create_consumer(self.conn, self.topic, self,
- fanout=False)
+ self.conn.create_consumer(self.topic, self, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
- consumer_node = rpc.create_consumer(self.conn, node_topic, self,
- fanout=False)
+ self.conn.create_consumer(node_topic, self, fanout=False)
- fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True)
+ self.conn.create_consumer(self.topic, self, fanout=True)
- consumers = [consumer_all, consumer_node, fanout]
- consumer_set = rpc.create_consumer_set(self.conn, consumers)
-
- # Wait forever, processing these consumers
- def _wait():
- try:
- consumer_set.wait()
- finally:
- consumer_set.close()
-
- self.consumer_set_thread = eventlet.spawn(_wait)
+ # Consume from all consumers in a thread
+ self.conn.consume_in_thread()
if self.report_interval:
pulse = utils.LoopingCall(self.report_state)
@@ -237,10 +226,11 @@ class Service(object):
logging.warn(_('Service killed that has no database entry'))
def stop(self):
- self.consumer_set_thread.kill()
+ # Try to shut the connection down, but if we get any sort of
+ # error, go ahead and ignore it, as we're shutting down anyway.
try:
- self.consumer_set_thread.wait()
- except greenlet.GreenletExit:
+ self.conn.close()
+ except Exception:
pass
for x in self.timers:
try:
diff --git a/nova/test.py b/nova/test.py
index 88f1489e8..d759aef60 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -277,3 +277,21 @@ class TestCase(unittest.TestCase):
continue
else:
self.assertEqual(sub_value, super_value)
+
+ def assertIn(self, a, b, *args, **kwargs):
+ """Python < v2.7 compatibility. Assert 'a' in 'b'"""
+ try:
+ f = super(TestCase, self).assertIn
+ except AttributeError:
+ self.assertTrue(a in b, *args, **kwargs)
+ else:
+ f(a, b, *args, **kwargs)
+
+ def assertNotIn(self, a, b, *args, **kwargs):
+ """Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
+ try:
+ f = super(TestCase, self).assertNotIn
+ except AttributeError:
+ self.assertFalse(a in b, *args, **kwargs)
+ else:
+ f(a, b, *args, **kwargs)
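The shim simply prefers the stdlib implementation when it exists and falls back to assertTrue/assertFalse otherwise. Usage inside a test is identical either way (values illustrative):

    self.assertIn('kombu', ['kombu', 'carrot'])
    self.assertNotIn('qpid', ['kombu', 'carrot'])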
diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py
index 01c7587e3..a6da9abfd 100644
--- a/nova/tests/api/openstack/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/contrib/test_createserverext.py
@@ -23,6 +23,7 @@ from xml.dom import minidom
import stubout
import webob
+from nova import db
from nova import exception
from nova import flags
from nova import test
@@ -76,6 +77,8 @@ class CreateserverextTest(test.TestCase):
def __init__(self):
self.injected_files = None
self.networks = None
+ self.user_data = None
+ self.db = db
def create(self, *args, **kwargs):
if 'injected_files' in kwargs:
@@ -87,6 +90,10 @@ class CreateserverextTest(test.TestCase):
self.networks = kwargs['requested_networks']
else:
self.networks = None
+
+ if 'user_data' in kwargs:
+ self.user_data = kwargs['user_data']
+
return [{'id': '1234', 'display_name': 'fakeinstance',
'uuid': FAKE_UUID,
'user_id': 'fake',
@@ -122,6 +129,14 @@ class CreateserverextTest(test.TestCase):
server['networks'] = network_list
return {'server': server}
+ def _create_user_data_request_dict(self, user_data):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageRef'] = 1
+ server['flavorRef'] = 1
+ server['user_data'] = user_data
+ return {'server': server}
+
def _get_create_request_json(self, body_dict):
req = webob.Request.blank('/v1.1/123/os-create-server-ext')
req.headers['Content-Type'] = 'application/json'
@@ -181,6 +196,13 @@ class CreateserverextTest(test.TestCase):
self._run_create_instance_with_mock_compute_api(request)
return request, response, compute_api.networks
+ def _create_instance_with_user_data_json(self, networks):
+ body_dict = self._create_user_data_request_dict(networks)
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.user_data
+
def _create_instance_with_networks_xml(self, networks):
body_dict = self._create_networks_request_dict(networks)
request = self._get_create_request_xml(body_dict)
@@ -307,3 +329,25 @@ class CreateserverextTest(test.TestCase):
self.assertEquals(response.status_int, 202)
self.assertEquals(compute_api.networks,
[('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)])
+
+ def test_create_instance_with_userdata(self):
+ user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
+ user_data_contents = base64.b64encode(user_data_contents)
+ request, response, user_data = \
+ self._create_instance_with_user_data_json(user_data_contents)
+ self.assertEquals(response.status_int, 202)
+ self.assertEquals(user_data, user_data_contents)
+
+ def test_create_instance_with_userdata_none(self):
+ user_data_contents = None
+ request, response, user_data = \
+ self._create_instance_with_user_data_json(user_data_contents)
+ self.assertEquals(response.status_int, 202)
+ self.assertEquals(user_data, user_data_contents)
+
+ def test_create_instance_with_userdata_with_non_b64_content(self):
+ user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
+ request, response, user_data = \
+ self._create_instance_with_user_data_json(user_data_contents)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(user_data, None)
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index 568faf867..fc10f2f6c 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -20,9 +20,11 @@ import webob
from nova import compute
from nova import context
from nova import db
-from nova import test
from nova import network
+from nova import rpc
+from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests.api.openstack import test_servers
from nova.api.openstack.contrib.floating_ips import FloatingIPController
@@ -60,10 +62,38 @@ def compute_api_associate(self, context, instance_id, floating_ip):
pass
+def network_api_associate(self, context, floating_ip, fixed_ip):
+ pass
+
+
def network_api_disassociate(self, context, floating_address):
pass
+def network_get_instance_nw_info(self, context, instance):
+ info = {
+ 'label': 'fake',
+ 'gateway': 'fake',
+ 'dhcp_server': 'fake',
+ 'broadcast': 'fake',
+ 'mac': 'fake',
+ 'vif_uuid': 'fake',
+ 'rxtx_cap': 'fake',
+ 'dns': [],
+ 'ips': [{'ip': '10.0.0.1'}],
+ 'should_create_bridge': False,
+ 'should_create_vlan': False}
+
+ return [['ignore', info]]
+
+
+def fake_instance_get(context, instance_id):
+ return {
+ "id": 1,
+ "user_id": 'fakeuser',
+ "project_id": '123'}
+
+
class FloatingIpTest(test.TestCase):
address = "10.10.10.10"
@@ -79,23 +109,21 @@ class FloatingIpTest(test.TestCase):
def setUp(self):
super(FloatingIpTest, self).setUp()
- self.controller = FloatingIPController()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "list_floating_ips",
network_api_list_floating_ips)
- self.stubs.Set(network.api.API, "allocate_floating_ip",
- network_api_allocate)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
- self.stubs.Set(compute.api.API, "associate_floating_ip",
- compute_api_associate)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
+ self.stubs.Set(network.api.API, "get_instance_nw_info",
+ network_get_instance_nw_info)
+ self.stubs.Set(db.api, 'instance_get',
+ fake_instance_get)
+
self.context = context.get_admin_context()
self._create_floating_ip()
@@ -143,7 +171,20 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['instance_id'], None)
+ def test_floating_ip_allocate_no_free_ips(self):
+ def fake_call(*args, **kwargs):
+ raise rpc.RemoteError('NoMoreFloatingIps', '', '')
+
+ self.stubs.Set(rpc, "call", fake_call)
+ req = webob.Request.blank('/v1.1/123/os-floating-ips')
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_floating_ip_allocate(self):
+ self.stubs.Set(network.api.API, "allocate_floating_ip",
+ network_api_allocate)
req = webob.Request.blank('/v1.1/123/os-floating-ips')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
@@ -165,6 +206,8 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_add_floating_ip_to_instance(self):
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ network_api_associate)
body = dict(addFloatingIp=dict(address='11.0.0.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = "POST"
@@ -174,6 +217,65 @@ class FloatingIpTest(test.TestCase):
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
+ def test_associate_floating_ip_to_instance_wrong_project_id(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ db.floating_ip_update(self.context, self.address, {'project_id': 'bad',
+ 'fixed_ip_id': 1})
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_associate_floating_ip_to_instance_no_project_id(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ db.floating_ip_update(self.context, self.address, {'project_id': None,
+ 'fixed_ip_id': 1})
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_add_associated_floating_ip_to_instance(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+
+ self.disassociated = False
+
+ def fake_network_api_disassociate(local_self, ctx, floating_address):
+ self.disassociated = True
+
+ db.floating_ip_update(self.context, self.address, {'project_id': '123',
+ 'fixed_ip_id': 1})
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ fake_network_api_disassociate)
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 202)
+ self.assertTrue(self.disassociated)
+
def test_remove_floating_ip_from_instance(self):
body = dict(removeFloatingIp=dict(address='11.0.0.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py
index bc1536911..0816a6312 100644
--- a/nova/tests/api/openstack/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/contrib/test_security_groups.py
@@ -360,7 +360,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/invalid/action')
req.headers['Content-Type'] = 'application/json'
@@ -372,7 +372,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_without_body(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(addSecurityGroup=None)
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -382,7 +382,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_no_security_group_name(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(addSecurityGroup=dict())
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -392,7 +392,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_security_group_name_with_whitespaces(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(addSecurityGroup=dict(name=" "))
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -400,9 +400,9 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_associate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
body = dict(addSecurityGroup=dict(name="test"))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/10000/action')
req.headers['Content-Type'] = 'application/json'
@@ -412,8 +412,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 404)
def test_associate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -424,8 +424,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_associate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
body = dict(addSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -436,12 +436,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_associate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
- nova.db.instance_add_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group')
+ nova.db.api.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
@@ -454,12 +454,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 202)
def test_associate_xml(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
- nova.db.instance_add_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group')
+ nova.db.api.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
@@ -483,7 +483,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_by_invalid_server_id(self):
body = dict(removeSecurityGroup=dict(name='test'))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/invalid/action')
req.headers['Content-Type'] = 'application/json'
@@ -495,7 +495,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_without_body(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(removeSecurityGroup=None)
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -505,7 +505,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_no_security_group_name(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(removeSecurityGroup=dict())
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -515,7 +515,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_security_group_name_with_whitespaces(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(removeSecurityGroup=dict(name=" "))
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -523,9 +523,9 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_disassociate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
body = dict(removeSecurityGroup=dict(name="test"))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/10000/action')
req.headers['Content-Type'] = 'application/json'
@@ -535,8 +535,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 404)
def test_disassociate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
body = dict(removeSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -547,8 +547,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_disassociate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -559,12 +559,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_disassociate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
- nova.db.instance_remove_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group')
+ nova.db.api.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
self.mox.ReplayAll()
@@ -577,12 +577,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 202)
def test_disassociate_xml(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
- nova.db.instance_remove_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group')
+ nova.db.api.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
self.mox.ReplayAll()
diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py
new file mode 100644
index 000000000..2430b9d51
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import json
+import webob
+
+from nova import context
+from nova import flags
+from nova import test
+from nova.compute import api
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+
+SERVERS = 5
+TENANTS = 2
+HOURS = 24
+LOCAL_GB = 10
+MEMORY_MB = 1024
+VCPUS = 2
+STOP = datetime.datetime.utcnow()
+START = STOP - datetime.timedelta(hours=HOURS)
+
+
+def fake_instance_type_get(self, context, instance_type_id):
+ return {'id': 1,
+ 'vcpus': VCPUS,
+ 'local_gb': LOCAL_GB,
+ 'memory_mb': MEMORY_MB,
+ 'name': 'fakeflavor'}
+
+
+def get_fake_db_instance(start, end, instance_id, tenant_id):
+ return {'id': instance_id,
+ 'image_ref': '1',
+ 'project_id': tenant_id,
+ 'user_id': 'fakeuser',
+ 'display_name': 'name',
+ 'state_description': 'state',
+ 'instance_type_id': 1,
+ 'launched_at': start,
+ 'terminated_at': end}
+
+
+def fake_instance_get_active_by_window(self, context, begin, end, project_id):
+ return [get_fake_db_instance(START,
+ STOP,
+ x,
+ "faketenant_%s" % (x / SERVERS))
+ for x in xrange(TENANTS * SERVERS)]
+
+
+class SimpleTenantUsageTest(test.TestCase):
+ def setUp(self):
+ super(SimpleTenantUsageTest, self).setUp()
+ self.stubs.Set(api.API, "get_instance_type",
+ fake_instance_type_get)
+ self.stubs.Set(api.API, "get_active_by_window",
+ fake_instance_get_active_by_window)
+ self.admin_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_0',
+ is_admin=True)
+ self.user_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_0',
+ is_admin=False)
+ self.alt_user_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_1',
+ is_admin=False)
+ FLAGS.allow_admin_api = True
+
+ def test_verify_index(self):
+ req = webob.Request.blank(
+ '/v1.1/123/os-simple-tenant-usage?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ usages = res_dict['tenant_usages']
+ for i in xrange(TENANTS):
+ self.assertEqual(int(usages[i]['total_hours']),
+ SERVERS * HOURS)
+ self.assertEqual(int(usages[i]['total_local_gb_usage']),
+ SERVERS * LOCAL_GB * HOURS)
+ self.assertEqual(int(usages[i]['total_memory_mb_usage']),
+ SERVERS * MEMORY_MB * HOURS)
+ self.assertEqual(int(usages[i]['total_vcpus_usage']),
+ SERVERS * VCPUS * HOURS)
+ self.assertFalse(usages[i].get('server_usages'))
+
+ def test_verify_detailed_index(self):
+ req = webob.Request.blank(
+ '/v1.1/123/os-simple-tenant-usage?'
+ 'detailed=1&start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ usages = res_dict['tenant_usages']
+ for i in xrange(TENANTS):
+ servers = usages[i]['server_usages']
+ for j in xrange(SERVERS):
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+
+ def test_verify_index_fails_for_nonadmin(self):
+ req = webob.Request.blank(
+ '/v1.1/123/os-simple-tenant-usage?'
+ 'detailed=1&start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 403)
+
+ def test_verify_show(self):
+ req = webob.Request.blank(
+ '/v1.1/faketenant_0/os-simple-tenant-usage/'
+ 'faketenant_0?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+
+ usage = res_dict['tenant_usage']
+ servers = usage['server_usages']
+ self.assertEqual(len(usage['server_usages']), SERVERS)
+ for j in xrange(SERVERS):
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+
+ def test_verify_show_cant_view_other_tenant(self):
+ req = webob.Request.blank(
+ '/v1.1/faketenant_1/os-simple-tenant-usage/'
+ 'faketenant_0?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.alt_user_context))
+ self.assertEqual(res.status_int, 403)
diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py
new file mode 100644
index 000000000..311b6cb8d
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_vsa.py
@@ -0,0 +1,450 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import volume
+from nova import vsa
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.api.openstack.vsa')
+
+last_param = {}
+
+
+def _get_default_vsa_param():
+ return {
+ 'display_name': 'Test_VSA_name',
+ 'display_description': 'Test_VSA_description',
+ 'vc_count': 1,
+ 'instance_type': 'm1.small',
+ 'instance_type_id': 5,
+ 'image_name': None,
+ 'availability_zone': None,
+ 'storage': [],
+ 'shared': False
+ }
+
+
+def stub_vsa_create(self, context, **param):
+ global last_param
+ LOG.debug(_("_create: param=%s"), param)
+ param['id'] = 123
+ param['name'] = 'Test name'
+ param['instance_type_id'] = 5
+ last_param = param
+ return param
+
+
+def stub_vsa_delete(self, context, vsa_id):
+ global last_param
+ last_param = dict(vsa_id=vsa_id)
+
+ LOG.debug(_("_delete: %s"), locals())
+ if vsa_id != '123':
+ raise exception.NotFound
+
+
+def stub_vsa_get(self, context, vsa_id):
+ global last_param
+ last_param = dict(vsa_id=vsa_id)
+
+ LOG.debug(_("_get: %s"), locals())
+ if vsa_id != '123':
+ raise exception.NotFound
+
+ param = _get_default_vsa_param()
+ param['id'] = vsa_id
+ return param
+
+
+def stub_vsa_get_all(self, context):
+ LOG.debug(_("_get_all: %s"), locals())
+ param = _get_default_vsa_param()
+ param['id'] = 123
+ return [param]
+
+
+class VSAApiTest(test.TestCase):
+ def setUp(self):
+ super(VSAApiTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(vsa.api.API, "create", stub_vsa_create)
+ self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete)
+ self.stubs.Set(vsa.api.API, "get", stub_vsa_get)
+ self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all)
+
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSAApiTest, self).tearDown()
+
+ def test_vsa_create(self):
+ global last_param
+ last_param = {}
+
+ vsa = {"displayName": "VSA Test Name",
+ "displayDescription": "VSA Test Desc"}
+ body = dict(vsa=vsa)
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ # Compare if parameters were correctly passed to stub
+ self.assertEqual(last_param['display_name'], "VSA Test Name")
+ self.assertEqual(last_param['display_description'], "VSA Test Desc")
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue('vsa' in resp_dict)
+ self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName'])
+ self.assertEqual(resp_dict['vsa']['displayDescription'],
+ vsa['displayDescription'])
+
+ def test_vsa_create_no_body(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'POST'
+ req.body = json.dumps({})
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 422)
+
+ def test_vsa_delete(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 123
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_delete_invalid_id(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 234
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_show(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 123
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue('vsa' in resp_dict)
+ self.assertEqual(resp_dict['vsa']['id'], str(vsa_id))
+
+ def test_vsa_show_invalid_id(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 234
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_index(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+
+ self.assertTrue('vsaSet' in resp_dict)
+ resp_vsas = resp_dict['vsaSet']
+ self.assertEqual(len(resp_vsas), 1)
+
+ resp_vsa = resp_vsas.pop()
+ self.assertEqual(resp_vsa['id'], 123)
+
+ def test_vsa_detail(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/detail')
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+
+ self.assertTrue('vsaSet' in resp_dict)
+ resp_vsas = resp_dict['vsaSet']
+ self.assertEqual(len(resp_vsas), 1)
+
+ resp_vsa = resp_vsas.pop()
+ self.assertEqual(resp_vsa['id'], 123)
+
+
+def _get_default_volume_param():
+ return {
+ 'id': 123,
+ 'status': 'available',
+ 'size': 100,
+ 'availability_zone': 'nova',
+ 'created_at': None,
+ 'attach_status': 'detached',
+ 'name': 'vol name',
+ 'display_name': 'Default vol name',
+ 'display_description': 'Default vol description',
+ 'volume_type_id': 1,
+ 'volume_metadata': [],
+ }
+
+
+def stub_get_vsa_volume_type(self, context):
+ return {'id': 1,
+ 'name': 'VSA volume type',
+ 'extra_specs': {'type': 'vsa_volume'}}
+
+
+def stub_volume_create(self, context, size, snapshot_id, name, description,
+ **param):
+ LOG.debug(_("_create: param=%s"), size)
+ vol = _get_default_volume_param()
+ vol['size'] = size
+ vol['display_name'] = name
+ vol['display_description'] = description
+ return vol
+
+
+def stub_volume_update(self, context, **param):
+ LOG.debug(_("_volume_update: param=%s"), param)
+ pass
+
+
+def stub_volume_delete(self, context, **param):
+ LOG.debug(_("_volume_delete: param=%s"), param)
+ pass
+
+
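+# The VSA association is carried in volume metadata: stub_volume_get tags
+# every volume with from_vsa_id=123, except volume '345' which is tagged
+# with to_vsa_id=123, so both directions of the association get exercised.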
+def stub_volume_get(self, context, volume_id):
+ LOG.debug(_("_volume_get: volume_id=%s"), volume_id)
+ vol = _get_default_volume_param()
+ vol['id'] = volume_id
+ meta = {'key': 'from_vsa_id', 'value': '123'}
+ if volume_id == '345':
+ meta = {'key': 'to_vsa_id', 'value': '123'}
+ vol['volume_metadata'].append(meta)
+ return vol
+
+
+def stub_volume_get_notfound(self, context, volume_id):
+ raise exception.NotFound
+
+
+def stub_volume_get_all(self, context, search_opts):
+ vol = stub_volume_get(self, context, '123')
+ vol['metadata'] = search_opts['metadata']
+ return [vol]
+
+
+def return_vsa(context, vsa_id):
+ return {'id': vsa_id}
+
+
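+# These tests drive the per-VSA volume sub-resource.  VSADriveApiTest below
+# reuses them for the read-only drive sub-resource by overriding test_obj
+# and test_objs; mutating requests are then expected to fail with 400.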
+class VSAVolumeApiTest(test.TestCase):
+
+ def setUp(self, test_obj=None, test_objs=None):
+ super(VSAVolumeApiTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(nova.db.api, 'vsa_get', return_vsa)
+ self.stubs.Set(vsa.api.API, "get_vsa_volume_type",
+ stub_get_vsa_volume_type)
+
+ self.stubs.Set(volume.api.API, "update", stub_volume_update)
+ self.stubs.Set(volume.api.API, "delete", stub_volume_delete)
+ self.stubs.Set(volume.api.API, "get", stub_volume_get)
+ self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all)
+
+ self.context = context.get_admin_context()
+ self.test_obj = test_obj if test_obj else "volume"
+ self.test_objs = test_objs if test_objs else "volumes"
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSAVolumeApiTest, self).tearDown()
+
+ def test_vsa_volume_create(self):
+ self.stubs.Set(volume.api.API, "create", stub_volume_create)
+
+ vol = {"size": 100,
+ "displayName": "VSA Volume Test Name",
+ "displayDescription": "VSA Volume Test Desc"}
+ body = {self.test_obj: vol}
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(fakes.wsgi_app())
+
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue(self.test_obj in resp_dict)
+ self.assertEqual(resp_dict[self.test_obj]['size'],
+ vol['size'])
+ self.assertEqual(resp_dict[self.test_obj]['displayName'],
+ vol['displayName'])
+ self.assertEqual(resp_dict[self.test_obj]['displayDescription'],
+ vol['displayDescription'])
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_create_no_body(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ req.method = 'POST'
+ req.body = json.dumps({})
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 422)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_index(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_detail(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \
+ self.test_objs)
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_show(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_show_no_vsa_assignment(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
+ (self.test_objs))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_show_no_volume(self):
+ self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)
+
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
+ (self.test_objs))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+
+ def test_vsa_volume_update(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ update = {"status": "available",
+ "displayName": "Test Display name"}
+ body = {self.test_obj: update}
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 202)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 202)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete_no_vsa_assignment(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
+ (self.test_objs))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete_no_volume(self):
+ self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)
+
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
+ (self.test_objs))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 404)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+
+class VSADriveApiTest(VSAVolumeApiTest):
+ def setUp(self):
+ super(VSADriveApiTest, self).setUp(test_obj="drive",
+ test_objs="drives")
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSADriveApiTest, self).tearDown()
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 9f923852d..31443242b 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -95,8 +95,11 @@ class ExtensionControllerTest(test.TestCase):
"Quotas",
"Rescue",
"SecurityGroups",
+ "SimpleTenantUsage",
+ "VSAs",
"VirtualInterfaces",
"Volumes",
+ "VolumeTypes",
]
self.ext_list.sort()
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 3dfdeb79c..b9ef41465 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -10,8 +10,8 @@ from nova import utils
from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
+from nova.compute import vm_states
from nova.compute import instance_types
-from nova.compute import power_state
import nova.db.api
from nova import test
from nova.tests.api.openstack import common
@@ -35,17 +35,19 @@ def return_server_with_attributes(**kwargs):
return _return_server
-def return_server_with_power_state(power_state):
- return return_server_with_attributes(power_state=power_state)
+def return_server_with_state(vm_state, task_state=None):
+ return return_server_with_attributes(vm_state=vm_state,
+ task_state=task_state)
-def return_server_with_uuid_and_power_state(power_state):
- return return_server_with_power_state(power_state)
-
+def return_server_with_uuid_and_state(vm_state, task_state=None):
+ def _return_server(context, id):
+ return return_server_with_state(vm_state, task_state)
+ return _return_server
-def stub_instance(id, power_state=0, metadata=None,
- image_ref="10", flavor_id="1", name=None):
+def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
+ name=None, vm_state=None, task_state=None):
if metadata is not None:
metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
else:
@@ -66,8 +68,8 @@ def stub_instance(id, power_state=0, metadata=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.ACTIVE,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -175,11 +177,11 @@ class ServerActionsTest(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
@@ -242,19 +244,6 @@ class ServerActionsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 500)
- def test_resized_server_has_correct_status(self):
- req = self.webreq('/1', 'GET')
-
- def fake_migration_get(*args):
- return {}
-
- self.stubs.Set(nova.db, 'migration_get_by_instance_and_status',
- fake_migration_get)
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- body = json.loads(res.body)
- self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM')
-
def test_confirm_resize_server(self):
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
@@ -642,11 +631,11 @@ class ServerActionsTestV11(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 1ecb35f63..f3819924a 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -37,7 +37,8 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.compute.api
from nova.compute import instance_types
-from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
import nova.db.api
import nova.scheduler.api
from nova.db.sqlalchemy.models import Instance
@@ -91,15 +92,18 @@ def return_server_with_addresses(private, public):
return _return_server
-def return_server_with_power_state(power_state):
+def return_server_with_state(vm_state, task_state=None):
def _return_server(context, id):
- return stub_instance(id, power_state=power_state)
+ return stub_instance(id, vm_state=vm_state, task_state=task_state)
return _return_server
-def return_server_with_uuid_and_power_state(power_state):
+def return_server_with_uuid_and_state(vm_state, task_state):
def _return_server(context, id):
- return stub_instance(id, uuid=FAKE_UUID, power_state=power_state)
+ return stub_instance(id,
+ uuid=FAKE_UUID,
+ vm_state=vm_state,
+ task_state=task_state)
return _return_server
@@ -148,7 +152,8 @@ def instance_addresses(context, instance_id):
def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
- public_addresses=None, host=None, power_state=0,
+ public_addresses=None, host=None,
+ vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", interfaces=None, name=None,
description='fakedescription',
@@ -185,8 +190,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -501,7 +506,7 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -597,8 +602,8 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1, image_ref=image_ref,
- flavor_id=flavor_id)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE,
+ image_ref=image_ref, flavor_id=flavor_id)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -1242,9 +1247,8 @@ class ServersTest(test.TestCase):
def test_get_servers_allows_status_v1_1(self):
def fake_get_all(compute_self, context, search_opts=None):
self.assertNotEqual(search_opts, None)
- self.assertTrue('state' in search_opts)
- self.assertEqual(set(search_opts['state']),
- set([power_state.RUNNING, power_state.BLOCKED]))
+ self.assertTrue('vm_state' in search_opts)
+ self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE)
return [stub_instance(100)]
self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
@@ -1261,13 +1265,9 @@ class ServersTest(test.TestCase):
def test_get_servers_invalid_status_v1_1(self):
"""Test getting servers by invalid status"""
-
self.flags(allow_admin_api=False)
-
req = webob.Request.blank('/v1.1/fake/servers?status=running')
res = req.get_response(fakes.wsgi_app())
- # The following assert will fail if either of the asserts in
- # fake_get_all() fail
self.assertEqual(res.status_int, 400)
self.assertTrue(res.body.find('Invalid server status') > -1)
@@ -1774,6 +1774,7 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(16, len(server['adminPass']))
self.assertEqual(1, server['id'])
+ self.assertEqual("BUILD", server["status"])
self.assertEqual(0, server['progress'])
self.assertEqual('server_test', server['name'])
self.assertEqual(expected_flavor, server['flavor'])
@@ -2531,23 +2532,51 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 204)
self.assertEqual(self.server_delete_called, True)
- def test_shutdown_status(self):
- new_server = return_server_with_power_state(power_state.SHUTDOWN)
- self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTDOWN')
- def test_shutoff_status(self):
- new_server = return_server_with_power_state(power_state.SHUTOFF)
+class TestServerStatus(test.TestCase):
+
+ def _get_with_state(self, vm_state, task_state=None):
+ new_server = return_server_with_state(vm_state, task_state)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+ request = webob.Request.blank('/v1.0/servers/1')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(response.status_int, 200)
+ return json.loads(response.body)
+
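+    # Each case maps a (vm_state, task_state) pair to the status string
+    # exposed through the servers view.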
+ def test_active(self):
+ response = self._get_with_state(vm_states.ACTIVE)
+ self.assertEqual(response['server']['status'], 'ACTIVE')
+
+ def test_reboot(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING)
+ self.assertEqual(response['server']['status'], 'REBOOT')
+
+ def test_rebuild(self):
+ response = self._get_with_state(vm_states.REBUILDING)
+ self.assertEqual(response['server']['status'], 'REBUILD')
+
+ def test_rebuild_error(self):
+ response = self._get_with_state(vm_states.ERROR)
+ self.assertEqual(response['server']['status'], 'ERROR')
+
+ def test_resize(self):
+ response = self._get_with_state(vm_states.RESIZING)
+ self.assertEqual(response['server']['status'], 'RESIZE')
+
+ def test_verify_resize(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_VERIFY)
+ self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+ def test_password_update(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.UPDATING_PASSWORD)
+ self.assertEqual(response['server']['status'], 'PASSWORD')
+
+ def test_stopped(self):
+ response = self._get_with_state(vm_states.STOPPED)
+ self.assertEqual(response['server']['status'], 'STOPPED')
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
@@ -3293,6 +3322,7 @@ class TestServerInstanceCreation(test.TestCase):
def __init__(self):
self.injected_files = None
self.networks = None
+ self.db = db
def create(self, *args, **kwargs):
if 'injected_files' in kwargs:
@@ -3603,8 +3633,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
- "state_description": "",
+ "vm_state": vm_states.BUILDING,
+ "task_state": None,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -3752,7 +3782,7 @@ class ServersViewBuilderV11Test(test.TestCase):
def test_build_server_detail_active_status(self):
         #set the vm_state of the instance to active
- self.instance['state'] = 1
+ self.instance['vm_state'] = vm_states.ACTIVE
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
expected_server = {
diff --git a/nova/tests/api/openstack/test_volume_types.py b/nova/tests/api/openstack/test_volume_types.py
new file mode 100644
index 000000000..192e66854
--- /dev/null
+++ b/nova/tests/api/openstack/test_volume_types.py
@@ -0,0 +1,171 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+
+from nova import exception
+from nova import context
+from nova import test
+from nova import log as logging
+from nova.volume import volume_types
+from nova.tests.api.openstack import fakes
+
+LOG = logging.getLogger('nova.tests.api.openstack.test_volume_types')
+
+
+def stub_volume_type(id):
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
+
+
+def return_volume_types_get_all_types(context):
+ return dict(vol_type_1=stub_volume_type(1),
+ vol_type_2=stub_volume_type(2),
+ vol_type_3=stub_volume_type(3))
+
+
+def return_empty_volume_types_get_all_types(context):
+ return {}
+
+
+def return_volume_types_get_volume_type(context, id):
+ if id == "777":
+ raise exception.VolumeTypeNotFound(volume_type_id=id)
+ return stub_volume_type(int(id))
+
+
+def return_volume_types_destroy(context, name):
+ if name == "777":
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ pass
+
+
+def return_volume_types_create(context, name, specs):
+ pass
+
+
+def return_volume_types_get_by_name(context, name):
+ if name == "777":
+ raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
+ return stub_volume_type(int(name.split("_")[2]))
+
+
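+# Stubs are installed per-test rather than in setUp, so each test replaces
+# only the volume_types calls it exercises.  An id or name of "777" is the
+# sentinel for "not found" throughout this module.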
+class VolumeTypesApiTest(test.TestCase):
+ def setUp(self):
+ super(VolumeTypesApiTest, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VolumeTypesApiTest, self).tearDown()
+
+ def test_volume_types_index(self):
+ self.stubs.Set(volume_types, 'get_all_types',
+ return_volume_types_get_all_types)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+
+ self.assertEqual(3, len(res_dict))
+ for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']:
+ self.assertEqual(name, res_dict[name]['name'])
+ self.assertEqual('value1', res_dict[name]['extra_specs']['key1'])
+
+ def test_volume_types_index_no_data(self):
+ self.stubs.Set(volume_types, 'get_all_types',
+ return_empty_volume_types_get_all_types)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(0, len(res_dict))
+
+ def test_volume_types_show(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+ def test_volume_types_show_not_found(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/777')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_volume_types_delete(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ self.stubs.Set(volume_types, 'destroy',
+ return_volume_types_destroy)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_volume_types_delete_not_found(self):
+ self.stubs.Set(volume_types, 'get_volume_type',
+ return_volume_types_get_volume_type)
+ self.stubs.Set(volume_types, 'destroy',
+ return_volume_types_destroy)
+ req = webob.Request.blank('/v1.1/123/os-volume-types/777')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(volume_types, 'create',
+ return_volume_types_create)
+ self.stubs.Set(volume_types, 'get_volume_type_by_name',
+ return_volume_types_get_by_name)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ req.method = 'POST'
+ req.body = '{"volume_type": {"name": "vol_type_1", '\
+ '"extra_specs": {"key1": "value1"}}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
+
+ def test_create_empty_body(self):
+ self.stubs.Set(volume_types, 'create',
+ return_volume_types_create)
+ self.stubs.Set(volume_types, 'get_volume_type_by_name',
+ return_volume_types_get_by_name)
+ req = webob.Request.blank('/v1.1/123/os-volume-types')
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_volume_types_extra_specs.py b/nova/tests/api/openstack/test_volume_types_extra_specs.py
new file mode 100644
index 000000000..34bdada22
--- /dev/null
+++ b/nova/tests/api/openstack/test_volume_types_extra_specs.py
@@ -0,0 +1,181 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+import os.path
+
+
+from nova import test
+from nova.api import openstack
+from nova.api.openstack import extensions
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+
+def return_create_volume_type_extra_specs(context, volume_type_id,
+ extra_specs):
+ return stub_volume_type_extra_specs()
+
+
+def return_volume_type_extra_specs(context, volume_type_id):
+ return stub_volume_type_extra_specs()
+
+
+def return_empty_volume_type_extra_specs(context, volume_type_id):
+ return {}
+
+
+def delete_volume_type_extra_specs(context, volume_type_id, key):
+ pass
+
+
+def stub_volume_type_extra_specs():
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return specs
+
+
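+# Every request targets volume type 1; the stubbed DB calls ignore the id
+# and return the five key/value pairs above, which is enough to exercise
+# the index, show, create, update and delete controller paths.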
+class VolumeTypesExtraSpecsTest(test.TestCase):
+
+ def setUp(self):
+ super(VolumeTypesExtraSpecsTest, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.api_path = '/v1.1/123/os-volume-types/1/extra_specs'
+
+ def test_index(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_volume_type_extra_specs)
+ request = webob.Request.blank(self.api_path)
+ res = request.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_empty_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path)
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(0, len(res_dict['extra_specs']))
+
+ def test_show(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key5')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_spec_not_found(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_get',
+ return_empty_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key6')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(404, res.status_int)
+
+ def test_delete(self):
+ self.stubs.Set(nova.db.api, 'volume_type_extra_specs_delete',
+ delete_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key5')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path)
+ req.method = 'POST'
+ req.body = '{"extra_specs": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path)
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ res_dict = json.loads(res.body)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key1')
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db.api,
+ 'volume_type_extra_specs_update_or_create',
+ return_create_volume_type_extra_specs)
+ req = webob.Request.blank(self.api_path + '/bad')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 0ff508ffa..5df25df37 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -38,7 +38,16 @@ class StubGlanceClient(object):
return self.images[image_id]
def get_images_detailed(self, filters=None, marker=None, limit=None):
- return self.images.itervalues()
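+        # Emulate marker-based pagination: resume just past the image whose
+        # id matches the marker (or from the start when none is given); the
+        # limit argument is ignored in favour of the fixed page size below.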
+ images = self.images.values()
+ if marker is None:
+ index = 0
+ else:
+ for index, image in enumerate(images):
+ if image['id'] == marker:
+ index += 1
+ break
+ # default to a page size of 3 to ensure we flex the pagination code
+ return images[index:index + 3]
def get_image(self, image_id):
return self.images[image_id], []
@@ -86,23 +95,23 @@ class TestGlanceImageServiceProperties(BaseGlanceTest):
"""Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the
properties dict
"""
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True,
'foo': 'bar',
'properties': {'prop1': 'propvalue1'}}}
self.client.images = fixtures
image_meta = self.service.show(self.context, 'image1')
- expected = {'name': 'image1', 'is_public': True,
+ expected = {'id': '1', 'name': 'image1', 'is_public': True,
'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}
self.assertEqual(image_meta, expected)
def test_detail_passes_through_to_client(self):
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True,
'foo': 'bar',
'properties': {'prop1': 'propvalue1'}}}
self.client.images = fixtures
image_meta = self.service.detail(self.context)
- expected = [{'name': 'image1', 'is_public': True,
+ expected = [{'id': '1', 'name': 'image1', 'is_public': True,
'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}]
self.assertEqual(image_meta, expected)
@@ -166,6 +175,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest):
def _make_datetime_fixtures(self):
fixtures = {
'image1': {
+ 'id': '1',
'name': 'image1',
'is_public': True,
'created_at': self.NOW_GLANCE_FORMAT,
@@ -173,6 +183,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest):
'deleted_at': self.NOW_GLANCE_FORMAT,
},
'image2': {
+ 'id': '2',
'name': 'image2',
'is_public': True,
'created_at': self.NOW_GLANCE_OLD_FORMAT,
@@ -183,13 +194,17 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest):
return fixtures
def _make_none_datetime_fixtures(self):
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1',
+ 'name': 'image1',
+ 'is_public': True,
'updated_at': None,
'deleted_at': None}}
return fixtures
def _make_blank_datetime_fixtures(self):
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1',
+ 'name': 'image1',
+ 'is_public': True,
'updated_at': '',
'deleted_at': ''}}
return fixtures
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index c2f800689..2cf604d06 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
+ def _wait_for_creation(self, server):
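+        """Poll a server until it leaves the BUILD state.
+
+        Gives up after a few one-second retries so a stuck build cannot
+        hang the test run.
+        """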
+ retries = 0
+ while server['status'] == 'BUILD':
+ time.sleep(1)
+ server = self.api.get_server(server['id'])
+            LOG.debug("found server: %s" % server)
+ retries = retries + 1
+ if retries > 5:
+ break
+ return server
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -36,9 +47,9 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_delete_server(self):
"""Creates and deletes a server."""
+ self.flags(stub_network=True)
# Create server
-
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
@@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
server_ids = [server['id'] for server in servers]
self.assertTrue(created_server_id in server_ids)
- # Wait (briefly) for creation
- retries = 0
- while found_server['status'] == 'build':
- LOG.debug("found server: %s" % found_server)
- time.sleep(1)
- found_server = self.api.get_server(created_server_id)
- retries = retries + 1
- if retries > 5:
- break
+ found_server = self._wait_for_creation(found_server)
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
- #self.assertEqual('available', found_server['status'])
+ self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertTrue("image" in server)
@@ -181,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server(self):
"""Rebuild a server."""
+ self.flags(stub_network=True)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
@@ -190,10 +194,12 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
- "imageRef": "https://localhost/v1.1/32278/images/2",
+ "imageRef": "https://localhost/v1.1/32278/images/3",
"name": "blah",
}
@@ -205,12 +211,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual('3', found_server.get('image')['id'])
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server_with_metadata(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
@@ -220,6 +228,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
@@ -247,6 +257,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server_with_metadata_removal(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
@@ -263,6 +274,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py
index d3e936462..d6c5e1ba1 100644
--- a/nova/tests/integrated/test_volumes.py
+++ b/nova/tests/integrated/test_volumes.py
@@ -285,6 +285,23 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
self.assertEquals(undisco_move['mountpoint'], device)
self.assertEquals(undisco_move['instance_id'], server_id)
+ def test_create_volume_with_metadata(self):
+ """Creates and deletes a volume."""
+
+ # Create volume
+ metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ created_volume = self.api.post_volume(
+ {'volume': {'size': 1,
+ 'metadata': metadata}})
+ LOG.debug("created_volume: %s" % created_volume)
+ self.assertTrue(created_volume['id'])
+ created_volume_id = created_volume['id']
+
+ # Check it's there and metadata present
+ found_volume = self.api.get_volume(created_volume_id)
+ self.assertEqual(created_volume_id, found_volume['id'])
+ self.assertEqual(metadata, found_volume['metadata'])
if __name__ == "__main__":
unittest.main()
diff --git a/nova/tests/notifier/__init__.py b/nova/tests/notifier/__init__.py
new file mode 100644
index 000000000..bd862c46a
--- /dev/null
+++ b/nova/tests/notifier/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests import *
diff --git a/nova/tests/notifier/test_list_notifier.py b/nova/tests/notifier/test_list_notifier.py
new file mode 100644
index 000000000..b77720759
--- /dev/null
+++ b/nova/tests/notifier/test_list_notifier.py
@@ -0,0 +1,88 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+import sys
+
+import nova
+from nova import log as logging
+import nova.notifier.api
+from nova.notifier.api import notify
+from nova.notifier import log_notifier
+from nova.notifier import no_op_notifier
+from nova.notifier import list_notifier
+from nova import test
+
+
+class NotifierListTestCase(test.TestCase):
+ """Test case for notifications"""
+
+ def setUp(self):
+ super(NotifierListTestCase, self).setUp()
+ list_notifier._reset_drivers()
+ self.stubs = stubout.StubOutForTesting()
+ # Mock log to add one to exception_count when log.exception is called
+
+ def mock_exception(cls, *args):
+ self.exception_count += 1
+
+ self.exception_count = 0
+ list_notifier_log = logging.getLogger('nova.notifier.list_notifier')
+ self.stubs.Set(list_notifier_log, "exception", mock_exception)
+ # Mock no_op notifier to add one to notify_count when called.
+
+ def mock_notify(cls, *args):
+ self.notify_count += 1
+
+ self.notify_count = 0
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify)
+ # Mock log_notifier to raise RuntimeError when called.
+
+ def mock_notify2(cls, *args):
+ raise RuntimeError("Bad notifier.")
+
+ self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ list_notifier._reset_drivers()
+ super(NotifierListTestCase, self).tearDown()
+
+ def test_send_notifications_successfully(self):
+ self.flags(notification_driver='nova.notifier.list_notifier',
+ list_notifier_drivers=['nova.notifier.no_op_notifier',
+ 'nova.notifier.no_op_notifier'])
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.notify_count, 2)
+ self.assertEqual(self.exception_count, 0)
+
+ def test_send_notifications_with_errors(self):
+
+ self.flags(notification_driver='nova.notifier.list_notifier',
+ list_notifier_drivers=['nova.notifier.no_op_notifier',
+ 'nova.notifier.log_notifier'])
+ notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.notify_count, 1)
+ self.assertEqual(self.exception_count, 1)
+
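+    # Both bogus driver names fail to import and are each logged as an
+    # exception, while the one good driver is still invoked.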
+ def test_when_driver_fails_to_import(self):
+ self.flags(notification_driver='nova.notifier.list_notifier',
+ list_notifier_drivers=['nova.notifier.no_op_notifier',
+ 'nova.notifier.logo_notifier',
+ 'fdsjgsdfhjkhgsfkj'])
+ notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.exception_count, 2)
+ self.assertEqual(self.notify_count, 1)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 158df2a27..a52dd041a 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,6 +40,7 @@ from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
from nova.compute import power_state
+from nova.compute import vm_states
FLAGS = flags.FLAGS
@@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase):
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 10)
inst['local_gb'] = kwargs.get('local_gb', 20)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
+ inst['task_state'] = kwargs.get('task_state', None)
return db.instance_create(ctxt, inst)
def test_fallback(self):
@@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase):
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['local_gb'] = kwargs.get('local_gb', 30)
         inst['launched_on'] = kwargs.get('launched_on', 'dummy')
- inst['state_description'] = kwargs.get('state_description', 'running')
- inst['state'] = kwargs.get('state', power_state.RUNNING)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['task_state'] = kwargs.get('task_state', None)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase):
block_migration=False)
i_ref = db.instance_get(self.context, instance_id)
- self.assertTrue(i_ref['state_description'] == 'migrating')
+ self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATING)
db.instance_destroy(self.context, instance_id)
db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_src_check_instance_not_running(self):
"""The instance given by instance_id is not running."""
- instance_id = self._create_instance(state_description='migrating')
+ instance_id = self._create_instance(power_state=power_state.NOSTATE)
i_ref = db.instance_get(self.context, instance_id)
try:
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
new file mode 100644
index 000000000..37964f00d
--- /dev/null
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -0,0 +1,641 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+import nova
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.volume import volume_types
+
+from nova.scheduler import vsa as vsa_sched
+from nova.scheduler import driver
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.scheduler.vsa')
+
+scheduled_volumes = []
+scheduled_volume = {}
+global_volume = {}
+
+
+class FakeVsaLeastUsedScheduler(
+ vsa_sched.VsaSchedulerLeastUsedHost):
+ # No need to stub anything at the moment
+ pass
+
+
+class FakeVsaMostAvailCapacityScheduler(
+ vsa_sched.VsaSchedulerMostAvailCapacity):
+ # No need to stub anything at the moment
+ pass
+
+
+class VsaSchedulerTestCase(test.TestCase):
+
+ def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
+ volume_params = []
+ for i in range(num_vols):
+
+ name = 'name_' + str(i)
+ try:
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ except exception.ApiError:
+ # type is already created
+ pass
+
+ volume_type = volume_types.get_volume_type_by_name(self.context,
+ name)
+ volume = {'size': size,
+ 'snapshot_id': None,
+ 'name': 'vol_' + str(i),
+ 'description': None,
+ 'volume_type_id': volume_type['id']}
+ volume_params.append(volume)
+
+ return {'num_volumes': len(volume_params),
+ 'vsa_id': 123,
+ 'volumes': volume_params}
+
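+    # Synthetic capability reports: host_<i> advertises 2 * (init_num_drives
+    # + i) drives of each configured type, i of them already occupied, with
+    # a per-drive capacity of (1 + 100 * type_index) GB to match the
+    # drive_size of the volume types created in _get_vol_creation_request().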
+ def _generate_default_service_states(self):
+ service_states = {}
+ for i in range(self.host_num):
+ host = {}
+ hostname = 'host_' + str(i)
+ if hostname in self.exclude_host_list:
+ continue
+
+ host['volume'] = {'timestamp': utils.utcnow(),
+ 'drive_qos_info': {}}
+
+ for j in range(self.drive_type_start_ix,
+ self.drive_type_start_ix + self.drive_type_num):
+ dtype = {}
+ dtype['Name'] = 'name_' + str(j)
+ dtype['DriveType'] = 'type_' + str(j)
+ dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
+ dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
+ dtype['TotalCapacity'] = dtype['TotalDrives'] * \
+ dtype['DriveCapacity']
+ dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
+ dtype['DriveCapacity']
+ dtype['DriveRpm'] = 7200
+ dtype['DifCapable'] = 0
+ dtype['SedCapable'] = 0
+ dtype['PartitionDrive'] = {
+ 'PartitionSize': 0,
+ 'NumOccupiedPartitions': 0,
+ 'NumFreePartitions': 0}
+ dtype['FullDrive'] = {
+ 'NumFreeDrives': dtype['TotalDrives'] - i,
+ 'NumOccupiedDrives': i}
+ host['volume']['drive_qos_info'][dtype['Name']] = dtype
+
+ service_states[hostname] = host
+
+ return service_states
+
+ def _print_service_states(self):
+ for host, host_val in self.service_states.iteritems():
+ LOG.info(_("Host %s"), host)
+ total_used = 0
+ total_available = 0
+ qos = host_val['volume']['drive_qos_info']
+
+ for k, d in qos.iteritems():
+ LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
+ "size %3d, total %4d, used %4d, avail %d",
+ k, d['DriveType'],
+ d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
+ vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity']),
+ vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
+
+ total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity'])
+ total_available += vsa_sched.BYTES_TO_GB(
+ d['AvailableCapacity'])
+ LOG.info("Host %s: used %d, avail %d",
+ host, total_used, total_available)
+
+ def _set_service_states(self, host_num,
+ drive_type_start_ix, drive_type_num,
+ init_num_drives=10,
+                            exclude_host_list=None):
+        self.host_num = host_num
+        self.drive_type_start_ix = drive_type_start_ix
+        self.drive_type_num = drive_type_num
+        # avoid a mutable default argument
+        self.exclude_host_list = exclude_host_list or []
+ self.init_num_drives = init_num_drives
+ self.service_states = self._generate_default_service_states()
+
+ def _get_service_states(self):
+ return self.service_states
+
+ def _fake_get_service_states(self):
+ return self._get_service_states()
+
+ def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
+ global scheduled_volumes
+ scheduled_volumes.append(dict(vol=vol,
+ vsa_id=vsa_id,
+ az=availability_zone))
+ name = vol['name']
+ host = vol['host']
+ LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
+ locals())
+ LOG.debug(_("\t vol=%(vol)s"), locals())
+ pass
+
+ def _fake_vsa_update(self, context, vsa_id, values):
+ LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
+ "values=%(values)s"), locals())
+ pass
+
+ def _fake_volume_create(self, context, options):
+ LOG.debug(_("Test: Volume create: %s"), options)
+ options['id'] = 123
+ global global_volume
+ global_volume = options
+ return options
+
+ def _fake_volume_get(self, context, volume_id):
+ LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
+ global global_volume
+ global_volume['id'] = volume_id
+ global_volume['availability_zone'] = None
+ return global_volume
+
+ def _fake_volume_update(self, context, volume_id, values):
+ LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
+ "values=%(values)s"), locals())
+ global scheduled_volume
+ scheduled_volume = {'id': volume_id, 'host': values['host']}
+ pass
+
+ def _fake_service_get_by_args(self, context, host, binary):
+ return "service"
+
+ def _fake_service_is_up_True(self, service):
+ return True
+
+ def _fake_service_is_up_False(self, service):
+ return False
+
+ def setUp(self, sched_class=None):
+ super(VsaSchedulerTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.context = context.get_admin_context()
+
+ if sched_class is None:
+ self.sched = FakeVsaLeastUsedScheduler()
+ else:
+ self.sched = sched_class
+
+ self.host_num = 10
+ self.drive_type_num = 5
+
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(self.sched,
+ '_provision_volume', self._fake_provision_volume)
+ self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
+
+ self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
+ self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+
+ self.created_types_lst = []
+
+ def tearDown(self):
+ for name in self.created_types_lst:
+ volume_types.purge(self.context, name)
+
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCase, self).tearDown()
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_0', 'host_2', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_no_drive_type(self):
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_not_enough_drives(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=3,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=0)
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ # check that everything was returned back
+ cur = self._get_service_states()
+ for k, v in prev.iteritems():
+ self.assertEqual(prev[k]['volume']['drive_qos_info'],
+ cur[k]['volume']['drive_qos_info'])
+
+ def test_vsa_sched_wrong_topic(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ states = self._get_service_states()
+ new_states = {}
+ new_states['host_0'] = {'compute': states['host_0']['volume']}
+ self.service_states = new_states
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_provision_volume(self):
+ global global_volume
+ global_volume = {}
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(request_spec['volumes'][0]['name'],
+ global_volume['display_name'])
+
+ def test_vsa_sched_no_free_drives(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ cur = self._get_service_states()
+ cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)
+
+ new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+ self._print_service_states()
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ new_request,
+ availability_zone=None)
+
+ def test_vsa_sched_forced_host(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10)
+
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
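+ # An availability zone of the form "nova:host_5" pins placement to that
+ # host: scheduling fails while the target service is unknown or down,
+ # and succeeds once it is reported up.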
+ self.assertRaises(exception.HostBinaryNotFound,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_False)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
+
+ def test_vsa_sched_create_single_volume_az(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ def _fake_volume_get_az(context, volume_id):
+ LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
+ return {'id': volume_id, 'availability_zone': 'nova:host_3'}
+
+ self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_3')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_3')
+
+ def test_vsa_sched_create_single_non_vsa_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ global global_volume
+ global_volume = {}
+ global_volume['volume_type_id'] = None
+
+ self.assertRaises(driver.NoValidHost,
+ self.sched.schedule_create_volume,
+ self.context,
+ 123,
+ availability_zone=None)
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
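+ # Register a VSA drive volume type; its extra_specs carry the drive
+ # name/type/size that the scheduler matches against each host's
+ # drive_qos_info.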
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_2')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_2')
+
+
+class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
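+ # Same fixtures, different policy: the most-available-capacity
+ # scheduler should pick the hosts with the most free capacity, which
+ # in these generated states are the highest-numbered hosts.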
+
+ def setUp(self):
+ super(VsaSchedulerTestCaseMostAvail, self).setUp(
+ FakeVsaMostAvailCapacityScheduler())
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_9')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_9')
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self._print_service_states()
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')
+
+ cur = self._get_service_states()
+ for host in ['host_9', 'host_8', 'host_7']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
index 06cc498ac..aaa633adc 100644
--- a/nova/tests/test_adminapi.py
+++ b/nova/tests/test_adminapi.py
@@ -38,8 +38,6 @@ class AdminApiTestCase(test.TestCase):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
- self.conn = rpc.create_connection()
-
# set up our cloud
self.api = admin.AdminController()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 0793784f8..d533a4794 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -38,6 +38,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
+from nova.compute import vm_states
from nova.image import fake
@@ -51,8 +52,6 @@ class CloudTestCase(test.TestCase):
self.flags(connection_type='fake',
stub_network=True)
- self.conn = rpc.create_connection()
-
# set up our cloud
self.cloud = cloud.CloudController()
@@ -1163,7 +1162,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
- """Wait for an stopping instance to be a given state"""
+ """Wait for a stopped instance to be a given state"""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
@@ -1174,12 +1173,16 @@ class CloudTestCase(test.TestCase):
def _wait_for_running(self, instance_id):
def is_running(info):
- return info['state_description'] == 'running'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
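+ # The instance has settled only when vm_state matches and no task
+ # transition is still in flight (task_state is None).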
+ return vm_state == vm_states.ACTIVE and task_state is None
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
- return info['state_description'] == 'stopped'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+ return vm_state == vm_states.STOPPED and task_state is None
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
@@ -1562,7 +1565,7 @@ class CloudTestCase(test.TestCase):
'id': 0,
'root_device_name': '/dev/sdh',
'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
- 'state_description': 'stopping',
+ 'vm_state': vm_states.STOPPED,
'instance_type': {'name': 'fake_type'},
'kernel_id': 1,
'ramdisk_id': 2,
@@ -1606,7 +1609,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
- 'instanceInitiatedShutdownBehavior': 'stop'})
+ 'instanceInitiatedShutdownBehavior': 'stopped'})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 6659b81eb..766a7da9b 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -24,6 +24,7 @@ from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
+from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -763,8 +764,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
@@ -795,8 +796,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
self.compute.db = dbmock
@@ -841,8 +842,8 @@ class ComputeTestCase(test.TestCase):
c = context.get_admin_context()
instance_id = self._create_instance()
i_ref = db.instance_get(c, instance_id)
- db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
- 'state': power_state.PAUSED})
+ db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_id': instance_id})
@@ -903,7 +904,7 @@ class ComputeTestCase(test.TestCase):
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
+ self.assertEqual(power_state.NOSTATE, instances[0]['power_state'])
def test_get_all_by_name_regexp(self):
"""Test searching instances by name (display_name)"""
@@ -1323,25 +1324,28 @@ class ComputeTestCase(test.TestCase):
"""Test searching instances by state"""
c = context.get_admin_context()
- instance_id1 = self._create_instance({'state': power_state.SHUTDOWN})
+ instance_id1 = self._create_instance({
+ 'power_state': power_state.SHUTDOWN,
+ })
instance_id2 = self._create_instance({
- 'id': 2,
- 'state': power_state.RUNNING})
+ 'id': 2,
+ 'power_state': power_state.RUNNING,
+ })
instance_id3 = self._create_instance({
- 'id': 10,
- 'state': power_state.RUNNING})
-
+ 'id': 10,
+ 'power_state': power_state.RUNNING,
+ })
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SUSPENDED})
+ search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SHUTDOWN})
+ search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0].id, instance_id1)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.RUNNING})
+ search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_ids = [instance.id for instance in instances]
self.assertTrue(instance_id2 in instance_ids)
@@ -1349,7 +1353,7 @@ class ComputeTestCase(test.TestCase):
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
- search_opts={'state': [power_state.SHUTDOWN,
+ search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
new file mode 100644
index 000000000..b2507fa59
--- /dev/null
+++ b/nova/tests/test_context.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import test
+
+
+class ContextTestCase(test.TestCase):
+
+ def test_request_context_sets_is_admin(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['admin', 'weasel'])
+ self.assertEquals(ctxt.is_admin, True)
+
+ def test_request_context_sets_is_admin_upcase(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['Admin', 'weasel'])
+ self.assertEquals(ctxt.is_admin, True)
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index ef271518c..09f532239 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -47,6 +47,29 @@ class InstanceTypeTestCase(test.TestCase):
self.id = max_id["id"] + 1
self.name = str(int(time.time()))
+ def _nonexistent_flavor_name(self):
+ """return an instance type name not in the DB"""
+ nonexistent_flavor = "sdfsfsdf"
+ flavors = instance_types.get_all_types()
+ while nonexistent_flavor in flavors:
+ nonexistent_flavor += "z"
+ return nonexistent_flavor
+
+ def _nonexistent_flavor_id(self):
+ """return an instance type ID not in the DB"""
+ nonexistent_flavor = 2700
+ flavor_ids = [value["id"] for key, value in\
+ instance_types.get_all_types().iteritems()]
+ while nonexistent_flavor in flavor_ids:
+ nonexistent_flavor += 1
+ return nonexistent_flavor
+
+ def _existing_flavor(self):
+ """return first instance type name"""
+ return instance_types.get_all_types().keys()[0]
+
def test_instance_type_create_then_delete(self):
"""Ensure instance types can be created"""
starting_inst_list = instance_types.get_all_types()
@@ -84,10 +107,11 @@ class InstanceTypeTestCase(test.TestCase):
exception.InvalidInput,
instance_types.create, self.name, 256, 1, "aa", self.flavorid)
- def test_non_existant_inst_type_shouldnt_delete(self):
+ def test_non_existent_inst_type_shouldnt_delete(self):
"""Ensures that instance type creation fails with invalid args"""
self.assertRaises(exception.ApiError,
- instance_types.destroy, "sfsfsdfdfs")
+ instance_types.destroy,
+ self._nonexistent_flavor_name())
def test_repeated_inst_types_should_raise_api_error(self):
"""Ensures that instance duplicates raises ApiError"""
@@ -97,3 +121,43 @@ class InstanceTypeTestCase(test.TestCase):
self.assertRaises(
exception.ApiError,
instance_types.create, new_name, 256, 1, 120, self.flavorid)
+
+ def test_will_not_destroy_with_no_name(self):
+ """Ensure destroy sad path of no name raises error"""
+ self.assertRaises(exception.ApiError,
+ instance_types.destroy,
+ self._nonexistent_flavor_name())
+
+ def test_will_not_purge_without_name(self):
+ """Ensure purge without a name raises error"""
+ self.assertRaises(exception.InvalidInstanceType,
+ instance_types.purge, None)
+
+ def test_will_not_purge_with_wrong_name(self):
+ """Ensure purge without correct name raises error"""
+ self.assertRaises(exception.ApiError,
+ instance_types.purge,
+ self._nonexistent_flavor_name())
+
+ def test_will_not_get_bad_default_instance_type(self):
+ """ensures error raised on bad default instance type"""
+ self.flags(default_instance_type=self._nonexistent_flavor_name())
+ self.assertRaises(exception.InstanceTypeNotFoundByName,
+ instance_types.get_default_instance_type)
+
+ def test_will_not_get_instance_type_by_name_with_no_name(self):
+ """Ensure get by name returns default flavor with no name"""
+ self.assertEqual(instance_types.get_default_instance_type(),
+ instance_types.get_instance_type_by_name(None))
+
+ def test_will_not_get_instance_type_with_bad_name(self):
+ """Ensure get by name returns default flavor with bad name"""
+ self.assertRaises(exception.InstanceTypeNotFound,
+ instance_types.get_instance_type,
+ self._nonexistent_flavor_name())
+
+ def test_will_not_get_flavor_by_bad_flavor_id(self):
+ """Ensure get by flavor raises error with wrong flavorid"""
+ self.assertRaises(exception.InstanceTypeNotFound,
+ instance_types.get_instance_type_by_name,
+ self._nonexistent_flavor_id())
diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py
index 04c1b5598..e1ba4aafb 100644
--- a/nova/tests/test_ipv6.py
+++ b/nova/tests/test_ipv6.py
@@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase):
def test_to_global_with_bad_prefix(self):
bad_prefix = '82'
self.assertRaises(TypeError, ipv6.to_global,
- bad_prefix,
+ bad_prefix,
'2001:db8::216:3eff:fe33:4455',
'test')
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 0b8539442..25ff940f0 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -371,6 +371,22 @@ class VlanNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
+ def test_cant_associate_associated_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
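+ # The stubbed floating IP already carries a fixed_ip mapping, i.e. it
+ # is in use, so a second association attempt must be rejected.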
+ def fake_floating_ip_get_by_address(context, address):
+ return {'address': '10.10.10.10',
+ 'fixed_ip': {'address': '10.0.0.1'}}
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+ fake_floating_ip_get_by_address)
+
+ self.assertRaises(exception.FloatingIpAlreadyInUse,
+ self.network.associate_floating_ip,
+ ctxt,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
class CommonNetworkTestCase(test.TestCase):
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
index f5ea68a03..520bfbea1 100644
--- a/nova/tests/test_nova_manage.py
+++ b/nova/tests/test_nova_manage.py
@@ -31,6 +31,7 @@ sys.dont_write_bytecode = False
import mox
import stubout
+import StringIO
from nova import context
from nova import db
from nova import exception
@@ -70,3 +71,156 @@ class FixedIpCommandsTestCase(test.TestCase):
self.assertRaises(SystemExit,
self.commands.unreserve,
'55.55.55.55')
+
+
+class NetworkCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkCommandsTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.commands = nova_manage.NetworkCommands()
+ self.context = context.get_admin_context()
+ self.net = {'id': 0,
+ 'label': 'fake',
+ 'injected': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': 'dead:beef::/64',
+ 'multi_host': False,
+ 'gateway_v6': 'dead:beef::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '8.8.8.8',
+ 'dns2': '8.8.4.4',
+ 'vlan': 200,
+ 'vpn_public_address': '10.0.0.2',
+ 'vpn_public_port': '2222',
+ 'vpn_private_address': '192.168.0.2',
+ 'dhcp_start': '192.168.0.3',
+ 'project_id': 'fake_project',
+ 'host': 'fake_host',
+ 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
+
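+ # These fakes close over self: individual tests assign self.fake_net
+ # and self.fake_update_value, and the DB stubs assert against them.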
+ def fake_network_get_by_cidr(context, cidr):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(cidr, self.fake_net['cidr'])
+ return db_fakes.FakeModel(self.fake_net)
+
+ def fake_network_update(context, network_id, values):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.assertEqual(values, self.fake_update_value)
+ self.fake_network_get_by_cidr = fake_network_get_by_cidr
+ self.fake_network_update = fake_network_update
+
+ def tearDown(self):
+ super(NetworkCommandsTestCase, self).tearDown()
+ self.stubs.UnsetAll()
+
+ def test_create(self):
+
+ def fake_create_networks(obj, context, **kwargs):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(kwargs['label'], 'Test')
+ self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
+ self.assertEqual(kwargs['multi_host'], False)
+ self.assertEqual(kwargs['num_networks'], 1)
+ self.assertEqual(kwargs['network_size'], 256)
+ self.assertEqual(kwargs['vlan_start'], 200)
+ self.assertEqual(kwargs['vpn_start'], 2000)
+ self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
+ self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
+ self.assertEqual(kwargs['bridge'], 'br200')
+ self.assertEqual(kwargs['bridge_interface'], 'eth0')
+ self.assertEqual(kwargs['dns1'], '8.8.8.8')
+ self.assertEqual(kwargs['dns2'], '8.8.4.4')
+ self.flags(network_manager='nova.network.manager.VlanManager')
+ from nova.network import manager as net_manager
+ self.stubs.Set(net_manager.VlanManager, 'create_networks',
+ fake_create_networks)
+ self.commands.create(
+ label='Test',
+ fixed_range_v4='10.2.0.0/24',
+ num_networks=1,
+ network_size=256,
+ multi_host='F',
+ vlan_start=200,
+ vpn_start=2000,
+ fixed_range_v6='fd00:2::/120',
+ gateway_v6='fd00:2::22',
+ bridge='br200',
+ bridge_interface='eth0',
+ dns1='8.8.8.8',
+ dns2='8.8.4.4')
+
+ def test_list(self):
+
+ def fake_network_get_all(context):
+ return [db_fakes.FakeModel(self.net)]
+ self.stubs.Set(db, 'network_get_all', fake_network_get_all)
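+ # Capture the command's printed table by swapping sys.stdout for a
+ # StringIO buffer, then compare it with the expected header and row.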
+ output = StringIO.StringIO()
+ sys.stdout = output
+ self.commands.list()
+ sys.stdout = sys.__stdout__
+ result = output.getvalue()
+ _fmt = "%(id)-5s\t%(cidr)-18s\t%(cidr_v6)-15s\t%(dhcp_start)-15s\t" +\
+ "%(dns1)-15s\t%(dns2)-15s\t%(vlan)-15s\t%(project_id)-15s\t" +\
+ "%(uuid)-15s"
+ head = _fmt % {'id': _('id'),
+ 'cidr': _('IPv4'),
+ 'cidr_v6': _('IPv6'),
+ 'dhcp_start': _('start address'),
+ 'dns1': _('DNS1'),
+ 'dns2': _('DNS2'),
+ 'vlan': _('VlanID'),
+ 'project_id': _('project'),
+ 'uuid': _("uuid")}
+ body = _fmt % {'id': self.net['id'],
+ 'cidr': self.net['cidr'],
+ 'cidr_v6': self.net['cidr_v6'],
+ 'dhcp_start': self.net['dhcp_start'],
+ 'dns1': self.net['dns1'],
+ 'dns2': self.net['dns2'],
+ 'vlan': self.net['vlan'],
+ 'project_id': self.net['project_id'],
+ 'uuid': self.net['uuid']}
+ answer = '%s\n%s\n' % (head, body)
+ self.assertEqual(result, answer)
+
+ def test_delete(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(fixed_range=self.fake_net['cidr'])
+
+ def _test_modify_base(self, update_value, project, host, dis_project=None,
+ dis_host=None):
+ self.fake_net = self.net
+ self.fake_update_value = update_value
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+ self.stubs.Set(db, 'network_update', self.fake_network_update)
+ self.commands.modify(self.fake_net['cidr'], project=project, host=host,
+ dis_project=dis_project, dis_host=dis_host)
+
+ def test_modify_associate(self):
+ self._test_modify_base(update_value={'project_id': 'test_project',
+ 'host': 'test_host'},
+ project='test_project', host='test_host')
+
+ def test_modify_unchanged(self):
+ self._test_modify_base(update_value={}, project=None, host=None)
+
+ def test_modify_disassociate(self):
+ self._test_modify_base(update_value={'project_id': None, 'host': None},
+ project=None, host=None, dis_project=True,
+ dis_host=True)
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index ba9c0a859..6b4454747 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -22,168 +22,16 @@ Unit Tests for remote procedure calls using queue
from nova import context
from nova import log as logging
from nova import rpc
-from nova import test
+from nova.tests import test_rpc_common
LOG = logging.getLogger('nova.tests.rpc')
-class RpcTestCase(test.TestCase):
+class RpcTestCase(test_rpc_common._BaseRpcTestCase):
def setUp(self):
+ self.rpc = rpc
super(RpcTestCase, self).setUp()
- self.conn = rpc.create_connection(True)
- self.receiver = TestReceiver()
- self.consumer = rpc.create_consumer(self.conn,
- 'test',
- self.receiver,
- False)
- self.consumer.attach_to_eventlet()
- self.context = context.get_admin_context()
- def test_call_succeed(self):
- value = 42
- result = rpc.call(self.context, 'test', {"method": "echo",
- "args": {"value": value}})
- self.assertEqual(value, result)
-
- def test_call_succeed_despite_multiple_returns(self):
- value = 42
- result = rpc.call(self.context, 'test', {"method": "echo_three_times",
- "args": {"value": value}})
- self.assertEqual(value + 2, result)
-
- def test_call_succeed_despite_multiple_returns_yield(self):
- value = 42
- result = rpc.call(self.context, 'test',
- {"method": "echo_three_times_yield",
- "args": {"value": value}})
- self.assertEqual(value + 2, result)
-
- def test_multicall_succeed_once(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo",
- "args": {"value": value}})
- for i, x in enumerate(result):
- if i > 0:
- self.fail('should only receive one response')
- self.assertEqual(value + i, x)
-
- def test_multicall_succeed_three_times(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo_three_times",
- "args": {"value": value}})
- for i, x in enumerate(result):
- self.assertEqual(value + i, x)
-
- def test_multicall_succeed_three_times_yield(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo_three_times_yield",
- "args": {"value": value}})
- for i, x in enumerate(result):
- self.assertEqual(value + i, x)
-
- def test_context_passed(self):
- """Makes sure a context is passed through rpc call."""
- value = 42
- result = rpc.call(self.context,
- 'test', {"method": "context",
- "args": {"value": value}})
- self.assertEqual(self.context.to_dict(), result)
-
- def test_call_exception(self):
- """Test that exception gets passed back properly.
-
- rpc.call returns a RemoteError object. The value of the
- exception is converted to a string, so we convert it back
- to an int in the test.
-
- """
- value = 42
- self.assertRaises(rpc.RemoteError,
- rpc.call,
- self.context,
- 'test',
- {"method": "fail",
- "args": {"value": value}})
- try:
- rpc.call(self.context,
- 'test',
- {"method": "fail",
- "args": {"value": value}})
- self.fail("should have thrown rpc.RemoteError")
- except rpc.RemoteError as exc:
- self.assertEqual(int(exc.value), value)
-
- def test_nested_calls(self):
- """Test that we can do an rpc.call inside another call."""
- class Nested(object):
- @staticmethod
- def echo(context, queue, value):
- """Calls echo in the passed queue"""
- LOG.debug(_("Nested received %(queue)s, %(value)s")
- % locals())
- # TODO: so, it will replay the context and use the same REQID?
- # that's bizarre.
- ret = rpc.call(context,
- queue,
- {"method": "echo",
- "args": {"value": value}})
- LOG.debug(_("Nested return %s"), ret)
- return value
-
- nested = Nested()
- conn = rpc.create_connection(True)
- consumer = rpc.create_consumer(conn,
- 'nested',
- nested,
- False)
- consumer.attach_to_eventlet()
- value = 42
- result = rpc.call(self.context,
- 'nested', {"method": "echo",
- "args": {"queue": "test",
- "value": value}})
- self.assertEqual(value, result)
-
-
-class TestReceiver(object):
- """Simple Proxy class so the consumer has methods to call.
-
- Uses static methods because we aren't actually storing any state.
-
- """
-
- @staticmethod
- def echo(context, value):
- """Simply returns whatever value is sent in."""
- LOG.debug(_("Received %s"), value)
- return value
-
- @staticmethod
- def context(context, value):
- """Returns dictionary version of context."""
- LOG.debug(_("Received %s"), context)
- return context.to_dict()
-
- @staticmethod
- def echo_three_times(context, value):
- context.reply(value)
- context.reply(value + 1)
- context.reply(value + 2)
-
- @staticmethod
- def echo_three_times_yield(context, value):
- yield value
- yield value + 1
- yield value + 2
-
- @staticmethod
- def fail(context, value):
- """Raises an exception with the value sent in."""
- raise Exception(value)
+ def tearDown(self):
+ super(RpcTestCase, self).tearDown()
diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py
deleted file mode 100644
index 2215a908b..000000000
--- a/nova/tests/test_rpc_amqp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Openstack, LLC.
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests For RPC AMQP.
-"""
-
-from nova import context
-from nova import log as logging
-from nova import rpc
-from nova.rpc import amqp
-from nova import test
-
-
-LOG = logging.getLogger('nova.tests.rpc')
-
-
-class RpcAMQPTestCase(test.TestCase):
- def setUp(self):
- super(RpcAMQPTestCase, self).setUp()
- self.conn = rpc.create_connection(True)
- self.receiver = TestReceiver()
- self.consumer = rpc.create_consumer(self.conn,
- 'test',
- self.receiver,
- False)
- self.consumer.attach_to_eventlet()
- self.context = context.get_admin_context()
-
- def test_connectionpool_single(self):
- """Test that ConnectionPool recycles a single connection."""
- conn1 = amqp.ConnectionPool.get()
- amqp.ConnectionPool.put(conn1)
- conn2 = amqp.ConnectionPool.get()
- amqp.ConnectionPool.put(conn2)
- self.assertEqual(conn1, conn2)
-
-
-class TestReceiver(object):
- """Simple Proxy class so the consumer has methods to call.
-
- Uses static methods because we aren't actually storing any state.
-
- """
-
- @staticmethod
- def echo(context, value):
- """Simply returns whatever value is sent in."""
- LOG.debug(_("Received %s"), value)
- return value
-
- @staticmethod
- def context(context, value):
- """Returns dictionary version of context."""
- LOG.debug(_("Received %s"), context)
- return context.to_dict()
-
- @staticmethod
- def echo_three_times(context, value):
- context.reply(value)
- context.reply(value + 1)
- context.reply(value + 2)
-
- @staticmethod
- def echo_three_times_yield(context, value):
- yield value
- yield value + 1
- yield value + 2
-
- @staticmethod
- def fail(context, value):
- """Raises an exception with the value sent in."""
- raise Exception(value)
diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py
new file mode 100644
index 000000000..57cdebf4f
--- /dev/null
+++ b/nova/tests/test_rpc_carrot.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using carrot
+"""
+
+from nova import context
+from nova import log as logging
+from nova.rpc import impl_carrot
+from nova.tests import test_rpc_common
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase):
+ def setUp(self):
+ self.rpc = impl_carrot
+ super(RpcCarrotTestCase, self).setUp()
+
+ def tearDown(self):
+ super(RpcCarrotTestCase, self).tearDown()
+
+ def test_connectionpool_single(self):
+ """Test that ConnectionPool recycles a single connection."""
+ conn1 = self.rpc.ConnectionPool.get()
+ self.rpc.ConnectionPool.put(conn1)
+ conn2 = self.rpc.ConnectionPool.get()
+ self.rpc.ConnectionPool.put(conn2)
+ self.assertEqual(conn1, conn2)
diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py
new file mode 100644
index 000000000..4ab4e8a0e
--- /dev/null
+++ b/nova/tests/test_rpc_common.py
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls shared between all implementations
+"""
+
+from nova import context
+from nova import log as logging
+from nova.rpc.common import RemoteError
+from nova import test
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
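+# Shared RPC tests: concrete subclasses assign self.rpc to an
+# implementation module (e.g. impl_carrot, impl_kombu) before calling
+# setUp(), so every test below runs unchanged against each backend.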
+class _BaseRpcTestCase(test.TestCase):
+ def setUp(self):
+ super(_BaseRpcTestCase, self).setUp()
+ self.conn = self.rpc.create_connection(True)
+ self.receiver = TestReceiver()
+ self.conn.create_consumer('test', self.receiver, False)
+ self.conn.consume_in_thread()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.conn.close()
+ super(_BaseRpcTestCase, self).tearDown()
+
+ def test_call_succeed(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test', {"method": "echo",
+ "args": {"value": value}})
+ self.assertEqual(value, result)
+
+ def test_call_succeed_despite_multiple_returns(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test',
+ {"method": "echo_three_times",
+ "args": {"value": value}})
+ self.assertEqual(value + 2, result)
+
+ def test_call_succeed_despite_multiple_returns_yield(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test',
+ {"method": "echo_three_times_yield",
+ "args": {"value": value}})
+ self.assertEqual(value + 2, result)
+
+ def test_multicall_succeed_once(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ if i > 0:
+ self.fail('should only receive one response')
+ self.assertEqual(value + i, x)
+
+ def test_multicall_succeed_three_times(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo_three_times",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ self.assertEqual(value + i, x)
+
+ def test_multicall_succeed_three_times_yield(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo_three_times_yield",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ self.assertEqual(value + i, x)
+
+ def test_context_passed(self):
+ """Makes sure a context is passed through rpc call."""
+ value = 42
+ result = self.rpc.call(self.context,
+ 'test', {"method": "context",
+ "args": {"value": value}})
+ self.assertEqual(self.context.to_dict(), result)
+
+ def test_call_exception(self):
+ """Test that exception gets passed back properly.
+
+ rpc.call returns a RemoteError object. The value of the
+ exception is converted to a string, so we convert it back
+ to an int in the test.
+
+ """
+ value = 42
+ self.assertRaises(RemoteError,
+ self.rpc.call,
+ self.context,
+ 'test',
+ {"method": "fail",
+ "args": {"value": value}})
+ try:
+ self.rpc.call(self.context,
+ 'test',
+ {"method": "fail",
+ "args": {"value": value}})
+ self.fail("should have thrown RemoteError")
+ except RemoteError as exc:
+ self.assertEqual(int(exc.value), value)
+
+ def test_nested_calls(self):
+ """Test that we can do an rpc.call inside another call."""
+ class Nested(object):
+ @staticmethod
+ def echo(context, queue, value):
+ """Calls echo in the passed queue"""
+ LOG.debug(_("Nested received %(queue)s, %(value)s")
+ % locals())
+ # TODO: so, it will replay the context and use the same REQID?
+ # that's bizarre.
+ ret = self.rpc.call(context,
+ queue,
+ {"method": "echo",
+ "args": {"value": value}})
+ LOG.debug(_("Nested return %s"), ret)
+ return value
+
+ nested = Nested()
+ conn = self.rpc.create_connection(True)
+ conn.create_consumer('nested', nested, False)
+ conn.consume_in_thread()
+ value = 42
+ result = self.rpc.call(self.context,
+ 'nested', {"method": "echo",
+ "args": {"queue": "test",
+ "value": value}})
+ conn.close()
+ self.assertEqual(value, result)
+
+
+class TestReceiver(object):
+ """Simple Proxy class so the consumer has methods to call.
+
+ Uses static methods because we aren't actually storing any state.
+
+ """
+
+ @staticmethod
+ def echo(context, value):
+ """Simply returns whatever value is sent in."""
+ LOG.debug(_("Received %s"), value)
+ return value
+
+ @staticmethod
+ def context(context, value):
+ """Returns dictionary version of context."""
+ LOG.debug(_("Received %s"), context)
+ return context.to_dict()
+
+ @staticmethod
+ def echo_three_times(context, value):
+ context.reply(value)
+ context.reply(value + 1)
+ context.reply(value + 2)
+
+ @staticmethod
+ def echo_three_times_yield(context, value):
+ yield value
+ yield value + 1
+ yield value + 2
+
+ @staticmethod
+ def fail(context, value):
+ """Raises an exception with the value sent in."""
+ raise Exception(value)
diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py
new file mode 100644
index 000000000..101ed14af
--- /dev/null
+++ b/nova/tests/test_rpc_kombu.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using kombu
+"""
+
+from nova import context
+from nova import log as logging
+from nova import test
+from nova.rpc import impl_kombu
+from nova.tests import test_rpc_common
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase):
+ def setUp(self):
+ self.rpc = impl_kombu
+ super(RpcKombuTestCase, self).setUp()
+
+ def tearDown(self):
+ super(RpcKombuTestCase, self).tearDown()
+
+ def test_reusing_connection(self):
+ """Test that reusing a connection returns same one."""
+ conn_context = self.rpc.create_connection(new=False)
+ conn1 = conn_context.connection
+ conn_context.close()
+ conn_context = self.rpc.create_connection(new=False)
+ conn2 = conn_context.connection
+ conn_context.close()
+ self.assertEqual(conn1, conn2)
+
+ def test_topic_send_receive(self):
+ """Test sending to a topic exchange/queue"""
+
+ conn = self.rpc.create_connection()
+ message = 'topic test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
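+ # Declare the consumer, publish, then pump exactly one message off
+ # the queue before closing the connection.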
+ conn.declare_topic_consumer('a_topic', _callback)
+ conn.topic_send('a_topic', message)
+ conn.consume(limit=1)
+ conn.close()
+
+ self.assertEqual(self.received_message, message)
+
+ def test_direct_send_receive(self):
+ """Test sending to a direct exchange/queue"""
+ conn = self.rpc.create_connection()
+ message = 'direct test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_direct_consumer('a_direct', _callback)
+ conn.direct_send('a_direct', message)
+ conn.consume(limit=1)
+ conn.close()
+
+ self.assertEqual(self.received_message, message)
+
+ @test.skip_test("kombu memory transport seems buggy with fanout queues "
+ "as this test passes when you use rabbit (fake_rabbit=False)")
+ def test_fanout_send_receive(self):
+ """Test sending to a fanout exchange and consuming from 2 queues"""
+
+ conn = self.rpc.create_connection()
+ conn2 = self.rpc.create_connection()
+ message = 'fanout test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_fanout_consumer('a_fanout', _callback)
+ conn2.declare_fanout_consumer('a_fanout', _callback)
+ conn.fanout_send('a_fanout', message)
+
+ conn.consume(limit=1)
+ conn.close()
+ self.assertEqual(self.received_message, message)
+
+ self.received_message = None
+ conn2.consume(limit=1)
+ conn2.close()
+ self.assertEqual(self.received_message, message)
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index 64f11fa45..3482ff6a0 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -40,6 +40,5 @@ class IsolationTestCase(test.TestCase):
connection = rpc.create_connection(new=True)
proxy = NeverCalled()
- consumer = rpc.create_consumer(connection, 'compute',
- proxy, fanout=False)
- consumer.attach_to_eventlet()
+ connection.create_consumer('compute', proxy, fanout=False)
+ connection.consume_in_thread()
diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py
new file mode 100644
index 000000000..237339758
--- /dev/null
+++ b/nova/tests/test_test_utils.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova import test
+from nova.tests import utils as test_utils
+
+
+class TestUtilsTestCase(test.TestCase):
+ def test_get_test_admin_context(self):
+ """get_test_admin_context's return value behaves like admin context"""
+ ctxt = test_utils.get_test_admin_context()
+
+ # TODO(soren): This should verify the full interface context
+ # objects expose.
+ self.assertTrue(ctxt.is_admin)
+
+ def test_get_test_instance(self):
+ """get_test_instance's return value looks like an instance_ref"""
+ instance_ref = test_utils.get_test_instance()
+ ctxt = test_utils.get_test_admin_context()
+ db.instance_get(ctxt, instance_ref['id'])
+
+ def _test_get_test_network_info(self):
+ """Does the return value match a real network_info structure"""
+ # The challenge here is to define what exactly such a structure
+ # must look like.
+ pass
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
new file mode 100644
index 000000000..480247c91
--- /dev/null
+++ b/nova/tests/test_virt_drivers.py
@@ -0,0 +1,489 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import netaddr
+import sys
+import traceback
+
+from nova import exception
+from nova import flags
+from nova import image
+from nova import log as logging
+from nova import test
+from nova.tests import utils as test_utils
+
+libvirt = None
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.test_virt_drivers')
+
+
+def catch_notimplementederror(f):
+ """Decorator to simplify catching drivers raising NotImplementedError
+
+ If a particular call makes a driver raise NotImplementedError, we
+ log it so that we can extract this information afterwards to
+ automatically generate a hypervisor/feature support matrix."""
+ def wrapped_func(self, *args, **kwargs):
+ try:
+ return f(self, *args, **kwargs)
+ except NotImplementedError:
+ frame = traceback.extract_tb(sys.exc_info()[2])[-1]
+ LOG.error('%(driver)s does not implement %(method)s' % {
+ 'driver': type(self.connection),
+ 'method': frame[2]})
+
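+ # Manually propagate the test's name and docstring (a stand-in for
+ # functools.wraps) so the runner reports the real test, not the
+ # wrapper.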
+ wrapped_func.__name__ = f.__name__
+ wrapped_func.__doc__ = f.__doc__
+ return wrapped_func
+
+
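+# Driver-agnostic interface tests: concrete subclasses are expected to
+# provide self.driver_module, and the leading underscore keeps the test
+# runner from collecting this base class directly.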
+class _VirtDriverTestCase(test.TestCase):
+ def setUp(self):
+ super(_VirtDriverTestCase, self).setUp()
+ self.connection = self.driver_module.get_connection('')
+ self.ctxt = test_utils.get_test_admin_context()
+ self.image_service = image.get_default_image_service()
+
+ @catch_notimplementederror
+ def test_init_host(self):
+ self.connection.init_host('myhostname')
+
+ @catch_notimplementederror
+ def test_list_instances(self):
+ self.connection.list_instances()
+
+ @catch_notimplementederror
+ def test_list_instances_detail(self):
+ self.connection.list_instances_detail()
+
+ @catch_notimplementederror
+ def test_spawn(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+
+ domains = self.connection.list_instances()
+ self.assertIn(instance_ref['name'], domains)
+
+ domains_details = self.connection.list_instances_detail()
+ self.assertIn(instance_ref['name'], [i.name for i in domains_details])
+
+ @catch_notimplementederror
+ def test_snapshot_not_running(self):
+ instance_ref = test_utils.get_test_instance()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.assertRaises(exception.InstanceNotRunning,
+ self.connection.snapshot,
+ self.ctxt, instance_ref, img_ref['id'])
+
+ @catch_notimplementederror
+ def test_snapshot_running(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])
+
+ @catch_notimplementederror
+ def test_reboot(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.reboot(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_get_host_ip_addr(self):
+ host_ip = self.connection.get_host_ip_addr()
+
+ # Will raise an exception if it's not a valid IP at all
+ ip = netaddr.IPAddress(host_ip)
+
+ # For now, assume IPv4.
+ self.assertEquals(ip.version, 4)
+
+ @catch_notimplementederror
+ def test_resize_running(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.resize(instance_ref, 7)
+
+ @catch_notimplementederror
+ def test_set_admin_password(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.set_admin_password(instance_ref, 'p4ssw0rd')
+
+ @catch_notimplementederror
+ def test_inject_file(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.inject_file(instance_ref,
+ base64.b64encode('/testfile'),
+ base64.b64encode('testcontents'))
+
+ @catch_notimplementederror
+ def test_agent_update(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.agent_update(instance_ref, 'http://www.openstack.org/',
+ 'd41d8cd98f00b204e9800998ecf8427e')
+
+ @catch_notimplementederror
+ def test_rescue(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.rescue(self.ctxt, instance_ref,
+ lambda x: None, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_unrescued_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.unrescue(instance_ref, lambda x: None, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_rescued_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.rescue(self.ctxt, instance_ref,
+ lambda x: None, network_info)
+ self.connection.unrescue(instance_ref, lambda x: None, network_info)
+
+ @catch_notimplementederror
+ def test_poll_rescued_instances(self):
+ self.connection.poll_rescued_instances(10)
+
+ @catch_notimplementederror
+ def test_migrate_disk_and_power_off(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.migrate_disk_and_power_off(instance_ref, 'dest_host')
+
+ @catch_notimplementederror
+ def test_pause(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.pause(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_unpause_unpaused_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.unpause(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_unpause_paused_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.pause(instance_ref, None)
+ self.connection.unpause(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_suspend(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.suspend(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_resume_unsuspended_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.resume(instance_ref, None)
+
+ @catch_notimplementederror
+ def test_resume_suspended_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.suspend(instance_ref, None)
+ self.connection.resume(instance_ref, None)
+
+ @catch_notimplementederror
+    def test_destroy_instance_nonexistent(self):
+ fake_instance = {'id': 42, 'name': 'I just made this up!'}
+ network_info = test_utils.get_test_network_info()
+ self.connection.destroy(fake_instance, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.assertIn(instance_ref['name'],
+ self.connection.list_instances())
+ self.connection.destroy(instance_ref, network_info)
+ self.assertNotIn(instance_ref['name'],
+ self.connection.list_instances())
+
+ @catch_notimplementederror
+ def test_attach_detach_volume(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.attach_volume(instance_ref['name'],
+ '/dev/null', '/mnt/nova/something')
+ self.connection.detach_volume(instance_ref['name'],
+ '/mnt/nova/something')
+
+ @catch_notimplementederror
+ def test_get_info(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ info = self.connection.get_info(instance_ref['name'])
+ self.assertIn('state', info)
+ self.assertIn('max_mem', info)
+ self.assertIn('mem', info)
+ self.assertIn('num_cpu', info)
+ self.assertIn('cpu_time', info)
+
+ @catch_notimplementederror
+ def test_get_info_for_unknown_instance(self):
+ self.assertRaises(exception.NotFound,
+ self.connection.get_info, 'I just made this name up')
+
+ @catch_notimplementederror
+ def test_get_diagnostics(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.get_diagnostics(instance_ref['name'])
+
+ @catch_notimplementederror
+ def test_list_disks(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.list_disks(instance_ref['name'])
+
+ @catch_notimplementederror
+ def test_list_interfaces(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.list_interfaces(instance_ref['name'])
+
+ @catch_notimplementederror
+ def test_block_stats(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ stats = self.connection.block_stats(instance_ref['name'], 'someid')
+ self.assertEquals(len(stats), 5)
+
+ @catch_notimplementederror
+ def test_interface_stats(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ stats = self.connection.interface_stats(instance_ref['name'], 'someid')
+ self.assertEquals(len(stats), 8)
+
+ @catch_notimplementederror
+ def test_get_console_output(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ console_output = self.connection.get_console_output(instance_ref)
+ self.assertTrue(isinstance(console_output, basestring))
+
+ @catch_notimplementederror
+ def test_get_ajax_console(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ ajax_console = self.connection.get_ajax_console(instance_ref)
+ self.assertIn('token', ajax_console)
+ self.assertIn('host', ajax_console)
+ self.assertIn('port', ajax_console)
+
+ @catch_notimplementederror
+ def test_get_vnc_console(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ vnc_console = self.connection.get_vnc_console(instance_ref)
+ self.assertIn('token', vnc_console)
+ self.assertIn('host', vnc_console)
+ self.assertIn('port', vnc_console)
+
+ @catch_notimplementederror
+ def test_get_console_pool_info(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ console_pool = self.connection.get_console_pool_info(instance_ref)
+ self.assertIn('address', console_pool)
+ self.assertIn('username', console_pool)
+ self.assertIn('password', console_pool)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_rules(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ # FIXME: Create security group and add the instance to it
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.refresh_security_group_rules(1)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_members(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ # FIXME: Create security group and add the instance to it
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.refresh_security_group_members(1)
+
+ @catch_notimplementederror
+ def test_refresh_provider_fw_rules(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.refresh_provider_fw_rules()
+
+ @catch_notimplementederror
+ def test_update_available_resource(self):
+ self.compute = self.start_service('compute', host='dummy')
+ self.connection.update_available_resource(self.ctxt, 'dummy')
+
+ @catch_notimplementederror
+ def test_compare_cpu(self):
+ cpu_info = '''{ "topology": {
+ "sockets": 1,
+ "cores": 2,
+ "threads": 1 },
+ "features": [
+ "xtpr",
+ "tm2",
+ "est",
+ "vmx",
+ "ds_cpl",
+ "monitor",
+ "pbe",
+ "tm",
+ "ht",
+ "ss",
+ "acpi",
+ "ds",
+ "vme"],
+ "arch": "x86_64",
+ "model": "Penryn",
+ "vendor": "Intel" }'''
+
+ self.connection.compare_cpu(cpu_info)
+
+ @catch_notimplementederror
+ def test_ensure_filtering_for_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_unfilter_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.unfilter_instance(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_live_migration(self):
+ network_info = test_utils.get_test_network_info()
+ instance_ref = test_utils.get_test_instance()
+ self.connection.spawn(self.ctxt, instance_ref, network_info)
+ self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
+ None, None)
+
+ @catch_notimplementederror
+ def _check_host_status_fields(self, host_status):
+ self.assertIn('host_name-description', host_status)
+ self.assertIn('host_hostname', host_status)
+ self.assertIn('host_memory_total', host_status)
+ self.assertIn('host_memory_overhead', host_status)
+ self.assertIn('host_memory_free', host_status)
+ self.assertIn('host_memory_free_computed', host_status)
+ self.assertIn('host_other_config', host_status)
+ self.assertIn('host_ip_address', host_status)
+ self.assertIn('host_cpu_info', host_status)
+ self.assertIn('disk_available', host_status)
+ self.assertIn('disk_total', host_status)
+ self.assertIn('disk_used', host_status)
+ self.assertIn('host_uuid', host_status)
+ self.assertIn('host_name_label', host_status)
+
+ @catch_notimplementederror
+ def test_update_host_status(self):
+ host_status = self.connection.update_host_status()
+ self._check_host_status_fields(host_status)
+
+ @catch_notimplementederror
+ def test_get_host_stats(self):
+ host_status = self.connection.get_host_stats()
+ self._check_host_status_fields(host_status)
+
+ @catch_notimplementederror
+ def test_set_host_enabled(self):
+ self.connection.set_host_enabled('a useless argument?', True)
+
+ @catch_notimplementederror
+ def test_host_power_action_reboot(self):
+ self.connection.host_power_action('a useless argument?', 'reboot')
+
+ @catch_notimplementederror
+ def test_host_power_action_shutdown(self):
+ self.connection.host_power_action('a useless argument?', 'shutdown')
+
+ @catch_notimplementederror
+ def test_host_power_action_startup(self):
+ self.connection.host_power_action('a useless argument?', 'startup')
+
+
+class AbstractDriverTestCase(_VirtDriverTestCase):
+ def setUp(self):
+ import nova.virt.driver
+
+ self.driver_module = nova.virt.driver
+
+ def get_driver_connection(_):
+ return nova.virt.driver.ComputeDriver()
+
+ self.driver_module.get_connection = get_driver_connection
+ super(AbstractDriverTestCase, self).setUp()
+
+
+class FakeConnectionTestCase(_VirtDriverTestCase):
+ def setUp(self):
+ import nova.virt.fake
+ self.driver_module = nova.virt.fake
+ super(FakeConnectionTestCase, self).setUp()
+
+# Before long, we'll add the real hypervisor drivers here as well
+# with whatever instrumentation they need to work independently of
+# their hypervisor. This way, we can verify that they all act the
+# same.
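+#
+# A driver-specific case would follow the same pattern as the two cases
+# above. A minimal sketch (hypothetical -- no such test case exists yet;
+# it assumes the libvirt module can run against a test double):
+#
+#     class LibvirtConnectionTestCase(_VirtDriverTestCase):
+#         def setUp(self):
+#             import nova.virt.libvirt.connection
+#             self.driver_module = nova.virt.libvirt.connection
+#             super(LibvirtConnectionTestCase, self).setUp()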
diff --git a/nova/tests/test_volume_types.py b/nova/tests/test_volume_types.py
new file mode 100644
index 000000000..1e190805c
--- /dev/null
+++ b/nova/tests/test_volume_types.py
@@ -0,0 +1,207 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for volume types code
+"""
+import time
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.volume import volume_types
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.test_volume_types')
+
+
+class VolumeTypeTestCase(test.TestCase):
+ """Test cases for volume type code"""
+ def setUp(self):
+ super(VolumeTypeTestCase, self).setUp()
+
+ self.ctxt = context.get_admin_context()
+ self.vol_type1_name = str(int(time.time()))
+ self.vol_type1_specs = dict(
+ type="physical drive",
+ drive_type="SAS",
+ size="300",
+ rpm="7200",
+ visible="True")
+ self.vol_type1 = dict(name=self.vol_type1_name,
+ extra_specs=self.vol_type1_specs)
+
+ def test_volume_type_create_then_destroy(self):
+ """Ensure volume types can be created and deleted"""
+ prev_all_vtypes = volume_types.get_all_types(self.ctxt)
+
+ volume_types.create(self.ctxt,
+ self.vol_type1_name,
+ self.vol_type1_specs)
+ new = volume_types.get_volume_type_by_name(self.ctxt,
+ self.vol_type1_name)
+
+ LOG.info(_("Given data: %s"), self.vol_type1_specs)
+ LOG.info(_("Result data: %s"), new)
+
+ for k, v in self.vol_type1_specs.iteritems():
+ self.assertEqual(v, new['extra_specs'][k],
+                             'one of the fields does not match')
+
+ new_all_vtypes = volume_types.get_all_types(self.ctxt)
+ self.assertEqual(len(prev_all_vtypes) + 1,
+ len(new_all_vtypes),
+                         'volume type was not created')
+
+ volume_types.destroy(self.ctxt, self.vol_type1_name)
+ new_all_vtypes = volume_types.get_all_types(self.ctxt)
+ self.assertEqual(prev_all_vtypes,
+ new_all_vtypes,
+                         'volume type was not deleted')
+
+ def test_volume_type_create_then_purge(self):
+ """Ensure volume types can be created and deleted"""
+ prev_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1)
+
+ volume_types.create(self.ctxt,
+ self.vol_type1_name,
+ self.vol_type1_specs)
+ new = volume_types.get_volume_type_by_name(self.ctxt,
+ self.vol_type1_name)
+
+ for k, v in self.vol_type1_specs.iteritems():
+ self.assertEqual(v, new['extra_specs'][k],
+                             'one of the fields does not match')
+
+ new_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1)
+ self.assertEqual(len(prev_all_vtypes) + 1,
+ len(new_all_vtypes),
+                         'volume type was not created')
+
+ volume_types.destroy(self.ctxt, self.vol_type1_name)
+ new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1)
+ self.assertEqual(len(new_all_vtypes),
+ len(new_all_vtypes2),
+                         'volume type was incorrectly deleted')
+
+ volume_types.purge(self.ctxt, self.vol_type1_name)
+ new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1)
+ self.assertEqual(len(new_all_vtypes) - 1,
+ len(new_all_vtypes2),
+                         'volume type was not purged')
+
+ def test_get_all_volume_types(self):
+ """Ensures that all volume types can be retrieved"""
+ session = get_session()
+ total_volume_types = session.query(models.VolumeTypes).\
+ count()
+ vol_types = volume_types.get_all_types(self.ctxt)
+ self.assertEqual(total_volume_types, len(vol_types))
+
+    def test_non_existent_vol_type_shouldnt_delete(self):
+        """Ensures that deleting a non-existent volume type fails"""
+ self.assertRaises(exception.ApiError,
+ volume_types.destroy, self.ctxt, "sfsfsdfdfs")
+
+ def test_repeated_vol_types_should_raise_api_error(self):
+ """Ensures that volume duplicates raises ApiError"""
+ new_name = self.vol_type1_name + "dup"
+ volume_types.create(self.ctxt, new_name)
+ volume_types.destroy(self.ctxt, new_name)
+ self.assertRaises(
+ exception.ApiError,
+ volume_types.create, self.ctxt, new_name)
+
+ def test_invalid_volume_types_params(self):
+ """Ensures that volume type creation fails with invalid args"""
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.destroy, self.ctxt, None)
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.purge, self.ctxt, None)
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.get_volume_type, self.ctxt, None)
+ self.assertRaises(exception.InvalidVolumeType,
+ volume_types.get_volume_type_by_name,
+ self.ctxt, None)
+
+ def test_volume_type_get_by_id_and_name(self):
+ """Ensure volume types get returns same entry"""
+ volume_types.create(self.ctxt,
+ self.vol_type1_name,
+ self.vol_type1_specs)
+ new = volume_types.get_volume_type_by_name(self.ctxt,
+ self.vol_type1_name)
+
+ new2 = volume_types.get_volume_type(self.ctxt, new['id'])
+ self.assertEqual(new, new2)
+
+ def test_volume_type_search_by_extra_spec(self):
+ """Ensure volume types get by extra spec returns correct type"""
+ volume_types.create(self.ctxt, "type1", {"key1": "val1",
+ "key2": "val2"})
+ volume_types.create(self.ctxt, "type2", {"key2": "val2",
+ "key3": "val3"})
+ volume_types.create(self.ctxt, "type3", {"key3": "another_value",
+ "key4": "val4"})
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key1": "val1"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 1)
+ self.assertTrue("type1" in vol_types.keys())
+ self.assertEqual(vol_types['type1']['extra_specs'],
+ {"key1": "val1", "key2": "val2"})
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key2": "val2"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 2)
+ self.assertTrue("type1" in vol_types.keys())
+ self.assertTrue("type2" in vol_types.keys())
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key3": "val3"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 1)
+ self.assertTrue("type2" in vol_types.keys())
+
+ def test_volume_type_search_by_extra_spec_multiple(self):
+ """Ensure volume types get by extra spec returns correct type"""
+ volume_types.create(self.ctxt, "type1", {"key1": "val1",
+ "key2": "val2",
+ "key3": "val3"})
+ volume_types.create(self.ctxt, "type2", {"key2": "val2",
+ "key3": "val3"})
+ volume_types.create(self.ctxt, "type3", {"key1": "val1",
+ "key3": "val3",
+ "key4": "val4"})
+
+ vol_types = volume_types.get_all_types(self.ctxt,
+ search_opts={'extra_specs': {"key1": "val1",
+ "key3": "val3"}})
+ LOG.info("vol_types: %s" % vol_types)
+ self.assertEqual(len(vol_types), 2)
+ self.assertTrue("type1" in vol_types.keys())
+ self.assertTrue("type3" in vol_types.keys())
+ self.assertEqual(vol_types['type1']['extra_specs'],
+ {"key1": "val1", "key2": "val2", "key3": "val3"})
+ self.assertEqual(vol_types['type3']['extra_specs'],
+ {"key1": "val1", "key3": "val3", "key4": "val4"})
diff --git a/nova/tests/test_volume_types_extra_specs.py b/nova/tests/test_volume_types_extra_specs.py
new file mode 100644
index 000000000..017b187a1
--- /dev/null
+++ b/nova/tests/test_volume_types_extra_specs.py
@@ -0,0 +1,132 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2011 University of Southern California
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for volume types extra specs code
+"""
+
+from nova import context
+from nova import db
+from nova import test
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+
+class VolumeTypeExtraSpecsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VolumeTypeExtraSpecsTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.vol_type1 = dict(name="TEST: Regular volume test")
+ self.vol_type1_specs = dict(vol_extra1="value1",
+ vol_extra2="value2",
+ vol_extra3=3)
+ self.vol_type1['extra_specs'] = self.vol_type1_specs
+ ref = db.api.volume_type_create(self.context, self.vol_type1)
+ self.volume_type1_id = ref.id
+ for k, v in self.vol_type1_specs.iteritems():
+ self.vol_type1_specs[k] = str(v)
+
+ self.vol_type2_noextra = dict(name="TEST: Volume type without extra")
+ ref = db.api.volume_type_create(self.context, self.vol_type2_noextra)
+ self.vol_type2_id = ref.id
+
+ def tearDown(self):
+        # Remove the volume types from the database
+ db.api.volume_type_purge(context.get_admin_context(),
+ self.vol_type1['name'])
+ db.api.volume_type_purge(context.get_admin_context(),
+ self.vol_type2_noextra['name'])
+ super(VolumeTypeExtraSpecsTestCase, self).tearDown()
+
+ def test_volume_type_specs_get(self):
+ expected_specs = self.vol_type1_specs.copy()
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_extra_specs_delete(self):
+ expected_specs = self.vol_type1_specs.copy()
+ del expected_specs['vol_extra2']
+ db.api.volume_type_extra_specs_delete(context.get_admin_context(),
+ self.volume_type1_id,
+ 'vol_extra2')
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_extra_specs_update(self):
+ expected_specs = self.vol_type1_specs.copy()
+ expected_specs['vol_extra3'] = "4"
+ db.api.volume_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.volume_type1_id,
+ dict(vol_extra3=4))
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_extra_specs_create(self):
+ expected_specs = self.vol_type1_specs.copy()
+ expected_specs['vol_extra4'] = 'value4'
+ expected_specs['vol_extra5'] = 'value5'
+ db.api.volume_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.volume_type1_id,
+ dict(vol_extra4="value4",
+ vol_extra5="value5"))
+ actual_specs = db.api.volume_type_extra_specs_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_volume_type_get_with_extra_specs(self):
+ volume_type = db.api.volume_type_get(
+ context.get_admin_context(),
+ self.volume_type1_id)
+ self.assertEquals(volume_type['extra_specs'],
+ self.vol_type1_specs)
+
+ volume_type = db.api.volume_type_get(
+ context.get_admin_context(),
+ self.vol_type2_id)
+ self.assertEquals(volume_type['extra_specs'], {})
+
+ def test_volume_type_get_by_name_with_extra_specs(self):
+ volume_type = db.api.volume_type_get_by_name(
+ context.get_admin_context(),
+ self.vol_type1['name'])
+ self.assertEquals(volume_type['extra_specs'],
+ self.vol_type1_specs)
+
+ volume_type = db.api.volume_type_get_by_name(
+ context.get_admin_context(),
+ self.vol_type2_noextra['name'])
+ self.assertEquals(volume_type['extra_specs'], {})
+
+ def test_volume_type_get_all(self):
+ expected_specs = self.vol_type1_specs.copy()
+
+ types = db.api.volume_type_get_all(context.get_admin_context())
+
+ self.assertEquals(
+ types[self.vol_type1['name']]['extra_specs'], expected_specs)
+
+ self.assertEquals(
+ types[self.vol_type2_noextra['name']]['extra_specs'], {})
diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py
new file mode 100644
index 000000000..3d2d2de13
--- /dev/null
+++ b/nova/tests/test_vsa.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import stubout
+
+from xml.etree import ElementTree
+from xml.etree.ElementTree import Element, SubElement
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import vsa
+from nova import volume
+from nova.volume import volume_types
+from nova.vsa import utils as vsa_utils
+
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa')
+
+
+class VsaTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+
+ FLAGS.quota_volumes = 100
+ FLAGS.quota_gigabytes = 10000
+
+ self.context = context.get_admin_context()
+
+ volume_types.create(self.context,
+ 'SATA_500_7200',
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': 'SATA_500_7200',
+ 'drive_type': 'SATA',
+ 'drive_size': '500',
+ 'drive_rpm': '7200'})
+
+ def fake_show_by_name(meh, context, name):
+ if name == 'wrong_image_name':
+ LOG.debug(_("Test: Emulate wrong VSA name. Raise"))
+ raise exception.ImageNotFound
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaTestCase, self).tearDown()
+
+ def test_vsa_create_delete_defaults(self):
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['display_name'], param['display_name'])
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_delete_check_in_db(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ vsa_ref = self.vsa_api.create(self.context)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1) + 1)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+ vsa_list3 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list3), len(vsa_list2) - 1)
+
+ def test_vsa_create_delete_high_vc_count(self):
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_wrong_image_name(self):
+ param = {'image_name': 'wrong_image_name'}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_db_error(self):
+
+ def fake_vsa_create(context, options):
+ LOG.debug(_("Test: Emulate DB error. Raise"))
+ raise exception.Error
+
+ self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create)
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context)
+
+ def test_vsa_create_wrong_storage_params(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ param = {'storage': [{'stub': 1}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1))
+
+ param = {'storage': [{'drive_name': 'wrong name'}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_with_storage(self, multi_vol_creation=True):
+ """Test creation of VSA with BE storage"""
+
+ FLAGS.vsa_multi_vol_creation = multi_vol_creation
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 3)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}],
+ 'shared': True}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 15)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_with_storage_single_volumes(self):
+ self.test_vsa_create_with_storage(multi_vol_creation=False)
+
+ def test_vsa_update(self):
+ vsa_ref = self.vsa_api.create(self.context)
+
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+
+ param = {'vc_count': 2}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], 2)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_generate_user_data(self):
+
+ FLAGS.vsa_multi_vol_creation = False
+ param = {'display_name': 'VSA name test',
+ 'display_description': 'VSA desc test',
+ 'vc_count': 2,
+ 'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ volumes = self.vsa_api.get_all_vsa_drives(self.context,
+ vsa_ref['id'])
+
+ user_data = vsa_utils.generate_user_data(vsa_ref, volumes)
+ user_data = base64.b64decode(user_data)
+
+ LOG.debug(_("Test: user_data = %s"), user_data)
+
+ elem = ElementTree.fromstring(user_data)
+ self.assertEqual(elem.findtext('name'),
+ param['display_name'])
+ self.assertEqual(elem.findtext('description'),
+ param['display_description'])
+ self.assertEqual(elem.findtext('vc_count'),
+ str(param['vc_count']))
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
new file mode 100644
index 000000000..b7cd4e840
--- /dev/null
+++ b/nova/tests/test_vsa_volumes.py
@@ -0,0 +1,136 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+from nova import exception
+from nova import flags
+from nova import vsa
+from nova import volume
+from nova import db
+from nova import context
+from nova import test
+from nova import log as logging
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa.volumes')
+
+
+class VsaVolumesTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaVolumesTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+ self.context = context.get_admin_context()
+
+ self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context)
+
+ def fake_show_by_name(meh, context, name):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.vsa_id = vsa_ref['id']
+
+ def tearDown(self):
+ if self.vsa_id:
+ self.vsa_api.delete(self.context, self.vsa_id)
+ self.stubs.UnsetAll()
+ super(VsaVolumesTestCase, self).tearDown()
+
+ def _default_volume_param(self):
+ return {
+ 'size': 1,
+ 'snapshot_id': None,
+ 'name': 'Test volume name',
+ 'description': 'Test volume desc name',
+ 'volume_type': self.default_vol_type,
+ 'metadata': {'from_vsa_id': self.vsa_id}
+ }
+
+ def _get_all_volumes_by_vsa(self):
+ return self.volume_api.get_all(self.context,
+ search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}})
+
+ def test_vsa_volume_create_delete(self):
+ """ Check if volume properly created and deleted. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(1, len(vols2))
+ volume_ref = vols2[0]
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'available'})
+ self.volume_api.delete(self.context, volume_ref['id'])
+
+ vols3 = self._get_all_volumes_by_vsa()
+        self.assertEqual(1, len(vols3))
+ volume_ref = vols3[0]
+ self.assertEqual(volume_ref['status'],
+ 'deleting')
+
+ def test_vsa_volume_delete_nonavail_volume(self):
+ """ Check volume deleton in different states. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'in-use'})
+ self.assertRaises(exception.ApiError,
+ self.volume_api.delete,
+ self.context, volume_ref['id'])
+
+ def test_vsa_volume_delete_vsa_with_volumes(self):
+ """ Check volume deleton in different states. """
+
+ vols1 = self._get_all_volumes_by_vsa()
+ for i in range(3):
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1) + 3, len(vols2))
+
+ self.vsa_api.delete(self.context, self.vsa_id)
+
+ vols3 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1), len(vols3))
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 2f0559366..45dad3516 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -16,7 +16,6 @@
"""Test suite for XenAPI."""
-import eventlet
import functools
import json
import os
@@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
- def test_parallel_builds(self):
- stubs.stubout_loopingcall_delay(self.stubs)
-
- def _do_build(id, proj, user, *args):
- values = {
- 'id': id,
- 'project_id': proj,
- 'user_id': user,
- 'image_ref': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'architecture': 'x86-64'}
- network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
- {'broadcast': '192.168.0.255',
- 'dns': ['192.168.0.1'],
- 'gateway': '192.168.0.1',
- 'gateway6': 'dead:beef::1',
- 'ip6s': [{'enabled': '1',
- 'ip': 'dead:beef::dcad:beff:feef:0',
- 'netmask': '64'}],
- 'ips': [{'enabled': '1',
- 'ip': '192.168.0.100',
- 'netmask': '255.255.255.0'}],
- 'label': 'fake',
- 'mac': 'DE:AD:BE:EF:00:00',
- 'rxtx_cap': 3})]
- instance = db.instance_create(self.context, values)
- self.conn.spawn(self.context, instance, network_info)
-
- gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id)
- gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id)
- gt1.wait()
- gt2.wait()
-
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
new file mode 100644
index 000000000..e0cacadb4
--- /dev/null
+++ b/nova/tests/utils.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova.context
+import nova.db
+import nova.flags
+
+FLAGS = nova.flags.FLAGS
+
+
+def get_test_admin_context():
+ return nova.context.get_admin_context()
+
+
+def get_test_instance(context=None):
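+    """Create a bare-bones instance record via the DB layer for driver
+    tests; callers may pass their own context, otherwise an admin
+    context is used."""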
+ if not context:
+ context = get_test_admin_context()
+
+ test_instance = {'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '1',
+ 'instance_type_id': '5'} # m1.small
+
+ instance_ref = nova.db.instance_create(context, test_instance)
+ return instance_ref
+
+
+def get_test_network_info(count=1):
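+    """Return `count` fake (network, mapping) pairs of the shape produced
+    by the network manager's get_instance_nw_info()."""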
+ ipv6 = FLAGS.use_ipv6
+ fake = 'fake'
+ fake_ip = '0.0.0.0/0'
+ fake_ip_2 = '0.0.0.1/0'
+    fake_ip_3 = '0.0.0.2/0'
+ fake_vlan = 100
+ fake_bridge_interface = 'eth0'
+ network = {'bridge': fake,
+ 'cidr': fake_ip,
+ 'cidr_v6': fake_ip,
+ 'vlan': fake_vlan,
+ 'bridge_interface': fake_bridge_interface,
+ 'injected': False}
+ mapping = {'mac': fake,
+ 'dhcp_server': fake,
+ 'gateway': fake,
+ 'gateway6': fake,
+ 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
+ if ipv6:
+ mapping['ip6s'] = [{'ip': fake_ip},
+ {'ip': fake_ip_2},
+ {'ip': fake_ip_3}]
+ return [(network, mapping) for x in xrange(0, count)]
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index afd672c7a..0d896239a 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -23,6 +23,8 @@ import time
from nova import db
from nova import utils
+from nova.compute import task_states
+from nova.compute import vm_states
def stub_out_db_instance_api(stubs):
@@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs):
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index a6a1febd6..647a4c1df 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -18,6 +18,8 @@
import eventlet
import json
+import random
+
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
@@ -191,6 +193,7 @@ class FakeSessionForVMTests(fake.SessionBase):
vm['power_state'] = 'Running'
vm['is_a_template'] = False
vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
def VM_snapshot(self, session_ref, vm_ref, label):
status = "Running"
@@ -290,6 +293,7 @@ class FakeSessionForMigrationTests(fake.SessionBase):
vm['power_state'] = 'Running'
vm['is_a_template'] = False
vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
def stub_out_migration_methods(stubs):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 93290aba7..d05b51bd9 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -140,7 +140,7 @@ class ComputeDriver(object):
that it was before this call began.
:param context: security context
- :param instance: Instance of {nova.compute.service.Instance}.
+ :param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param network_info:
@@ -152,14 +152,11 @@ class ComputeDriver(object):
def destroy(self, instance, network_info, cleanup=True):
"""Destroy (shutdown and delete) the specified instance.
- The given parameter is an instance of nova.compute.service.Instance,
-
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
- :param instance: Instance of {nova.compute.service.Instance} and so
- the instance is being specified as instance.name.
+ :param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param cleanup:
@@ -171,8 +168,7 @@ class ComputeDriver(object):
def reboot(self, instance, network_info):
"""Reboot the specified instance.
- :param instance: Instance of {nova.compute.service.Instance} and so
- the instance is being specified as instance.name.
+ :param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
"""
@@ -240,10 +236,10 @@ class ComputeDriver(object):
"""
Snapshots the specified instance.
- The given parameter is an instance of nova.compute.service.Instance,
- and so the instance is being specified as instance.name.
-
- The second parameter is the name of the snapshot.
+ :param context: security context
+ :param instance: Instance object as returned by DB layer.
+ :param image_id: Reference to a pre-created image that will
+ hold the snapshot.
"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 13b7aeab5..d5e2bf31b 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -67,6 +67,7 @@ class FakeConnection(driver.ComputeDriver):
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'fake-mini'}
+ self._mounts = {}
@classmethod
def instance(cls):
@@ -99,7 +100,8 @@ class FakeConnection(driver.ComputeDriver):
self.instances[name] = fake_instance
def snapshot(self, context, instance, name):
- pass
+        if instance['name'] not in self.instances:
+ raise exception.InstanceNotRunning()
def reboot(self, instance, network_info):
pass
@@ -144,7 +146,7 @@ class FakeConnection(driver.ComputeDriver):
pass
def destroy(self, instance, network_info, cleanup=True):
- key = instance.name
+ key = instance['name']
if key in self.instances:
del self.instances[key]
else:
@@ -152,9 +154,16 @@ class FakeConnection(driver.ComputeDriver):
(key, self.instances))
def attach_volume(self, instance_name, device_path, mountpoint):
+        if instance_name not in self._mounts:
+ self._mounts[instance_name] = {}
+ self._mounts[instance_name][mountpoint] = device_path
return True
def detach_volume(self, instance_name, mountpoint):
+ try:
+ del self._mounts[instance_name][mountpoint]
+ except KeyError:
+ pass
return True
def get_info(self, instance_name):
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 6a02cfa24..d3aeadda4 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -135,7 +135,9 @@
<interface type='bridge'>
<source bridge='${nic.bridge_name}'/>
<mac address='${nic.mac_address}'/>
- <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+#if $getVar('use_virtio_for_bridges', True)
+ <model type='virtio'/>
+#end if
<filterref filter="nova-instance-${name}-${nic.id}">
<parameter name="IP" value="${nic.ip_address}" />
<parameter name="DHCPSERVER" value="${nic.dhcp_server}" />
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 4388291db..363a20ed0 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -135,6 +135,9 @@ flags.DEFINE_string('default_local_format',
None,
'The default format a local_volume will be formatted with '
'on creation.')
+flags.DEFINE_bool('libvirt_use_virtio_for_bridges',
+ False,
+ 'Use virtio for bridge interfaces')
def get_connection(read_only):
@@ -1083,6 +1086,8 @@ class LibvirtConnection(driver.ComputeDriver):
'ebs_root': ebs_root,
'local_device': local_device,
'volumes': block_device_mapping,
+ 'use_virtio_for_bridges':
+ FLAGS.libvirt_use_virtio_for_bridges,
'ephemerals': ephemerals}
root_device_name = driver.block_device_info_get_root(block_device_info)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 7c91aa9b9..97dfd9fa9 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -51,6 +51,7 @@ A fake XenAPI SDK.
"""
+import random
import uuid
from pprint import pformat
@@ -103,8 +104,10 @@ def create_network(name_label, bridge):
def create_vm(name_label, status,
is_a_template=False, is_control_domain=False):
+    domid = random.randrange(1, 1 << 16) if status == 'Running' else -1
return _create_object('VM',
{'name_label': name_label,
+ 'domid': domid,
'power-state': status,
'is_a_template': is_a_template,
'is_control_domain': is_control_domain})
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 64c106f47..c5f105f40 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -239,9 +239,8 @@ class VMOps(object):
self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
vdis)
- # Alter the image before VM start for, e.g. network injection also
- # alter the image if there's metadata.
- if FLAGS.flat_injected or instance['metadata']:
+ # Alter the image before VM start for network injection.
+ if FLAGS.flat_injected:
VMHelper.preconfigure_instance(self._session, instance,
first_vdi_ref, network_info)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 52b3a9fed..d9c082514 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,7 +41,8 @@ LOG = logging.getLogger('nova.volume')
class API(base.Base):
"""API for interacting with the volume manager."""
- def create(self, context, size, snapshot_id, name, description):
+ def create(self, context, size, snapshot_id, name, description,
+ volume_type=None, metadata=None, availability_zone=None):
if snapshot_id != None:
snapshot = self.get_snapshot(context, snapshot_id)
if snapshot['status'] != "available":
@@ -57,16 +58,27 @@ class API(base.Base):
raise quota.QuotaError(_("Volume quota exceeded. You cannot "
"create a volume of size %sG") % size)
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if volume_type is None:
+ volume_type_id = None
+ else:
+ volume_type_id = volume_type.get('id', None)
+
options = {
'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
'snapshot_id': snapshot_id,
- 'availability_zone': FLAGS.storage_availability_zone,
+ 'availability_zone': availability_zone,
'status': "creating",
'attach_status': "detached",
'display_name': name,
- 'display_description': description}
+ 'display_description': description,
+ 'volume_type_id': volume_type_id,
+ 'metadata': metadata,
+ }
volume = self.db.volume_create(context, options)
rpc.cast(context,
@@ -105,10 +117,45 @@ class API(base.Base):
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
- def get_all(self, context):
+    def get_all(self, context, search_opts=None):
if context.is_admin:
- return self.db.volume_get_all(context)
- return self.db.volume_get_all_by_project(context, context.project_id)
+ volumes = self.db.volume_get_all(context)
+ else:
+ volumes = self.db.volume_get_all_by_project(context,
+ context.project_id)
+
+ if search_opts:
+ LOG.debug(_("Searching by: %s") % str(search_opts))
+
+ def _check_metadata_match(volume, searchdict):
+ volume_metadata = {}
+ for i in volume.get('volume_metadata'):
+ volume_metadata[i['key']] = i['value']
+
+ for k, v in searchdict.iteritems():
+                    if (k not in volume_metadata or
+                            volume_metadata[k] != v):
+ return False
+ return True
+
+ # search_option to filter_name mapping.
+ filter_mapping = {'metadata': _check_metadata_match}
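+            # Example (mirroring the VSA volume tests): passing
+            # search_opts={'metadata': {'from_vsa_id': '12'}} returns only
+            # the volumes whose metadata contains that exact key/value pair.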
+
+ result = []
+ for volume in volumes:
+ # go over all filters in the list
+ for opt, values in search_opts.iteritems():
+ try:
+ filter_func = filter_mapping[opt]
+ except KeyError:
+ # no such filter - ignore it, go to next filter
+ continue
+ else:
+ if filter_func(volume, values):
+ result.append(volume)
+ break
+ volumes = result
+ return volumes
def get_snapshot(self, context, snapshot_id):
rv = self.db.snapshot_get(context, snapshot_id)
@@ -183,3 +230,38 @@ class API(base.Base):
{"method": "delete_snapshot",
"args": {"topic": FLAGS.volume_topic,
"snapshot_id": snapshot_id}})
+
+ def get_volume_metadata(self, context, volume_id):
+ """Get all metadata associated with a volume."""
+ rv = self.db.volume_metadata_get(context, volume_id)
+ return dict(rv.iteritems())
+
+ def delete_volume_metadata(self, context, volume_id, key):
+ """Delete the given metadata item from an volume."""
+ self.db.volume_metadata_delete(context, volume_id, key)
+
+ def update_volume_metadata(self, context, volume_id,
+ metadata, delete=False):
+ """Updates or creates volume metadata.
+
+ If delete is True, metadata items that are not specified in the
+ `metadata` argument will be deleted.
+
+ """
+ if delete:
+ _metadata = metadata
+ else:
+ _metadata = self.get_volume_metadata(context, volume_id)
+ _metadata.update(metadata)
+
+ self.db.volume_metadata_update(context, volume_id, _metadata, True)
+ return _metadata
+
+ def get_volume_metadata_value(self, volume, key):
+ """Get value of particular metadata key."""
+ metadata = volume.get('volume_metadata')
+ if metadata:
+ for i in volume['volume_metadata']:
+ if i['key'] == key:
+ return i['value']
+ return None
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 7d2fb45d4..35e3ea8d0 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -22,11 +22,13 @@ Drivers for volumes.
import time
import os
+from xml.etree import ElementTree
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from nova.volume import volume_types
LOG = logging.getLogger("nova.volume.driver")
@@ -212,6 +214,11 @@ class VolumeDriver(object):
"""Make sure volume is exported."""
raise NotImplementedError()
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+ return None
+
class AOEDriver(VolumeDriver):
"""Implements AOE specific volume commands."""
@@ -523,7 +530,7 @@ class ISCSIDriver(VolumeDriver):
"node.session.auth.password",
iscsi_properties['auth_password'])
- self._run_iscsiadm(iscsi_properties, "--login")
+ self._run_iscsiadm(iscsi_properties, ("--login", ))
self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
@@ -544,7 +551,7 @@ class ISCSIDriver(VolumeDriver):
locals())
# The rescan isn't documented as being necessary(?), but it helps
- self._run_iscsiadm(iscsi_properties, "--rescan")
+ self._run_iscsiadm(iscsi_properties, ("--rescan", ))
tries = tries + 1
if not os.path.exists(mount_device):
@@ -561,7 +568,7 @@ class ISCSIDriver(VolumeDriver):
"""Undiscover volume on a remote host."""
iscsi_properties = self._get_iscsi_properties(volume)
self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
- self._run_iscsiadm(iscsi_properties, "--logout")
+ self._run_iscsiadm(iscsi_properties, ("--logout", ))
self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
def check_for_export(self, context, volume_id):
@@ -802,3 +809,268 @@ class LoggingVolumeDriver(VolumeDriver):
if match:
matches.append(entry)
return matches
+
+
+class ZadaraBEDriver(ISCSIDriver):
+ """Performs actions to configure Zadara BE module."""
+
+ def _is_vsa_volume(self, volume):
+ return volume_types.is_vsa_volume(volume['volume_type_id'])
+
+ def _is_vsa_drive(self, volume):
+ return volume_types.is_vsa_drive(volume['volume_type_id'])
+
+ def _not_vsa_volume_or_drive(self, volume):
+ """Returns True if volume is not VSA BE volume."""
+ if not volume_types.is_vsa_object(volume['volume_type_id']):
+ LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name'])
+ return True
+ else:
+ return False
+
+ def check_for_setup_error(self):
+ """No setup necessary for Zadara BE."""
+ pass
+
+ """ Volume Driver methods """
+ def create_volume(self, volume):
+ """Creates BE volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_volume(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s creation - do nothing"),
+ volume['name'])
+ return
+
+ if int(volume['size']) == 0:
+ sizestr = '0' # indicates full-partition
+ else:
+ sizestr = '%s' % (int(volume['size']) << 30) # size in bytes
+
+ # Set the qos-str to default type sas
+ qosstr = 'SAS_1000'
+ volume_type = volume_types.get_volume_type(None,
+ volume['volume_type_id'])
+ if volume_type is not None:
+ qosstr = volume_type['extra_specs']['drive_type'] + \
+ ("_%s" % volume_type['extra_specs']['drive_size'])
+
+ vsa_id = None
+ for i in volume.get('volume_metadata'):
+ if i['key'] == 'to_vsa_id':
+ vsa_id = i['value']
+ break
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'create_qospart',
+ '--qos', qosstr,
+ '--pname', volume['name'],
+ '--psize', sizestr,
+ '--vsaid', vsa_id,
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE create_volume for %s failed"), volume['name'])
+ raise
+
+ LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
+
+ def delete_volume(self, volume):
+ """Deletes BE volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).delete_volume(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'delete_partition',
+ '--pname', volume['name'],
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
+ return
+
+ LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name'])
+
+ def local_path(self, volume):
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).local_path(volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s local path call - call discover"),
+ volume['name'])
+ return super(ZadaraBEDriver, self).discover_volume(None, volume)
+
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """ensure BE export for a volume"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).ensure_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping ensure_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ ret = self._common_be_export(context, volume, iscsi_target)
+ except exception.ProcessExecutionError:
+ return
+ return ret
+
+ def create_export(self, context, volume):
+ """create BE export for a volume"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s create export - do nothing"),
+ volume['name'])
+ return
+
+ self._ensure_iscsi_targets(context, volume['host'])
+ iscsi_target = self.db.volume_allocate_iscsi_target(context,
+ volume['id'],
+ volume['host'])
+ try:
+ ret = self._common_be_export(context, volume, iscsi_target)
+        except Exception:
+            raise exception.ProcessExecutionError()
+ return ret
+
+ def remove_export(self, context, volume):
+ """Removes BE export for a volume."""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).remove_export(context, volume)
+
+ if self._is_vsa_volume(volume):
+ LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"),
+ volume['name'])
+ return
+
+ try:
+ iscsi_target = self.db.volume_get_iscsi_target_num(context,
+ volume['id'])
+ except exception.NotFound:
+ LOG.info(_("Skipping remove_export. No iscsi_target " +
+ "provisioned for volume: %d"), volume['id'])
+ return
+
+ try:
+ self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
+ 'remove_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
+ return
+
+ def create_snapshot(self, snapshot):
+ """Nothing required for snapshot"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).create_snapshot(volume)
+
+ pass
+
+ def delete_snapshot(self, snapshot):
+ """Nothing required to delete a snapshot"""
+ if self._not_vsa_volume_or_drive(volume):
+ return super(ZadaraBEDriver, self).delete_snapshot(volume)
+
+ pass
+
+ """ Internal BE Volume methods """
+ def _common_be_export(self, context, volume, iscsi_target):
+ """
+ Common logic that asks zadara_sncfg to set up the iSCSI target/LUN
+ for this volume.
+ """
+ (out, err) = self._sync_exec(
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'create_export',
+ '--pname', volume['name'],
+ '--tid', iscsi_target,
+ run_as_root=True,
+ check_exit_code=0)
+
+ result_xml = ElementTree.fromstring(out)
+ response_node = result_xml.find("Sn")
+ if response_node is None:
+ msg = "Malformed response from zadara_sncfg"
+ raise exception.Error(msg)
+
+ sn_ip = response_node.findtext("SnIp")
+ sn_iqn = response_node.findtext("IqnName")
+ iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ sn_iqn))
+ return model_update
+
+ def _get_qosgroup_summary(self):
+ """gets the list of qosgroups from Zadara BE"""
+ try:
+ (out, err) = self._sync_exec(
+ '/var/lib/zadara/bin/zadara_sncfg',
+ 'get_qosgroups_xml',
+ run_as_root=True,
+ check_exit_code=0)
+ except exception.ProcessExecutionError:
+ LOG.debug(_("Failed to retrieve QoS info"))
+ return {}
+
+ qos_groups = {}
+ result_xml = ElementTree.fromstring(out)
+ for element in result_xml.findall('QosGroup'):
+ qos_group = {}
+ # get the name of the group.
+ # If we cannot find it, forget this element
+ group_name = element.findtext("Name")
+ if not group_name:
+ continue
+
+ # loop through all child nodes & fill up attributes of this group
+ for child in element.getchildren():
+ # two types of elements - property of qos-group & sub property
+ # classify them accordingly
+ if child.text:
+ qos_group[child.tag] = int(child.text) \
+ if child.text.isdigit() else child.text
+ else:
+ subelement = {}
+ for subchild in child.getchildren():
+ subelement[subchild.tag] = int(subchild.text) \
+ if subchild.text.isdigit() else subchild.text
+ qos_group[child.tag] = subelement
+
+ # Now add this group to the master qos_groups
+ qos_groups[group_name] = qos_group
+
+ return qos_groups
+
+ def get_volume_stats(self, refresh=False):
+ """Return the current state of the volume service. If 'refresh' is
+ True, run the update first."""
+
+ drive_info = self._get_qosgroup_summary()
+ return {'drive_qos_info': drive_info}
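For reference, the XML walk in _get_qosgroup_summary() above can be exercised standalone. The exact document emitted by zadara_sncfg get_qosgroups_xml is not shown in this change, so the sample below and its tags (AvailableCapacity, Drive, ...) are hypothetical; only the traversal logic mirrors the driver code:

    from xml.etree import ElementTree

    SAMPLE = ('<QosGroups>'
              '<QosGroup><Name>SAS_1000</Name>'
              '<AvailableCapacity>2048</AvailableCapacity>'
              '<Drive><Type>SAS</Type><Size>1000</Size></Drive>'
              '</QosGroup></QosGroups>')

    qos_groups = {}
    for element in ElementTree.fromstring(SAMPLE).findall('QosGroup'):
        group = {}
        name = element.findtext('Name')
        if not name:
            continue
        for child in element.getchildren():
            if child.text:
                # leaf property of the qos-group
                group[child.tag] = (int(child.text)
                                    if child.text.isdigit() else child.text)
            else:
                # element with nested sub-properties
                group[child.tag] = dict(
                    (sub.tag,
                     int(sub.text) if sub.text.isdigit() else sub.text)
                    for sub in child.getchildren())
        qos_groups[name] = group

    print qos_groups
    # {'SAS_1000': {'Name': 'SAS_1000', 'AvailableCapacity': 2048,
    #               'Drive': {'Type': 'SAS', 'Size': 1000}}}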
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 798bd379a..caa5298d4 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -48,7 +48,9 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
+from nova import rpc
from nova import utils
+from nova.volume import volume_types
LOG = logging.getLogger('nova.volume.manager')
@@ -60,6 +62,8 @@ flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver',
'Driver to use for volume creation')
flags.DEFINE_boolean('use_local_volumes', True,
'if True, will not discover local volumes')
+flags.DEFINE_boolean('volume_force_update_capabilities', False,
+ 'if True, force a capabilities update on each check')
class VolumeManager(manager.SchedulerDependentManager):
@@ -74,6 +78,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
+ self._last_volume_stats = []
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -123,6 +128,7 @@ class VolumeManager(manager.SchedulerDependentManager):
except Exception:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
+ self._notify_vsa(context, volume_ref, 'error')
raise
now = utils.utcnow()
@@ -130,8 +136,29 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref['id'], {'status': 'available',
'launched_at': now})
LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
+ self._notify_vsa(context, volume_ref, 'available')
+ self._reset_stats()
return volume_id
+ def _notify_vsa(self, context, volume_ref, status):
+ if volume_ref['volume_type_id'] is None:
+ return
+
+ if volume_types.is_vsa_drive(volume_ref['volume_type_id']):
+ vsa_id = None
+ for i in volume_ref.get('volume_metadata', []):
+ if i['key'] == 'to_vsa_id':
+ vsa_id = int(i['value'])
+ break
+
+ if vsa_id:
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "vsa_volume_created",
+ "args": {"vol_id": volume_ref['id'],
+ "vsa_id": vsa_id,
+ "status": status}})
+
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()
@@ -141,6 +168,7 @@ class VolumeManager(manager.SchedulerDependentManager):
if volume_ref['host'] != self.host:
raise exception.Error(_("Volume is not local to this node"))
+ self._reset_stats()
try:
LOG.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
@@ -231,3 +259,53 @@ class VolumeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
for volume in instance_ref['volumes']:
self.driver.check_for_export(context, volume['id'])
+
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+
+ error_list = []
+ try:
+ self._report_driver_status()
+ except Exception as ex:
+ LOG.warning(_("Error during report_driver_status(): %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ super(VolumeManager, self).periodic_tasks(context)
+
+ return error_list
+
+ def _volume_stats_changed(self, stat1, stat2):
+ if FLAGS.volume_force_update_capabilities:
+ return True
+ if len(stat1) != len(stat2):
+ return True
+ for (k, v) in stat1.iteritems():
+ if k not in stat2 or stat2[k] != v:
+ return True
+ return False
+
+ def _report_driver_status(self):
+ volume_stats = self.driver.get_volume_stats(refresh=True)
+ if volume_stats:
+ LOG.info(_("Checking volume capabilities"))
+
+ if self._volume_stats_changed(self._last_volume_stats,
+ volume_stats):
+ LOG.info(_("New capabilities found: %s"), volume_stats)
+ self._last_volume_stats = volume_stats
+
+ # This will grab info about the host and queue it
+ # to be sent to the Schedulers.
+ self.update_service_capabilities(self._last_volume_stats)
+ else:
+ # avoid repeating fanouts
+ self.update_service_capabilities(None)
+
+ def _reset_stats(self):
+ LOG.info(_("Clear capabilities"))
+ self._last_volume_stats = []
+
+ def notification(self, context, event):
+ LOG.info(_("Notification {%s} received"), event)
+ self._reset_stats()
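The capabilities fan-out above is suppressed when nothing changed, unless volume_force_update_capabilities is set. A condensed, standalone restatement of the comparison rule (flag handling omitted):

    def volume_stats_changed(old, new):
        if len(old) != len(new):
            return True
        for k, v in old.iteritems():
            if k not in new or new[k] != v:
                return True
        return False

    assert volume_stats_changed({}, {'drive_qos_info': {}})
    assert volume_stats_changed({'a': 1}, {'a': 2})
    assert not volume_stats_changed({'a': 1}, {'a': 1})

When the stats are unchanged, update_service_capabilities(None) is still called, which (per the comment in the code) avoids repeating the fanout payload.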
diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py
new file mode 100644
index 000000000..ffa9e6e02
--- /dev/null
+++ b/nova/volume/volume_types.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Ken Pepple
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Built-in volume type properties."""
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.volume.volume_types')
+
+
+def create(context, name, extra_specs={}):
+ """Creates volume types."""
+ try:
+ db.volume_type_create(context,
+ dict(name=name,
+ extra_specs=extra_specs))
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s') % e)
+ raise exception.ApiError(_("Cannot create volume_type with "
+ "name %(name)s and specs %(extra_specs)s")
+ % locals())
+
+
+def destroy(context, name):
+ """Marks volume types as deleted."""
+ if name is None:
+ raise exception.InvalidVolumeType(volume_type=name)
+ else:
+ try:
+ db.volume_type_destroy(context, name)
+ except exception.NotFound:
+ LOG.exception(_('Volume type %s not found for deletion') % name)
+ raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def purge(context, name):
+ """Removes volume types from database."""
+ if name is None:
+ raise exception.InvalidVolumeType(volume_type=name)
+ else:
+ try:
+ db.volume_type_purge(context, name)
+ except exception.NotFound:
+ LOG.exception(_('Volume type %s not found for purge') % name)
+ raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def get_all_types(context, inactive=0, search_opts={}):
+ """Get all non-deleted volume_types.
+
+ Pass inactive=True if deleted volume types should also be returned.
+
+ """
+ vol_types = db.volume_type_get_all(context, inactive)
+
+ if search_opts:
+ LOG.debug(_("Searching by: %s") % str(search_opts))
+
+ def _check_extra_specs_match(vol_type, searchdict):
+ for k, v in searchdict.iteritems():
+ if k not in vol_type['extra_specs'] \
+ or vol_type['extra_specs'][k] != v:
+ return False
+ return True
+
+ # search_option to filter_name mapping.
+ filter_mapping = {'extra_specs': _check_extra_specs_match}
+
+ result = {}
+ for type_name, type_args in vol_types.iteritems():
+ # go over all filters in the list
+ for opt, values in search_opts.iteritems():
+ try:
+ filter_func = filter_mapping[opt]
+ except KeyError:
+ # no such filter - ignore it, go to next filter
+ continue
+ else:
+ if filter_func(type_args, values):
+ result[type_name] = type_args
+ break
+ vol_types = result
+ return vol_types
+
+
+def get_volume_type(ctxt, id):
+ """Retrieves single volume type by id."""
+ if id is None:
+ raise exception.InvalidVolumeType(volume_type=id)
+
+ if ctxt is None:
+ ctxt = context.get_admin_context()
+
+ try:
+ return db.volume_type_get(ctxt, id)
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown volume type: %s") % id)
+
+
+def get_volume_type_by_name(context, name):
+ """Retrieves single volume type by name."""
+ if name is None:
+ raise exception.InvalidVolumeType(volume_type=name)
+
+ try:
+ return db.volume_type_get_by_name(context, name)
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown volume type: %s") % name)
+
+
+def is_key_value_present(volume_type_id, key, value, volume_type=None):
+ if volume_type_id is None:
+ return False
+
+ if volume_type is None:
+ volume_type = get_volume_type(context.get_admin_context(),
+ volume_type_id)
+ if volume_type.get('extra_specs') is None or\
+ volume_type['extra_specs'].get(key) != value:
+ return False
+ else:
+ return True
+
+
+def is_vsa_drive(volume_type_id, volume_type=None):
+ return is_key_value_present(volume_type_id,
+ 'type', 'vsa_drive', volume_type)
+
+
+def is_vsa_volume(volume_type_id, volume_type=None):
+ return is_key_value_present(volume_type_id,
+ 'type', 'vsa_volume', volume_type)
+
+
+def is_vsa_object(volume_type_id):
+ if volume_type_id is None:
+ return False
+
+ volume_type = get_volume_type(context.get_admin_context(),
+ volume_type_id)
+
+ return is_vsa_drive(volume_type_id, volume_type) or\
+ is_vsa_volume(volume_type_id, volume_type)
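Illustrative usage of the helpers above, assuming a configured nova database and an admin context; the type name and spec values are examples only (the 'type'/'drive_type'/'drive_size' keys match what nova/vsa/api.py below expects for VSA drives):

    from nova import context
    from nova.volume import volume_types

    ctxt = context.get_admin_context()
    volume_types.create(ctxt, 'SATA_500',
                        extra_specs={'type': 'vsa_drive',
                                     'drive_type': 'SATA',
                                     'drive_size': '500'})

    vol_type = volume_types.get_volume_type_by_name(ctxt, 'SATA_500')
    print volume_types.is_vsa_drive(vol_type['id'])   # True
    print volume_types.is_vsa_volume(vol_type['id'])  # False

    # Filter types whose extra_specs contain a given sub-dict:
    sata_types = volume_types.get_all_types(
        ctxt, search_opts={'extra_specs': {'drive_type': 'SATA'}})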
diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py
new file mode 100644
index 000000000..09162e006
--- /dev/null
+++ b/nova/vsa/__init__.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.vsa.api import API
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
new file mode 100644
index 000000000..18cf13705
--- /dev/null
+++ b/nova/vsa/api.py
@@ -0,0 +1,411 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to Virtual Storage Arrays (VSAs).
+
+Experimental code. Requires special VSA image.
+For assistance and guidelines please contact
+ Zadara Storage Inc. and the OpenStack community
+"""
+
+import sys
+
+from nova import compute
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import volume
+from nova.compute import instance_types
+from nova.db import base
+from nova.volume import volume_types
+
+
+class VsaState:
+ CREATING = 'creating' # VSA creating (not ready yet)
+ LAUNCHING = 'launching' # Launching VCs (all BE volumes were created)
+ CREATED = 'created' # VSA fully created and ready for use
+ PARTIAL = 'partial' # Some BE drives were allocated
+ FAILED = 'failed' # Some BE storage allocations failed
+ DELETING = 'deleting' # VSA started the deletion procedure
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_ec2_access_key', None,
+ 'EC2 access key used by VSA for accessing nova')
+flags.DEFINE_string('vsa_ec2_user_id', None,
+ 'User ID used by VSA for accessing nova')
+flags.DEFINE_boolean('vsa_multi_vol_creation', True,
+ 'Ask scheduler to create multiple volumes in one call')
+flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type',
+ 'Name of volume type associated with FE VSA volumes')
+
+LOG = logging.getLogger('nova.vsa')
+
+
+class API(base.Base):
+ """API for interacting with the VSA manager."""
+
+ def __init__(self, compute_api=None, volume_api=None, **kwargs):
+ self.compute_api = compute_api or compute.API()
+ self.volume_api = volume_api or volume.API()
+ super(API, self).__init__(**kwargs)
+
+ def _check_volume_type_correctness(self, vol_type):
+ if vol_type.get('extra_specs') is None or\
+ vol_type['extra_specs'].get('type') != 'vsa_drive' or\
+ vol_type['extra_specs'].get('drive_type') is None or\
+ vol_type['extra_specs'].get('drive_size') is None:
+
+ raise exception.ApiError(_("Invalid drive type %s")
+ % vol_type['name'])
+
+ def _get_default_vsa_instance_type(self):
+ return instance_types.get_instance_type_by_name(
+ FLAGS.default_vsa_instance_type)
+
+ def _check_storage_parameters(self, context, vsa_name, storage,
+ shared, first_index=0):
+ """
+ Translates a storage array of disks into a list of volumes.
+ :param storage: List of dictionaries with the following keys:
+ drive_name, num_drives, size
+ :param shared: Specifies whether storage is dedicated or shared.
+ For shared storage, disks are split into partitions.
+ """
+ volume_params = []
+ for node in storage:
+
+ name = node.get('drive_name', None)
+ num_disks = node.get('num_drives', 1)
+
+ if name is None:
+ raise exception.ApiError(_("No drive_name param found in %s")
+ % node)
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except exception.NotFound:
+ raise exception.ApiError(_("Invalid drive type name %s")
+ % name)
+
+ self._check_volume_type_correctness(vol_type)
+
+ # if size field present - override disk size specified in DB
+ size = int(node.get('size',
+ vol_type['extra_specs'].get('drive_size')))
+
+ if shared:
+ part_size = FLAGS.vsa_part_size_gb
+ total_capacity = num_disks * size
+ num_volumes = total_capacity / part_size
+ size = part_size
+ else:
+ num_volumes = num_disks
+ size = 0 # special handling for full drives
+
+ for i in range(num_volumes):
+ volume_name = "drive-%03d" % first_index
+ first_index += 1
+ volume_desc = 'BE volume for VSA %s type %s' % \
+ (vsa_name, name)
+ volume = {
+ 'size': size,
+ 'name': volume_name,
+ 'description': volume_desc,
+ 'volume_type_id': vol_type['id'],
+ }
+ volume_params.append(volume)
+
+ return volume_params
+
+ def create(self, context, display_name='', display_description='',
+ vc_count=1, instance_type=None, image_name=None,
+ availability_zone=None, storage=[], shared=None):
+ """
+ Provisions a VSA instance with the corresponding compute instances
+ and associated volumes.
+ :param storage: List of dictionaries with the following keys:
+ drive_name, num_drives, size
+ :param shared: Specifies whether storage is dedicated or shared.
+ For shared storage, disks are split into partitions.
+ """
+
+ LOG.info(_("*** Experimental VSA code ***"))
+
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if instance_type is None:
+ instance_type = self._get_default_vsa_instance_type()
+
+ if availability_zone is None:
+ availability_zone = FLAGS.storage_availability_zone
+
+ if storage is None:
+ storage = []
+
+ shared = shared not in (None, False, 'False')
+
+ # check if image is ready before starting any work
+ if image_name is None:
+ image_name = FLAGS.vc_image_name
+ try:
+ image_service = self.compute_api.image_service
+ vc_image = image_service.show_by_name(context, image_name)
+ vc_image_href = vc_image['id']
+ except exception.ImageNotFound:
+ raise exception.ApiError(_("Failed to find configured image %s")
+ % image_name)
+
+ options = {
+ 'display_name': display_name,
+ 'display_description': display_description,
+ 'project_id': context.project_id,
+ 'availability_zone': availability_zone,
+ 'instance_type_id': instance_type['id'],
+ 'image_ref': vc_image_href,
+ 'vc_count': vc_count,
+ 'status': VsaState.CREATING,
+ }
+ LOG.info(_("Creating VSA: %s") % options)
+
+ # create DB entry for VSA instance
+ try:
+ vsa_ref = self.db.vsa_create(context, options)
+ except exception.Error, ex:
+ raise exception.ApiError(str(ex))
+ vsa_id = vsa_ref['id']
+ vsa_name = vsa_ref['name']
+
+ # check storage parameters
+ try:
+ volume_params = self._check_storage_parameters(context, vsa_name,
+ storage, shared)
+ except exception.ApiError:
+ self.db.vsa_destroy(context, vsa_id)
+ raise exception.ApiError(_("Error in storage parameters: %s")
+ % storage)
+
+ # after creating DB entry, re-check and set some defaults
+ updates = {}
+ if (not hasattr(vsa_ref, 'display_name') or
+ vsa_ref.display_name is None or
+ vsa_ref.display_name == ''):
+ updates['display_name'] = display_name = vsa_name
+ updates['vol_count'] = len(volume_params)
+ vsa_ref = self.update(context, vsa_id, **updates)
+
+ # create volumes
+ if FLAGS.vsa_multi_vol_creation:
+ if len(volume_params) > 0:
+ request_spec = {
+ 'num_volumes': len(volume_params),
+ 'vsa_id': str(vsa_id),
+ 'volumes': volume_params,
+ }
+
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volumes",
+ "args": {"topic": FLAGS.volume_topic,
+ "request_spec": request_spec,
+ "availability_zone": availability_zone}})
+ else:
+ # create BE volumes one-by-one
+ for vol in volume_params:
+ try:
+ vol_name = vol['name']
+ vol_size = vol['size']
+ vol_type_id = vol['volume_type_id']
+ LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\
+ "volume %(vol_name)s, %(vol_size)d GB, "\
+ "type %(vol_type_id)s"), locals())
+
+ vol_type = volume_types.get_volume_type(context,
+ vol['volume_type_id'])
+
+ vol_ref = self.volume_api.create(context,
+ vol_size,
+ None,
+ vol_name,
+ vol['description'],
+ volume_type=vol_type,
+ metadata=dict(to_vsa_id=str(vsa_id)),
+ availability_zone=availability_zone)
+ except Exception:
+ self.update_vsa_status(context, vsa_id,
+ status=VsaState.PARTIAL)
+ raise
+
+ if len(volume_params) == 0:
+ # No BE volumes - ask VSA manager to start VCs
+ rpc.cast(context,
+ FLAGS.vsa_topic,
+ {"method": "create_vsa",
+ "args": {"vsa_id": str(vsa_id)}})
+
+ return vsa_ref
+
+ def update_vsa_status(self, context, vsa_id, status):
+ updates = dict(status=status)
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"),
+ locals())
+ return self.update(context, vsa_id, **updates)
+
+ def update(self, context, vsa_id, **kwargs):
+ """Updates the VSA instance in the datastore.
+
+ :param context: The security context
+ :param vsa_id: ID of the VSA instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :returns: None
+ """
+ LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())
+
+ updatable_fields = ['status', 'vc_count', 'vol_count',
+ 'display_name', 'display_description']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+
+ vc_count = kwargs.get('vc_count', None)
+ if vc_count is not None:
+ # VP-TODO: This request may want to update number of VCs
+ # Get number of current VCs and add/delete VCs appropriately
+ vsa = self.get(context, vsa_id)
+ vc_count = int(vc_count)
+ if vc_count > FLAGS.max_vcs_in_vsa:
+ LOG.warning(_("Requested number of VCs (%d) is too high."\
+ " Setting to default"), vc_count)
+ vc_count = FLAGS.max_vcs_in_vsa
+
+ if vsa['vc_count'] != vc_count:
+ self.update_num_vcs(context, vsa, vc_count)
+ changes['vc_count'] = vc_count
+
+ return self.db.vsa_update(context, vsa_id, changes)
+
+ def update_num_vcs(self, context, vsa, vc_count):
+ vsa_name = vsa['name']
+ old_vc_count = int(vsa['vc_count'])
+ if vc_count > old_vc_count:
+ add_cnt = vc_count - old_vc_count
+ LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."),
+ locals())
+ # VP-TODO: actual code for adding new VCs
+
+ elif vc_count < old_vc_count:
+ del_cnt = old_vc_count - vc_count
+ LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."),
+ locals())
+ # VP-TODO: actual code for deleting extra VCs
+
+ def _force_volume_delete(self, ctxt, volume):
+ """Delete a volume, bypassing the check that it must be available."""
+ host = volume['host']
+ if not host:
+ # Deleting volume from database and skipping rpc.
+ self.db.volume_destroy(ctxt, volume['id'])
+ return
+
+ rpc.cast(ctxt,
+ self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume['id']}})
+
+ def delete_vsa_volumes(self, context, vsa_id, direction,
+ force_delete=True):
+ if direction == "FE":
+ volumes = self.get_all_vsa_volumes(context, vsa_id)
+ else:
+ volumes = self.get_all_vsa_drives(context, vsa_id)
+
+ for volume in volumes:
+ try:
+ vol_name = volume['name']
+ LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\
+ "volume %(vol_name)s"), locals())
+ self.volume_api.delete(context, volume['id'])
+ except exception.ApiError:
+ LOG.info(_("Unable to delete volume %s"), volume['name'])
+ if force_delete:
+ LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "\
+ "%(direction)s volume %(vol_name)s"), locals())
+ self._force_volume_delete(context, volume)
+
+ def delete(self, context, vsa_id):
+ """Terminate a VSA instance."""
+ LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)
+
+ # Delete all FrontEnd and BackEnd volumes
+ self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True)
+ self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True)
+
+ # Delete all VC instances
+ instances = self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(vsa_id))})
+ for instance in instances:
+ name = instance['name']
+ LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"),
+ locals())
+ self.compute_api.delete(context, instance['id'])
+
+ # Delete VSA instance
+ self.db.vsa_destroy(context, vsa_id)
+
+ def get(self, context, vsa_id):
+ rv = self.db.vsa_get(context, vsa_id)
+ return rv
+
+ def get_all(self, context):
+ if context.is_admin:
+ return self.db.vsa_get_all(context)
+ return self.db.vsa_get_all_by_project(context, context.project_id)
+
+ def get_vsa_volume_type(self, context):
+ name = FLAGS.vsa_volume_type_name
+ try:
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+ except exception.NotFound:
+ volume_types.create(context, name,
+ extra_specs=dict(type='vsa_volume'))
+ vol_type = volume_types.get_volume_type_by_name(context, name)
+
+ return vol_type
+
+ def get_all_vsa_instances(self, context, vsa_id):
+ return self.compute_api.get_all(context,
+ search_opts={'metadata': dict(vsa_id=str(vsa_id))})
+
+ def get_all_vsa_volumes(self, context, vsa_id):
+ return self.volume_api.get_all(context,
+ search_opts={'metadata': dict(from_vsa_id=str(vsa_id))})
+
+ def get_all_vsa_drives(self, context, vsa_id):
+ return self.volume_api.get_all(context,
+ search_opts={'metadata': dict(to_vsa_id=str(vsa_id))})
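The sizing rules in _check_storage_parameters() reduce to a small pure function. This sketch stands alone, with part_size standing in for FLAGS.vsa_part_size_gb (defined elsewhere in this change set):

    # Returns the per-volume sizes (GB) created for one storage node.
    def be_volume_sizes(num_drives, drive_size_gb, shared, part_size=10):
        if shared:
            # shared: split total capacity into fixed-size partitions
            total = num_drives * drive_size_gb
            return [part_size] * (total / part_size)
        # dedicated: one volume per drive; size 0 means "full drive"
        return [0] * num_drives

    print be_volume_sizes(2, 500, shared=False)  # [0, 0] -> 2 full drives
    print be_volume_sizes(2, 20, shared=True)    # [10, 10, 10, 10]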
diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py
new file mode 100644
index 000000000..8ac8a1dd5
--- /dev/null
+++ b/nova/vsa/connection.py
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Abstraction of the underlying connection to VC."""
+
+from nova.vsa import fake
+
+
+def get_connection():
+ # Return an object that is able to talk to VCs
+ return fake.FakeVcConnection()
diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py
new file mode 100644
index 000000000..d4248ca01
--- /dev/null
+++ b/nova/vsa/fake.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class FakeVcConnection(object):
+
+ def init_host(self, host):
+ pass
diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py
new file mode 100644
index 000000000..d4c414106
--- /dev/null
+++ b/nova/vsa/manager.py
@@ -0,0 +1,179 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all processes relating to Virtual Storage Arrays (VSA).
+
+**Related Flags**
+
+"""
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import utils
+from nova import volume
+from nova import vsa
+from nova.compute import instance_types
+from nova.vsa import utils as vsa_utils
+from nova.vsa.api import VsaState
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection',
+ 'Driver to use for controlling VSAs')
+
+LOG = logging.getLogger('nova.vsa.manager')
+
+
+class VsaManager(manager.SchedulerDependentManager):
+ """Manages Virtual Storage Arrays (VSAs)."""
+
+ def __init__(self, vsa_driver=None, *args, **kwargs):
+ if not vsa_driver:
+ vsa_driver = FLAGS.vsa_driver
+ self.driver = utils.import_object(vsa_driver)
+ self.compute_manager = utils.import_object(FLAGS.compute_manager)
+
+ self.compute_api = compute.API()
+ self.volume_api = volume.API()
+ self.vsa_api = vsa.API()
+
+ if FLAGS.vsa_ec2_user_id is None or \
+ FLAGS.vsa_ec2_access_key is None:
+ raise exception.VSANovaAccessParamNotFound()
+
+ super(VsaManager, self).__init__(*args, **kwargs)
+
+ def init_host(self):
+ self.driver.init_host(host=self.host)
+ super(VsaManager, self).init_host()
+
+ @exception.wrap_exception()
+ def create_vsa(self, context, vsa_id):
+ """Called by API if there were no BE volumes assigned"""
+ LOG.debug(_("Create call received for VSA %s"), vsa_id)
+
+ vsa_id = int(vsa_id) # just in case
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ return self._start_vcs(context, vsa)
+
+ @exception.wrap_exception()
+ def vsa_volume_created(self, context, vol_id, vsa_id, status):
+ """Callback for volume creations"""
+ LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "\
+ "Status %(status)s"), locals())
+ vsa_id = int(vsa_id) # just in case
+
+ # Get all volumes for this VSA
+ # check if any of them still in creating phase
+ drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
+ for drive in drives:
+ if drive['status'] == 'creating':
+ vol_name = drive['name']
+ vol_disp_name = drive['display_name']
+ LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\
+ "in creating phase - wait"), locals())
+ return
+
+ try:
+ vsa = self.vsa_api.get(context, vsa_id)
+ except Exception as ex:
+ msg = _("Failed to find VSA %(vsa_id)d") % locals()
+ LOG.exception(msg)
+ return
+
+ if len(drives) != vsa['vol_count']:
+ cvol_real = len(drives)
+ cvol_exp = vsa['vol_count']
+ LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\
+ "(%(cvol_real)d of %(cvol_exp)d)"), locals())
+ return
+
+ # all volumes created (successfully or not)
+ return self._start_vcs(context, vsa, drives)
+
+ def _start_vcs(self, context, vsa, drives=[]):
+ """Start VCs for VSA """
+
+ vsa_id = vsa['id']
+ if vsa['status'] == VsaState.CREATING:
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.LAUNCHING)
+ else:
+ return
+
+ # In a separate loop, go over all volumes and mark them "attached"
+ has_failed_volumes = False
+ for drive in drives:
+ vol_name = drive['name']
+ vol_disp_name = drive['display_name']
+ status = drive['status']
+ LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\
+ "(%(vol_disp_name)s) is in %(status)s state"),
+ locals())
+ if status == 'available':
+ try:
+ # self.volume_api.update(context, volume['id'],
+ # dict(attach_status="attached"))
+ pass
+ except Exception as ex:
+ msg = _("Failed to update attach status for volume "
+ "%(vol_name)s. %(ex)s") % locals()
+ LOG.exception(msg)
+ else:
+ has_failed_volumes = True
+
+ if has_failed_volumes:
+ LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
+ self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.FAILED)
+ return
+
+ # create user-data record for VC
+ storage_data = vsa_utils.generate_user_data(vsa, drives)
+
+ instance_type = instance_types.get_instance_type(
+ vsa['instance_type_id'])
+
+ # now start the VC instance
+
+ vc_count = vsa['vc_count']
+ LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
+ locals())
+ vc_instances = self.compute_api.create(context,
+ instance_type, # vsa['vsa_instance_type'],
+ vsa['image_ref'],
+ min_count=1,
+ max_count=vc_count,
+ display_name='vc-' + vsa['display_name'],
+ display_description='VC for VSA ' + vsa['display_name'],
+ availability_zone=vsa['availability_zone'],
+ user_data=storage_data,
+ metadata=dict(vsa_id=str(vsa_id)))
+
+ self.vsa_api.update_vsa_status(context, vsa_id,
+ VsaState.CREATED)
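The callback logic above boils down to three outcomes; a pure-function restatement of the decision made by vsa_volume_created() and _start_vcs():

    def next_action(drive_statuses, expected_count):
        if any(s == 'creating' for s in drive_statuses):
            return 'wait'    # some drives are still provisioning
        if len(drive_statuses) != expected_count:
            return 'wait'    # not every volume has been created yet
        if any(s != 'available' for s in drive_statuses):
            return 'fail'    # delete BE volumes, mark the VSA FAILED
        return 'launch'      # start the VC instances, mark CREATED

    print next_action(['available', 'creating'], 2)   # wait
    print next_action(['available', 'error'], 2)      # fail
    print next_action(['available', 'available'], 2)  # launch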
diff --git a/nova/vsa/utils.py b/nova/vsa/utils.py
new file mode 100644
index 000000000..1de341ac5
--- /dev/null
+++ b/nova/vsa/utils.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+from xml.etree import ElementTree
+
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+
+def generate_user_data(vsa, volumes):
+ SubElement = ElementTree.SubElement
+
+ e_vsa = ElementTree.Element("vsa")
+
+ e_vsa_detail = SubElement(e_vsa, "id")
+ e_vsa_detail.text = str(vsa['id'])
+ e_vsa_detail = SubElement(e_vsa, "name")
+ e_vsa_detail.text = vsa['display_name']
+ e_vsa_detail = SubElement(e_vsa, "description")
+ e_vsa_detail.text = vsa['display_description']
+ e_vsa_detail = SubElement(e_vsa, "vc_count")
+ e_vsa_detail.text = str(vsa['vc_count'])
+
+ e_vsa_detail = SubElement(e_vsa, "auth_user")
+ e_vsa_detail.text = FLAGS.vsa_ec2_user_id
+ e_vsa_detail = SubElement(e_vsa, "auth_access_key")
+ e_vsa_detail.text = FLAGS.vsa_ec2_access_key
+
+ e_volumes = SubElement(e_vsa, "volumes")
+ for volume in volumes:
+
+ loc = volume['provider_location']
+ if loc is None:
+ ip = ''
+ iscsi_iqn = ''
+ iscsi_portal = ''
+ else:
+ (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
+ (ip, iscsi_portal) = iscsi_target.split(":", 1)
+
+ e_vol = SubElement(e_volumes, "volume")
+ e_vol_detail = SubElement(e_vol, "id")
+ e_vol_detail.text = str(volume['id'])
+ e_vol_detail = SubElement(e_vol, "name")
+ e_vol_detail.text = volume['name']
+ e_vol_detail = SubElement(e_vol, "display_name")
+ e_vol_detail.text = volume['display_name']
+ e_vol_detail = SubElement(e_vol, "size_gb")
+ e_vol_detail.text = str(volume['size'])
+ e_vol_detail = SubElement(e_vol, "status")
+ e_vol_detail.text = volume['status']
+ e_vol_detail = SubElement(e_vol, "ip")
+ e_vol_detail.text = ip
+ e_vol_detail = SubElement(e_vol, "iscsi_iqn")
+ e_vol_detail.text = iscsi_iqn
+ e_vol_detail = SubElement(e_vol, "iscsi_portal")
+ e_vol_detail.text = iscsi_portal
+ e_vol_detail = SubElement(e_vol, "lun")
+ e_vol_detail.text = '0'
+ e_vol_detail = SubElement(e_vol, "sn_host")
+ e_vol_detail.text = volume['host']
+
+ _xml = ElementTree.tostring(e_vsa)
+ return base64.b64encode(_xml)
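On the consumer side (not part of this change), a VC would base64-decode the user data and read the connection info back out. This hypothetical sketch relies only on the element names written above and on the "&lt;ip&gt;:3260,&lt;tid&gt; &lt;iqn&gt;" provider_location format stored by _common_be_export():

    import base64
    from xml.etree import ElementTree

    def parse_user_data(encoded):
        root = ElementTree.fromstring(base64.b64decode(encoded))
        volumes = []
        for vol in root.find('volumes').findall('volume'):
            volumes.append({'name': vol.findtext('name'),
                            'ip': vol.findtext('ip'),
                            'iqn': vol.findtext('iscsi_iqn'),
                            'portal': vol.findtext('iscsi_portal')})
        return volumes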