summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorNaveed Massjouni <naveedm9@gmail.com>2011-09-02 01:39:31 -0400
committerNaveed Massjouni <naveedm9@gmail.com>2011-09-02 01:39:31 -0400
commitc5ad2b155a2ad7d7aefea316362cc354d0cf4cf3 (patch)
treeb54c26c6445ddf9d051e9afaa59f5aa2774f2fc8 /nova/tests
parent4f72f6c0fb88baaa680e5dd7973a2b1aa9bd6aaf (diff)
parentd80dc5bbbd1781bd33d9f69b608014e9cc2e41a3 (diff)
Merge from trunk
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/ec2/__init__.py19
-rw-r--r--nova/tests/api/ec2/test_middleware.py (renamed from nova/tests/test_middleware.py)45
-rw-r--r--nova/tests/api/openstack/contrib/test_createserverext.py44
-rw-r--r--nova/tests/api/openstack/contrib/test_floating_ips.py139
-rw-r--r--nova/tests/api/openstack/contrib/test_security_groups.py72
-rw-r--r--nova/tests/api/openstack/contrib/test_simple_tenant_usage.py172
-rw-r--r--nova/tests/api/openstack/contrib/test_vsa.py450
-rw-r--r--nova/tests/api/openstack/fakes.py11
-rw-r--r--nova/tests/api/openstack/test_extensions.py2
-rw-r--r--nova/tests/api/openstack/test_server_actions.py47
-rw-r--r--nova/tests/api/openstack/test_servers.py179
-rw-r--r--nova/tests/image/test_glance.py53
-rw-r--r--nova/tests/integrated/test_servers.py37
-rw-r--r--nova/tests/scheduler/test_scheduler.py13
-rw-r--r--nova/tests/scheduler/test_vsa_scheduler.py641
-rw-r--r--nova/tests/test_adminapi.py2
-rw-r--r--nova/tests/test_cloud.py31
-rw-r--r--nova/tests/test_compute.py38
-rw-r--r--nova/tests/test_context.py33
-rw-r--r--nova/tests/test_db_api.py4
-rw-r--r--nova/tests/test_ipv6.py2
-rw-r--r--nova/tests/test_libvirt.py10
-rw-r--r--nova/tests/test_network.py16
-rw-r--r--nova/tests/test_rpc.py162
-rw-r--r--nova/tests/test_rpc_amqp.py88
-rw-r--r--nova/tests/test_rpc_carrot.py45
-rw-r--r--nova/tests/test_rpc_common.py189
-rw-r--r--nova/tests/test_rpc_kombu.py110
-rw-r--r--nova/tests/test_test.py5
-rw-r--r--nova/tests/test_vsa.py182
-rw-r--r--nova/tests/test_vsa_volumes.py136
-rw-r--r--nova/tests/test_xenapi.py37
-rw-r--r--nova/tests/vmwareapi/db_fakes.py5
-rw-r--r--nova/tests/xenapi/stubs.py4
34 files changed, 2556 insertions, 467 deletions
diff --git a/nova/tests/api/ec2/__init__.py b/nova/tests/api/ec2/__init__.py
new file mode 100644
index 000000000..6dab802f2
--- /dev/null
+++ b/nova/tests/api/ec2/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/test_middleware.py b/nova/tests/api/ec2/test_middleware.py
index 40d117c45..295f6c4ea 100644
--- a/nova/tests/test_middleware.py
+++ b/nova/tests/api/ec2/test_middleware.py
@@ -21,10 +21,13 @@ import webob.dec
import webob.exc
from nova.api import ec2
+from nova import context
+from nova import exception
from nova import flags
from nova import test
from nova import utils
+from xml.etree.ElementTree import fromstring as xml_to_tree
FLAGS = flags.FLAGS
@@ -83,3 +86,45 @@ class LockoutTestCase(test.TestCase):
utils.advance_time_seconds(FLAGS.lockout_window * 60)
self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
+
+
+class ExecutorTestCase(test.TestCase):
+ def setUp(self):
+ super(ExecutorTestCase, self).setUp()
+ self.executor = ec2.Executor()
+
+ def _execute(self, invoke):
+ class Fake(object):
+ pass
+ fake_ec2_request = Fake()
+ fake_ec2_request.invoke = invoke
+
+ fake_wsgi_request = Fake()
+
+ fake_wsgi_request.environ = {
+ 'nova.context': context.get_admin_context(),
+ 'ec2.request': fake_ec2_request,
+ }
+ return self.executor(fake_wsgi_request)
+
+ def _extract_message(self, result):
+ tree = xml_to_tree(result.body)
+ return tree.findall('./Errors')[0].find('Error/Message').text
+
+ def test_instance_not_found(self):
+ def not_found(context):
+ raise exception.InstanceNotFound(instance_id=5)
+ result = self._execute(not_found)
+ self.assertIn('i-00000005', self._extract_message(result))
+
+ def test_snapshot_not_found(self):
+ def not_found(context):
+ raise exception.SnapshotNotFound(snapshot_id=5)
+ result = self._execute(not_found)
+ self.assertIn('snap-00000005', self._extract_message(result))
+
+ def test_volume_not_found(self):
+ def not_found(context):
+ raise exception.VolumeNotFound(volume_id=5)
+ result = self._execute(not_found)
+ self.assertIn('vol-00000005', self._extract_message(result))
diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py
index e5eed14fe..089c8e59d 100644
--- a/nova/tests/api/openstack/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/contrib/test_createserverext.py
@@ -23,6 +23,7 @@ from xml.dom import minidom
import stubout
import webob
+from nova import db
from nova import exception
from nova import flags
from nova import test
@@ -76,6 +77,8 @@ class CreateserverextTest(test.TestCase):
def __init__(self):
self.injected_files = None
self.networks = None
+ self.user_data = None
+ self.db = db
def create(self, *args, **kwargs):
if 'injected_files' in kwargs:
@@ -87,6 +90,10 @@ class CreateserverextTest(test.TestCase):
self.networks = kwargs['requested_networks']
else:
self.networks = None
+
+ if 'user_data' in kwargs:
+ self.user_data = kwargs['user_data']
+
return [{'id': '1234', 'display_name': 'fakeinstance',
'uuid': FAKE_UUID,
'created_at': "",
@@ -119,6 +126,14 @@ class CreateserverextTest(test.TestCase):
server['networks'] = network_list
return {'server': server}
+ def _create_user_data_request_dict(self, user_data):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageRef'] = 1
+ server['flavorRef'] = 1
+ server['user_data'] = user_data
+ return {'server': server}
+
def _get_create_request_json(self, body_dict):
req = webob.Request.blank('/v1.1/123/os-create-server-ext')
req.headers['Content-Type'] = 'application/json'
@@ -178,6 +193,13 @@ class CreateserverextTest(test.TestCase):
self._run_create_instance_with_mock_compute_api(request)
return request, response, compute_api.networks
+ def _create_instance_with_user_data_json(self, networks):
+ body_dict = self._create_user_data_request_dict(networks)
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.user_data
+
def _create_instance_with_networks_xml(self, networks):
body_dict = self._create_networks_request_dict(networks)
request = self._get_create_request_xml(body_dict)
@@ -304,3 +326,25 @@ class CreateserverextTest(test.TestCase):
self.assertEquals(response.status_int, 202)
self.assertEquals(compute_api.networks,
[('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)])
+
+ def test_create_instance_with_userdata(self):
+ user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
+ user_data_contents = base64.b64encode(user_data_contents)
+ request, response, user_data = \
+ self._create_instance_with_user_data_json(user_data_contents)
+ self.assertEquals(response.status_int, 202)
+ self.assertEquals(user_data, user_data_contents)
+
+ def test_create_instance_with_userdata_none(self):
+ user_data_contents = None
+ request, response, user_data = \
+ self._create_instance_with_user_data_json(user_data_contents)
+ self.assertEquals(response.status_int, 202)
+ self.assertEquals(user_data, user_data_contents)
+
+ def test_create_instance_with_userdata_with_non_b64_content(self):
+ user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
+ request, response, user_data = \
+ self._create_instance_with_user_data_json(user_data_contents)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(user_data, None)
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index 568faf867..642f2b841 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -20,9 +20,11 @@ import webob
from nova import compute
from nova import context
from nova import db
-from nova import test
from nova import network
+from nova import rpc
+from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests.api.openstack import test_servers
from nova.api.openstack.contrib.floating_ips import FloatingIPController
@@ -36,14 +38,13 @@ def network_api_get_floating_ip(self, context, id):
def network_api_get_floating_ip_by_ip(self, context, address):
return {'id': 1, 'address': '10.10.10.10',
- 'fixed_ip': {'address': '11.0.0.1'}}
+ 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}},
def network_api_list_floating_ips(self, context):
return [{'id': 1,
'address': '10.10.10.10',
- 'instance': {'id': 11},
- 'fixed_ip': {'address': '10.0.0.1'}},
+ 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}},
{'id': 2,
'address': '10.10.10.11'}]
@@ -60,10 +61,38 @@ def compute_api_associate(self, context, instance_id, floating_ip):
pass
+def network_api_associate(self, context, floating_ip, fixed_ip):
+ pass
+
+
def network_api_disassociate(self, context, floating_address):
pass
+def network_get_instance_nw_info(self, context, instance):
+ info = {
+ 'label': 'fake',
+ 'gateway': 'fake',
+ 'dhcp_server': 'fake',
+ 'broadcast': 'fake',
+ 'mac': 'fake',
+ 'vif_uuid': 'fake',
+ 'rxtx_cap': 'fake',
+ 'dns': [],
+ 'ips': [{'ip': '10.0.0.1'}],
+ 'should_create_bridge': False,
+ 'should_create_vlan': False}
+
+ return [['ignore', info]]
+
+
+def fake_instance_get(context, instance_id):
+ return {
+ "id": 1,
+ "user_id": 'fakeuser',
+ "project_id": '123'}
+
+
class FloatingIpTest(test.TestCase):
address = "10.10.10.10"
@@ -79,23 +108,21 @@ class FloatingIpTest(test.TestCase):
def setUp(self):
super(FloatingIpTest, self).setUp()
- self.controller = FloatingIPController()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "list_floating_ips",
network_api_list_floating_ips)
- self.stubs.Set(network.api.API, "allocate_floating_ip",
- network_api_allocate)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
- self.stubs.Set(compute.api.API, "associate_floating_ip",
- compute_api_associate)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
+ self.stubs.Set(network.api.API, "get_instance_nw_info",
+ network_get_instance_nw_info)
+ self.stubs.Set(db.api, 'instance_get',
+ fake_instance_get)
+
self.context = context.get_admin_context()
self._create_floating_ip()
@@ -124,7 +151,7 @@ class FloatingIpTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
- response = {'floating_ips': [{'instance_id': 11,
+ response = {'floating_ips': [{'instance_id': 1,
'ip': '10.10.10.10',
'fixed_ip': '10.0.0.1',
'id': 1},
@@ -143,7 +170,34 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['instance_id'], None)
+ def test_show_associated_floating_ip(self):
+ def get_floating_ip(self, context, id):
+ return {'id': 1, 'address': '10.10.10.10',
+ 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}
+ self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
+
+ req = webob.Request.blank('/v1.1/123/os-floating-ips/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['floating_ip']['id'], 1)
+ self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
+ self.assertEqual(res_dict['floating_ip']['instance_id'], 1)
+
+ def test_floating_ip_allocate_no_free_ips(self):
+ def fake_call(*args, **kwargs):
+ raise(rpc.RemoteError('NoMoreFloatingIps', '', ''))
+
+ self.stubs.Set(rpc, "call", fake_call)
+ req = webob.Request.blank('/v1.1/123/os-floating-ips')
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_floating_ip_allocate(self):
+ self.stubs.Set(network.api.API, "allocate_floating_ip",
+ network_api_allocate)
req = webob.Request.blank('/v1.1/123/os-floating-ips')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
@@ -165,6 +219,8 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_add_floating_ip_to_instance(self):
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ network_api_associate)
body = dict(addFloatingIp=dict(address='11.0.0.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = "POST"
@@ -174,6 +230,65 @@ class FloatingIpTest(test.TestCase):
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
+ def test_associate_floating_ip_to_instance_wrong_project_id(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ db.floating_ip_update(self.context, self.address, {'project_id': 'bad',
+ 'fixed_ip_id': 1})
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_associate_floating_ip_to_instance_no_project_id(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ db.floating_ip_update(self.context, self.address, {'project_id': None,
+ 'fixed_ip_id': 1})
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_add_associated_floating_ip_to_instance(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+
+ self.disassociated = False
+
+ def fake_network_api_disassociate(local_self, ctx, floating_address):
+ self.disassociated = True
+
+ db.floating_ip_update(self.context, self.address, {'project_id': '123',
+ 'fixed_ip_id': 1})
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ fake_network_api_disassociate)
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 202)
+ self.assertTrue(self.disassociated)
+
def test_remove_floating_ip_from_instance(self):
body = dict(removeFloatingIp=dict(address='11.0.0.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py
index bc1536911..0816a6312 100644
--- a/nova/tests/api/openstack/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/contrib/test_security_groups.py
@@ -360,7 +360,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/invalid/action')
req.headers['Content-Type'] = 'application/json'
@@ -372,7 +372,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_without_body(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(addSecurityGroup=None)
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -382,7 +382,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_no_security_group_name(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(addSecurityGroup=dict())
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -392,7 +392,7 @@ class TestSecurityGroups(test.TestCase):
def test_associate_security_group_name_with_whitespaces(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(addSecurityGroup=dict(name=" "))
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -400,9 +400,9 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_associate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
body = dict(addSecurityGroup=dict(name="test"))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/10000/action')
req.headers['Content-Type'] = 'application/json'
@@ -412,8 +412,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 404)
def test_associate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -424,8 +424,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_associate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
body = dict(addSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -436,12 +436,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_associate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
- nova.db.instance_add_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group')
+ nova.db.api.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
@@ -454,12 +454,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 202)
def test_associate_xml(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
- nova.db.instance_add_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group')
+ nova.db.api.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
@@ -483,7 +483,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_by_invalid_server_id(self):
body = dict(removeSecurityGroup=dict(name='test'))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/invalid/action')
req.headers['Content-Type'] = 'application/json'
@@ -495,7 +495,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_without_body(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(removeSecurityGroup=None)
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -505,7 +505,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_no_security_group_name(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(removeSecurityGroup=dict())
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -515,7 +515,7 @@ class TestSecurityGroups(test.TestCase):
def test_disassociate_security_group_name_with_whitespaces(self):
req = webob.Request.blank('/v1.1/123/servers/1/action')
body = dict(removeSecurityGroup=dict(name=" "))
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = json.dumps(body)
@@ -523,9 +523,9 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_disassociate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
body = dict(removeSecurityGroup=dict(name="test"))
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
req = webob.Request.blank('/v1.1/123/servers/10000/action')
req.headers['Content-Type'] = 'application/json'
@@ -535,8 +535,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 404)
def test_disassociate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
body = dict(removeSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -547,8 +547,8 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_disassociate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
req = webob.Request.blank('/v1.1/123/servers/1/action')
@@ -559,12 +559,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 400)
def test_disassociate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
- nova.db.instance_remove_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group')
+ nova.db.api.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
self.mox.ReplayAll()
@@ -577,12 +577,12 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(response.status_int, 202)
def test_disassociate_xml(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
- nova.db.instance_remove_security_group(mox.IgnoreArg(),
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group')
+ nova.db.api.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stubs.Set(nova.db.api, 'security_group_get_by_name',
return_security_group)
self.mox.ReplayAll()
diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py
new file mode 100644
index 000000000..2430b9d51
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import json
+import webob
+
+from nova import context
+from nova import flags
+from nova import test
+from nova.compute import api
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+
+SERVERS = 5
+TENANTS = 2
+HOURS = 24
+LOCAL_GB = 10
+MEMORY_MB = 1024
+VCPUS = 2
+STOP = datetime.datetime.utcnow()
+START = STOP - datetime.timedelta(hours=HOURS)
+
+
+def fake_instance_type_get(self, context, instance_type_id):
+ return {'id': 1,
+ 'vcpus': VCPUS,
+ 'local_gb': LOCAL_GB,
+ 'memory_mb': MEMORY_MB,
+ 'name':
+ 'fakeflavor'}
+
+
+def get_fake_db_instance(start, end, instance_id, tenant_id):
+ return {'id': instance_id,
+ 'image_ref': '1',
+ 'project_id': tenant_id,
+ 'user_id': 'fakeuser',
+ 'display_name': 'name',
+ 'state_description': 'state',
+ 'instance_type_id': 1,
+ 'launched_at': start,
+ 'terminated_at': end}
+
+
+def fake_instance_get_active_by_window(self, context, begin, end, project_id):
+ return [get_fake_db_instance(START,
+ STOP,
+ x,
+ "faketenant_%s" % (x / SERVERS))
+ for x in xrange(TENANTS * SERVERS)]
+
+
+class SimpleTenantUsageTest(test.TestCase):
+ def setUp(self):
+ super(SimpleTenantUsageTest, self).setUp()
+ self.stubs.Set(api.API, "get_instance_type",
+ fake_instance_type_get)
+ self.stubs.Set(api.API, "get_active_by_window",
+ fake_instance_get_active_by_window)
+ self.admin_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_0',
+ is_admin=True)
+ self.user_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_0',
+ is_admin=False)
+ self.alt_user_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_1',
+ is_admin=False)
+ FLAGS.allow_admin_api = True
+
+ def test_verify_index(self):
+ req = webob.Request.blank(
+ '/v1.1/123/os-simple-tenant-usage?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ usages = res_dict['tenant_usages']
+ from nova import log as logging
+ logging.warn(usages)
+ for i in xrange(TENANTS):
+ self.assertEqual(int(usages[i]['total_hours']),
+ SERVERS * HOURS)
+ self.assertEqual(int(usages[i]['total_local_gb_usage']),
+ SERVERS * LOCAL_GB * HOURS)
+ self.assertEqual(int(usages[i]['total_memory_mb_usage']),
+ SERVERS * MEMORY_MB * HOURS)
+ self.assertEqual(int(usages[i]['total_vcpus_usage']),
+ SERVERS * VCPUS * HOURS)
+ self.assertFalse(usages[i].get('server_usages'))
+
+ def test_verify_detailed_index(self):
+ req = webob.Request.blank(
+ '/v1.1/123/os-simple-tenant-usage?'
+ 'detailed=1&start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ usages = res_dict['tenant_usages']
+ for i in xrange(TENANTS):
+ servers = usages[i]['server_usages']
+ for j in xrange(SERVERS):
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+
+ def test_verify_index_fails_for_nonadmin(self):
+ req = webob.Request.blank(
+ '/v1.1/123/os-simple-tenant-usage?'
+ 'detailed=1&start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 403)
+
+ def test_verify_show(self):
+ req = webob.Request.blank(
+ '/v1.1/faketenant_0/os-simple-tenant-usage/'
+ 'faketenant_0?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+
+ usage = res_dict['tenant_usage']
+ servers = usage['server_usages']
+ self.assertEqual(len(usage['server_usages']), SERVERS)
+ for j in xrange(SERVERS):
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+
+ def test_verify_show_cant_view_other_tenant(self):
+ req = webob.Request.blank(
+ '/v1.1/faketenant_1/os-simple-tenant-usage/'
+ 'faketenant_0?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.alt_user_context))
+ self.assertEqual(res.status_int, 403)
diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py
new file mode 100644
index 000000000..311b6cb8d
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_vsa.py
@@ -0,0 +1,450 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import volume
+from nova import vsa
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.api.openstack.vsa')
+
+last_param = {}
+
+
+def _get_default_vsa_param():
+ return {
+ 'display_name': 'Test_VSA_name',
+ 'display_description': 'Test_VSA_description',
+ 'vc_count': 1,
+ 'instance_type': 'm1.small',
+ 'instance_type_id': 5,
+ 'image_name': None,
+ 'availability_zone': None,
+ 'storage': [],
+ 'shared': False
+ }
+
+
+def stub_vsa_create(self, context, **param):
+ global last_param
+ LOG.debug(_("_create: param=%s"), param)
+ param['id'] = 123
+ param['name'] = 'Test name'
+ param['instance_type_id'] = 5
+ last_param = param
+ return param
+
+
+def stub_vsa_delete(self, context, vsa_id):
+ global last_param
+ last_param = dict(vsa_id=vsa_id)
+
+ LOG.debug(_("_delete: %s"), locals())
+ if vsa_id != '123':
+ raise exception.NotFound
+
+
+def stub_vsa_get(self, context, vsa_id):
+ global last_param
+ last_param = dict(vsa_id=vsa_id)
+
+ LOG.debug(_("_get: %s"), locals())
+ if vsa_id != '123':
+ raise exception.NotFound
+
+ param = _get_default_vsa_param()
+ param['id'] = vsa_id
+ return param
+
+
+def stub_vsa_get_all(self, context):
+ LOG.debug(_("_get_all: %s"), locals())
+ param = _get_default_vsa_param()
+ param['id'] = 123
+ return [param]
+
+
+class VSAApiTest(test.TestCase):
+ def setUp(self):
+ super(VSAApiTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(vsa.api.API, "create", stub_vsa_create)
+ self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete)
+ self.stubs.Set(vsa.api.API, "get", stub_vsa_get)
+ self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all)
+
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSAApiTest, self).tearDown()
+
+ def test_vsa_create(self):
+ global last_param
+ last_param = {}
+
+ vsa = {"displayName": "VSA Test Name",
+ "displayDescription": "VSA Test Desc"}
+ body = dict(vsa=vsa)
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ # Compare if parameters were correctly passed to stub
+ self.assertEqual(last_param['display_name'], "VSA Test Name")
+ self.assertEqual(last_param['display_description'], "VSA Test Desc")
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue('vsa' in resp_dict)
+ self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName'])
+ self.assertEqual(resp_dict['vsa']['displayDescription'],
+ vsa['displayDescription'])
+
+ def test_vsa_create_no_body(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'POST'
+ req.body = json.dumps({})
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 422)
+
+ def test_vsa_delete(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 123
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_delete_invalid_id(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 234
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_show(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 123
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue('vsa' in resp_dict)
+ self.assertEqual(resp_dict['vsa']['id'], str(vsa_id))
+
+ def test_vsa_show_invalid_id(self):
+ global last_param
+ last_param = {}
+
+ vsa_id = 234
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id)
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
+
+ def test_vsa_index(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa')
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+
+ self.assertTrue('vsaSet' in resp_dict)
+ resp_vsas = resp_dict['vsaSet']
+ self.assertEqual(len(resp_vsas), 1)
+
+ resp_vsa = resp_vsas.pop()
+ self.assertEqual(resp_vsa['id'], 123)
+
+ def test_vsa_detail(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/detail')
+ req.method = 'GET'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+
+ self.assertTrue('vsaSet' in resp_dict)
+ resp_vsas = resp_dict['vsaSet']
+ self.assertEqual(len(resp_vsas), 1)
+
+ resp_vsa = resp_vsas.pop()
+ self.assertEqual(resp_vsa['id'], 123)
+
+
+def _get_default_volume_param():
+ return {
+ 'id': 123,
+ 'status': 'available',
+ 'size': 100,
+ 'availability_zone': 'nova',
+ 'created_at': None,
+ 'attach_status': 'detached',
+ 'name': 'vol name',
+ 'display_name': 'Default vol name',
+ 'display_description': 'Default vol description',
+ 'volume_type_id': 1,
+ 'volume_metadata': [],
+ }
+
+
+def stub_get_vsa_volume_type(self, context):
+ return {'id': 1,
+ 'name': 'VSA volume type',
+ 'extra_specs': {'type': 'vsa_volume'}}
+
+
+def stub_volume_create(self, context, size, snapshot_id, name, description,
+ **param):
+ LOG.debug(_("_create: param=%s"), size)
+ vol = _get_default_volume_param()
+ vol['size'] = size
+ vol['display_name'] = name
+ vol['display_description'] = description
+ return vol
+
+
+def stub_volume_update(self, context, **param):
+ LOG.debug(_("_volume_update: param=%s"), param)
+ pass
+
+
+def stub_volume_delete(self, context, **param):
+ LOG.debug(_("_volume_delete: param=%s"), param)
+ pass
+
+
+def stub_volume_get(self, context, volume_id):
+ LOG.debug(_("_volume_get: volume_id=%s"), volume_id)
+ vol = _get_default_volume_param()
+ vol['id'] = volume_id
+ meta = {'key': 'from_vsa_id', 'value': '123'}
+ if volume_id == '345':
+ meta = {'key': 'to_vsa_id', 'value': '123'}
+ vol['volume_metadata'].append(meta)
+ return vol
+
+
+def stub_volume_get_notfound(self, context, volume_id):
+ raise exception.NotFound
+
+
+def stub_volume_get_all(self, context, search_opts):
+ vol = stub_volume_get(self, context, '123')
+ vol['metadata'] = search_opts['metadata']
+ return [vol]
+
+
+def return_vsa(context, vsa_id):
+ return {'id': vsa_id}
+
+
+class VSAVolumeApiTest(test.TestCase):
+
+ def setUp(self, test_obj=None, test_objs=None):
+ super(VSAVolumeApiTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(nova.db.api, 'vsa_get', return_vsa)
+ self.stubs.Set(vsa.api.API, "get_vsa_volume_type",
+ stub_get_vsa_volume_type)
+
+ self.stubs.Set(volume.api.API, "update", stub_volume_update)
+ self.stubs.Set(volume.api.API, "delete", stub_volume_delete)
+ self.stubs.Set(volume.api.API, "get", stub_volume_get)
+ self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all)
+
+ self.context = context.get_admin_context()
+ self.test_obj = test_obj if test_obj else "volume"
+ self.test_objs = test_objs if test_objs else "volumes"
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSAVolumeApiTest, self).tearDown()
+
+ def test_vsa_volume_create(self):
+ self.stubs.Set(volume.api.API, "create", stub_volume_create)
+
+ vol = {"size": 100,
+ "displayName": "VSA Volume Test Name",
+ "displayDescription": "VSA Volume Test Desc"}
+ body = {self.test_obj: vol}
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(fakes.wsgi_app())
+
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = json.loads(resp.body)
+ self.assertTrue(self.test_obj in resp_dict)
+ self.assertEqual(resp_dict[self.test_obj]['size'],
+ vol['size'])
+ self.assertEqual(resp_dict[self.test_obj]['displayName'],
+ vol['displayName'])
+ self.assertEqual(resp_dict[self.test_obj]['displayDescription'],
+ vol['displayDescription'])
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_create_no_body(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ req.method = 'POST'
+ req.body = json.dumps({})
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 422)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_index(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs)
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_detail(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \
+ self.test_objs)
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_show(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 200)
+
+ def test_vsa_volume_show_no_vsa_assignment(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
+ (self.test_objs))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_show_no_volume(self):
+ self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)
+
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
+ (self.test_objs))
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 404)
+
+ def test_vsa_volume_update(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ update = {"status": "available",
+ "displayName": "Test Display name"}
+ body = {self.test_obj: update}
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 202)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete(self):
+ obj_num = 234 if self.test_objs == "volumes" else 345
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \
+ (self.test_objs, obj_num))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 202)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete_no_vsa_assignment(self):
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \
+ (self.test_objs))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_vsa_volume_delete_no_volume(self):
+ self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound)
+
+ req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \
+ (self.test_objs))
+ req.method = 'DELETE'
+ resp = req.get_response(fakes.wsgi_app())
+ if self.test_obj == "volume":
+ self.assertEqual(resp.status_int, 404)
+ else:
+ self.assertEqual(resp.status_int, 400)
+
+
+class VSADriveApiTest(VSAVolumeApiTest):
+ def setUp(self):
+ super(VSADriveApiTest, self).setUp(test_obj="drive",
+ test_objs="drives")
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VSADriveApiTest, self).tearDown()
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index a095dd90a..44681d395 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -107,13 +107,20 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')]
+ def one_key_pair(context, user_id, name):
+ if name == 'key':
+ return dict(name='key', public_key='public_key')
+ else:
+ raise exc.KeypairNotFound(user_id=user_id, name=name)
+
def no_key_pair(context, user_id):
return []
if have_key_pair:
- stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+ stubs.Set(nova.db.api, 'key_pair_get_all_by_user', key_pair)
+ stubs.Set(nova.db.api, 'key_pair_get', one_key_pair)
else:
- stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
+ stubs.Set(nova.db.api, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_image_service(stubs):
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index c78588d65..31443242b 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -95,6 +95,8 @@ class ExtensionControllerTest(test.TestCase):
"Quotas",
"Rescue",
"SecurityGroups",
+ "SimpleTenantUsage",
+ "VSAs",
"VirtualInterfaces",
"Volumes",
"VolumeTypes",
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 3dfdeb79c..b9ef41465 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -10,8 +10,8 @@ from nova import utils
from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
+from nova.compute import vm_states
from nova.compute import instance_types
-from nova.compute import power_state
import nova.db.api
from nova import test
from nova.tests.api.openstack import common
@@ -35,17 +35,19 @@ def return_server_with_attributes(**kwargs):
return _return_server
-def return_server_with_power_state(power_state):
- return return_server_with_attributes(power_state=power_state)
+def return_server_with_state(vm_state, task_state=None):
+ return return_server_with_attributes(vm_state=vm_state,
+ task_state=task_state)
-def return_server_with_uuid_and_power_state(power_state):
- return return_server_with_power_state(power_state)
-
+def return_server_with_uuid_and_state(vm_state, task_state=None):
+ def _return_server(context, id):
+ return return_server_with_state(vm_state, task_state)
+ return _return_server
-def stub_instance(id, power_state=0, metadata=None,
- image_ref="10", flavor_id="1", name=None):
+def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
+ name=None, vm_state=None, task_state=None):
if metadata is not None:
metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
else:
@@ -66,8 +68,8 @@ def stub_instance(id, power_state=0, metadata=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.ACTIVE,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -175,11 +177,11 @@ class ServerActionsTest(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
@@ -242,19 +244,6 @@ class ServerActionsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 500)
- def test_resized_server_has_correct_status(self):
- req = self.webreq('/1', 'GET')
-
- def fake_migration_get(*args):
- return {}
-
- self.stubs.Set(nova.db, 'migration_get_by_instance_and_status',
- fake_migration_get)
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- body = json.loads(res.body)
- self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM')
-
def test_confirm_resize_server(self):
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
@@ -642,11 +631,11 @@ class ServerActionsTestV11(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 87ae19f56..00640c094 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -37,7 +37,8 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.compute.api
from nova.compute import instance_types
-from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
import nova.db.api
import nova.scheduler.api
from nova.db.sqlalchemy.models import Instance
@@ -91,15 +92,18 @@ def return_server_with_addresses(private, public):
return _return_server
-def return_server_with_power_state(power_state):
+def return_server_with_state(vm_state, task_state=None):
def _return_server(context, id):
- return stub_instance(id, power_state=power_state)
+ return stub_instance(id, vm_state=vm_state, task_state=task_state)
return _return_server
-def return_server_with_uuid_and_power_state(power_state):
+def return_server_with_uuid_and_state(vm_state, task_state):
def _return_server(context, id):
- return stub_instance(id, uuid=FAKE_UUID, power_state=power_state)
+ return stub_instance(id,
+ uuid=FAKE_UUID,
+ vm_state=vm_state,
+ task_state=task_state)
return _return_server
@@ -148,9 +152,10 @@ def instance_addresses(context, instance_id):
def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
- public_addresses=None, host=None, power_state=0,
+ public_addresses=None, host=None,
+ vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
- flavor_id="1", interfaces=None, name=None,
+ flavor_id="1", interfaces=None, name=None, key_name='',
access_ipv4=None, access_ipv6=None):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -166,6 +171,11 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
if host is not None:
host = str(host)
+ if key_name:
+ key_data = 'FAKE'
+ else:
+ key_data = ''
+
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
@@ -182,10 +192,10 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
- "key_name": "",
- "key_data": "",
- "state": power_state,
- "state_description": "",
+ "key_name": key_name,
+ "key_data": key_data,
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -345,6 +355,7 @@ class ServersTest(test.TestCase):
"accessIPv4": "",
"accessIPv6": "",
"hostId": '',
+ "key_name": '',
"image": {
"id": "10",
"links": [
@@ -456,7 +467,7 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -474,6 +485,7 @@ class ServersTest(test.TestCase):
"accessIPv4": "",
"accessIPv6": "",
"hostId": '',
+ "key_name": '',
"image": {
"id": "10",
"links": [
@@ -549,8 +561,8 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1, image_ref=image_ref,
- flavor_id=flavor_id)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE,
+ image_ref=image_ref, flavor_id=flavor_id)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -568,6 +580,7 @@ class ServersTest(test.TestCase):
"accessIPv4": "",
"accessIPv6": "",
"hostId": '',
+ "key_name": '',
"image": {
"id": "10",
"links": [
@@ -1171,9 +1184,8 @@ class ServersTest(test.TestCase):
def test_get_servers_allows_status_v1_1(self):
def fake_get_all(compute_self, context, search_opts=None):
self.assertNotEqual(search_opts, None)
- self.assertTrue('state' in search_opts)
- self.assertEqual(set(search_opts['state']),
- set([power_state.RUNNING, power_state.BLOCKED]))
+ self.assertTrue('vm_state' in search_opts)
+ self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE)
return [stub_instance(100)]
self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
@@ -1190,13 +1202,9 @@ class ServersTest(test.TestCase):
def test_get_servers_invalid_status_v1_1(self):
"""Test getting servers by invalid status"""
-
self.flags(allow_admin_api=False)
-
req = webob.Request.blank('/v1.1/fake/servers?status=running')
res = req.get_response(fakes.wsgi_app())
- # The following assert will fail if either of the asserts in
- # fake_get_all() fail
self.assertEqual(res.status_int, 400)
self.assertTrue(res.body.find('Invalid server status') > -1)
@@ -1219,6 +1227,31 @@ class ServersTest(test.TestCase):
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], 100)
+ def test_get_servers_allows_changes_since_v1_1(self):
+ def fake_get_all(compute_self, context, search_opts=None):
+ self.assertNotEqual(search_opts, None)
+ self.assertTrue('changes-since' in search_opts)
+ changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1)
+ self.assertEqual(search_opts['changes-since'], changes_since)
+ self.assertTrue('deleted' not in search_opts)
+ return [stub_instance(100)]
+
+ self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
+
+ params = 'changes-since=2011-01-24T17:08:01Z'
+ req = webob.Request.blank('/v1.1/fake/servers?%s' % params)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ servers = json.loads(res.body)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], 100)
+
+ def test_get_servers_allows_changes_since_bad_value_v1_1(self):
+ params = 'changes-since=asdf'
+ req = webob.Request.blank('/v1.1/fake/servers?%s' % params)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_get_servers_unknown_or_admin_options1(self):
"""Test getting servers by admin-only or unknown options.
This tests when admin_api is off. Make sure the admin and
@@ -1700,6 +1733,7 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(16, len(server['adminPass']))
self.assertEqual(1, server['id'])
+ self.assertEqual("BUILD", server["status"])
self.assertEqual(0, server['progress'])
self.assertEqual('server_test', server['name'])
self.assertEqual(expected_flavor, server['flavor'])
@@ -1707,6 +1741,36 @@ class ServersTest(test.TestCase):
self.assertEqual('1.2.3.4', server['accessIPv4'])
self.assertEqual('fead::1234', server['accessIPv6'])
+ def test_create_instance_v1_1_invalid_key_name(self):
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/flavors/3'
+ body = dict(server=dict(
+ name='server_test', imageRef=image_href, flavorRef=flavor_ref,
+ key_name='nonexistentkey'))
+ req = webob.Request.blank('/v1.1/fake/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_v1_1_valid_key_name(self):
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/flavors/3'
+ body = dict(server=dict(
+ name='server_test', imageRef=image_href, flavorRef=flavor_ref,
+ key_name='key'))
+ req = webob.Request.blank('/v1.1/fake/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
def test_create_instance_v1_1_invalid_flavor_href(self):
self._setup_for_create_instance()
@@ -2429,23 +2493,51 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 204)
self.assertEqual(self.server_delete_called, True)
- def test_shutdown_status(self):
- new_server = return_server_with_power_state(power_state.SHUTDOWN)
- self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTDOWN')
- def test_shutoff_status(self):
- new_server = return_server_with_power_state(power_state.SHUTOFF)
+class TestServerStatus(test.TestCase):
+
+ def _get_with_state(self, vm_state, task_state=None):
+ new_server = return_server_with_state(vm_state, task_state)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+ request = webob.Request.blank('/v1.0/servers/1')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(response.status_int, 200)
+ return json.loads(response.body)
+
+ def test_active(self):
+ response = self._get_with_state(vm_states.ACTIVE)
+ self.assertEqual(response['server']['status'], 'ACTIVE')
+
+ def test_reboot(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING)
+ self.assertEqual(response['server']['status'], 'REBOOT')
+
+ def test_rebuild(self):
+ response = self._get_with_state(vm_states.REBUILDING)
+ self.assertEqual(response['server']['status'], 'REBUILD')
+
+ def test_rebuild_error(self):
+ response = self._get_with_state(vm_states.ERROR)
+ self.assertEqual(response['server']['status'], 'ERROR')
+
+ def test_resize(self):
+ response = self._get_with_state(vm_states.RESIZING)
+ self.assertEqual(response['server']['status'], 'RESIZE')
+
+ def test_verify_resize(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_VERIFY)
+ self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+ def test_password_update(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.UPDATING_PASSWORD)
+ self.assertEqual(response['server']['status'], 'PASSWORD')
+
+ def test_stopped(self):
+ response = self._get_with_state(vm_states.STOPPED)
+ self.assertEqual(response['server']['status'], 'STOPPED')
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
@@ -3203,6 +3295,7 @@ class TestServerInstanceCreation(test.TestCase):
def __init__(self):
self.injected_files = None
self.networks = None
+ self.db = db
def create(self, *args, **kwargs):
if 'injected_files' in kwargs:
@@ -3510,8 +3603,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
- "state_description": "",
+ "vm_state": vm_states.BUILDING,
+ "task_state": None,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -3561,6 +3654,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"id": 1,
"uuid": self.instance['uuid'],
"name": "test_server",
+ "key_name": '',
"links": [
{
"rel": "self",
@@ -3584,6 +3678,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"id": 1,
"uuid": self.instance['uuid'],
"name": "test_server",
+ "key_name": '',
"config_drive": None,
"links": [
{
@@ -3617,6 +3712,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"accessIPv4": "",
"accessIPv6": "",
"hostId": '',
+ "key_name": '',
"image": {
"id": "5",
"links": [
@@ -3656,7 +3752,7 @@ class ServersViewBuilderV11Test(test.TestCase):
def test_build_server_detail_active_status(self):
#set the power state of the instance to running
- self.instance['state'] = 1
+ self.instance['vm_state'] = vm_states.ACTIVE
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
expected_server = {
@@ -3671,6 +3767,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"accessIPv4": "",
"accessIPv6": "",
"hostId": '',
+ "key_name": '',
"image": {
"id": "5",
"links": [
@@ -3722,6 +3819,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
+ "key_name": "",
"status": "BUILD",
"hostId": '',
"image": {
@@ -3777,6 +3875,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
+ "key_name": "",
"status": "BUILD",
"hostId": '',
"image": {
@@ -3839,6 +3938,7 @@ class ServersViewBuilderV11Test(test.TestCase):
"accessIPv4": "",
"accessIPv6": "",
"hostId": '',
+ "key_name": '',
"image": {
"id": "5",
"links": [
@@ -3982,6 +4082,7 @@ class ServerXMLSerializationTest(test.TestCase):
"name": "test_server",
"status": "BUILD",
"hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "key_name": '',
"accessIPv4": "1.2.3.4",
"accessIPv6": "fead::1234",
"image": {
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 0ff508ffa..b1ebd8436 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -20,6 +20,7 @@ import datetime
import unittest
from nova import context
+from nova import exception
from nova import test
from nova.image import glance
@@ -38,7 +39,16 @@ class StubGlanceClient(object):
return self.images[image_id]
def get_images_detailed(self, filters=None, marker=None, limit=None):
- return self.images.itervalues()
+ images = self.images.values()
+ if marker is None:
+ index = 0
+ else:
+ for index, image in enumerate(images):
+ if image['id'] == marker:
+ index += 1
+ break
+ # default to a page size of 3 to ensure we flex the pagination code
+ return images[index:index + 3]
def get_image(self, image_id):
return self.images[image_id], []
@@ -86,23 +96,48 @@ class TestGlanceImageServiceProperties(BaseGlanceTest):
"""Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the
properties dict
"""
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True,
'foo': 'bar',
'properties': {'prop1': 'propvalue1'}}}
self.client.images = fixtures
image_meta = self.service.show(self.context, 'image1')
+ expected = {'id': '1', 'name': 'image1', 'is_public': True,
+ 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}
+ self.assertEqual(image_meta, expected)
+
+ def test_show_raises_when_no_authtoken_in_the_context(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': False,
+ 'foo': 'bar',
+ 'properties': {'prop1': 'propvalue1'}}}
+ self.client.images = fixtures
+ self.context.auth_token = False
+
expected = {'name': 'image1', 'is_public': True,
'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}
+ self.assertRaises(exception.ImageNotFound,
+ self.service.show, self.context, 'image1')
+
+ def test_show_passes_through_to_client_with_authtoken_in_context(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': False,
+ 'foo': 'bar',
+ 'properties': {'prop1': 'propvalue1'}}}
+ self.client.images = fixtures
+ self.context.auth_token = True
+
+ expected = {'name': 'image1', 'is_public': False,
+ 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}
+
+ image_meta = self.service.show(self.context, 'image1')
self.assertEqual(image_meta, expected)
def test_detail_passes_through_to_client(self):
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True,
'foo': 'bar',
'properties': {'prop1': 'propvalue1'}}}
self.client.images = fixtures
image_meta = self.service.detail(self.context)
- expected = [{'name': 'image1', 'is_public': True,
+ expected = [{'id': '1', 'name': 'image1', 'is_public': True,
'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}]
self.assertEqual(image_meta, expected)
@@ -166,6 +201,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest):
def _make_datetime_fixtures(self):
fixtures = {
'image1': {
+ 'id': '1',
'name': 'image1',
'is_public': True,
'created_at': self.NOW_GLANCE_FORMAT,
@@ -173,6 +209,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest):
'deleted_at': self.NOW_GLANCE_FORMAT,
},
'image2': {
+ 'id': '2',
'name': 'image2',
'is_public': True,
'created_at': self.NOW_GLANCE_OLD_FORMAT,
@@ -183,13 +220,17 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest):
return fixtures
def _make_none_datetime_fixtures(self):
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1',
+ 'name': 'image1',
+ 'is_public': True,
'updated_at': None,
'deleted_at': None}}
return fixtures
def _make_blank_datetime_fixtures(self):
- fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ fixtures = {'image1': {'id': '1',
+ 'name': 'image1',
+ 'is_public': True,
'updated_at': '',
'deleted_at': ''}}
return fixtures
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index c2f800689..2cf604d06 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
+ def _wait_for_creation(self, server):
+ retries = 0
+ while server['status'] == 'BUILD':
+ time.sleep(1)
+ server = self.api.get_server(server['id'])
+ print server
+ retries = retries + 1
+ if retries > 5:
+ break
+ return server
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -36,9 +47,9 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_delete_server(self):
"""Creates and deletes a server."""
+ self.flags(stub_network=True)
# Create server
-
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
@@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
server_ids = [server['id'] for server in servers]
self.assertTrue(created_server_id in server_ids)
- # Wait (briefly) for creation
- retries = 0
- while found_server['status'] == 'build':
- LOG.debug("found server: %s" % found_server)
- time.sleep(1)
- found_server = self.api.get_server(created_server_id)
- retries = retries + 1
- if retries > 5:
- break
+ found_server = self._wait_for_creation(found_server)
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
- #self.assertEqual('available', found_server['status'])
+ self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertTrue("image" in server)
@@ -181,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server(self):
"""Rebuild a server."""
+ self.flags(stub_network=True)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
@@ -190,10 +194,12 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
- "imageRef": "https://localhost/v1.1/32278/images/2",
+ "imageRef": "https://localhost/v1.1/32278/images/3",
"name": "blah",
}
@@ -205,12 +211,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual('3', found_server.get('image')['id'])
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server_with_metadata(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
@@ -220,6 +228,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
@@ -247,6 +257,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server_with_metadata_removal(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
@@ -263,6 +274,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 158df2a27..a52dd041a 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,6 +40,7 @@ from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
from nova.compute import power_state
+from nova.compute import vm_states
FLAGS = flags.FLAGS
@@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase):
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 10)
inst['local_gb'] = kwargs.get('local_gb', 20)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
+ inst['task_state'] = kwargs.get('task_state', None)
return db.instance_create(ctxt, inst)
def test_fallback(self):
@@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase):
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['local_gb'] = kwargs.get('local_gb', 30)
inst['launched_on'] = kwargs.get('launghed_on', 'dummy')
- inst['state_description'] = kwargs.get('state_description', 'running')
- inst['state'] = kwargs.get('state', power_state.RUNNING)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['task_state'] = kwargs.get('task_state', None)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase):
block_migration=False)
i_ref = db.instance_get(self.context, instance_id)
- self.assertTrue(i_ref['state_description'] == 'migrating')
+ self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATING)
db.instance_destroy(self.context, instance_id)
db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_src_check_instance_not_running(self):
"""The instance given by instance_id is not running."""
- instance_id = self._create_instance(state_description='migrating')
+ instance_id = self._create_instance(power_state=power_state.NOSTATE)
i_ref = db.instance_get(self.context, instance_id)
try:
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
new file mode 100644
index 000000000..37964f00d
--- /dev/null
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -0,0 +1,641 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+import nova
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.volume import volume_types
+
+from nova.scheduler import vsa as vsa_sched
+from nova.scheduler import driver
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.scheduler.vsa')
+
+scheduled_volumes = []
+scheduled_volume = {}
+global_volume = {}
+
+
+class FakeVsaLeastUsedScheduler(
+ vsa_sched.VsaSchedulerLeastUsedHost):
+ # No need to stub anything at the moment
+ pass
+
+
+class FakeVsaMostAvailCapacityScheduler(
+ vsa_sched.VsaSchedulerMostAvailCapacity):
+ # No need to stub anything at the moment
+ pass
+
+
+class VsaSchedulerTestCase(test.TestCase):
+
+ def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
+ volume_params = []
+ for i in range(num_vols):
+
+ name = 'name_' + str(i)
+ try:
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ except exception.ApiError:
+ # type is already created
+ pass
+
+ volume_type = volume_types.get_volume_type_by_name(self.context,
+ name)
+ volume = {'size': size,
+ 'snapshot_id': None,
+ 'name': 'vol_' + str(i),
+ 'description': None,
+ 'volume_type_id': volume_type['id']}
+ volume_params.append(volume)
+
+ return {'num_volumes': len(volume_params),
+ 'vsa_id': 123,
+ 'volumes': volume_params}
+
+ def _generate_default_service_states(self):
+ service_states = {}
+ for i in range(self.host_num):
+ host = {}
+ hostname = 'host_' + str(i)
+ if hostname in self.exclude_host_list:
+ continue
+
+ host['volume'] = {'timestamp': utils.utcnow(),
+ 'drive_qos_info': {}}
+
+ for j in range(self.drive_type_start_ix,
+ self.drive_type_start_ix + self.drive_type_num):
+ dtype = {}
+ dtype['Name'] = 'name_' + str(j)
+ dtype['DriveType'] = 'type_' + str(j)
+ dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
+ dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
+ dtype['TotalCapacity'] = dtype['TotalDrives'] * \
+ dtype['DriveCapacity']
+ dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
+ dtype['DriveCapacity']
+ dtype['DriveRpm'] = 7200
+ dtype['DifCapable'] = 0
+ dtype['SedCapable'] = 0
+ dtype['PartitionDrive'] = {
+ 'PartitionSize': 0,
+ 'NumOccupiedPartitions': 0,
+ 'NumFreePartitions': 0}
+ dtype['FullDrive'] = {
+ 'NumFreeDrives': dtype['TotalDrives'] - i,
+ 'NumOccupiedDrives': i}
+ host['volume']['drive_qos_info'][dtype['Name']] = dtype
+
+ service_states[hostname] = host
+
+ return service_states
+
+ def _print_service_states(self):
+ for host, host_val in self.service_states.iteritems():
+ LOG.info(_("Host %s"), host)
+ total_used = 0
+ total_available = 0
+ qos = host_val['volume']['drive_qos_info']
+
+ for k, d in qos.iteritems():
+ LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
+ "size %3d, total %4d, used %4d, avail %d",
+ k, d['DriveType'],
+ d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
+ vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity']),
+ vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
+
+ total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity'])
+ total_available += vsa_sched.BYTES_TO_GB(
+ d['AvailableCapacity'])
+ LOG.info("Host %s: used %d, avail %d",
+ host, total_used, total_available)
+
+ def _set_service_states(self, host_num,
+ drive_type_start_ix, drive_type_num,
+ init_num_drives=10,
+ exclude_host_list=[]):
+ self.host_num = host_num
+ self.drive_type_start_ix = drive_type_start_ix
+ self.drive_type_num = drive_type_num
+ self.exclude_host_list = exclude_host_list
+ self.init_num_drives = init_num_drives
+ self.service_states = self._generate_default_service_states()
+
+ def _get_service_states(self):
+ return self.service_states
+
+ def _fake_get_service_states(self):
+ return self._get_service_states()
+
+ def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
+ global scheduled_volumes
+ scheduled_volumes.append(dict(vol=vol,
+ vsa_id=vsa_id,
+ az=availability_zone))
+ name = vol['name']
+ host = vol['host']
+ LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
+ locals())
+ LOG.debug(_("\t vol=%(vol)s"), locals())
+ pass
+
+ def _fake_vsa_update(self, context, vsa_id, values):
+ LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
+ "values=%(values)s"), locals())
+ pass
+
+ def _fake_volume_create(self, context, options):
+ LOG.debug(_("Test: Volume create: %s"), options)
+ options['id'] = 123
+ global global_volume
+ global_volume = options
+ return options
+
+ def _fake_volume_get(self, context, volume_id):
+ LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
+ global global_volume
+ global_volume['id'] = volume_id
+ global_volume['availability_zone'] = None
+ return global_volume
+
+ def _fake_volume_update(self, context, volume_id, values):
+ LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
+ "values=%(values)s"), locals())
+ global scheduled_volume
+ scheduled_volume = {'id': volume_id, 'host': values['host']}
+ pass
+
+ def _fake_service_get_by_args(self, context, host, binary):
+ return "service"
+
+ def _fake_service_is_up_True(self, service):
+ return True
+
+ def _fake_service_is_up_False(self, service):
+ return False
+
+ def setUp(self, sched_class=None):
+ super(VsaSchedulerTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.context = context.get_admin_context()
+
+ if sched_class is None:
+ self.sched = FakeVsaLeastUsedScheduler()
+ else:
+ self.sched = sched_class
+
+ self.host_num = 10
+ self.drive_type_num = 5
+
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(self.sched,
+ '_provision_volume', self._fake_provision_volume)
+ self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
+
+ self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
+ self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+
+ self.created_types_lst = []
+
+ def tearDown(self):
+ for name in self.created_types_lst:
+ volume_types.purge(self.context, name)
+
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCase, self).tearDown()
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_0', 'host_2', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_no_drive_type(self):
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_no_enough_drives(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=3,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=0)
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ # check that all drive capacity was restored to the hosts
+ cur = self._get_service_states()
+ for k, v in prev.iteritems():
+ self.assertEqual(prev[k]['volume']['drive_qos_info'],
+ cur[k]['volume']['drive_qos_info'])
+
+ def test_vsa_sched_wrong_topic(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ states = self._get_service_states()
+ new_states = {}
+ new_states['host_0'] = {'compute': states['host_0']['volume']}
+ self.service_states = new_states
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_provision_volume(self):
+ global global_volume
+ global_volume = {}
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(request_spec['volumes'][0]['name'],
+ global_volume['display_name'])
+
+ def test_vsa_sched_no_free_drives(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ cur = self._get_service_states()
+ cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)
+
+ new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+ self._print_service_states()
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ new_request,
+ availability_zone=None)
+
+ def test_vsa_sched_forced_host(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10)
+
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.assertRaises(exception.HostBinaryNotFound,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_False)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
+
+ def test_vsa_sched_create_single_volume_az(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ def _fake_volume_get_az(context, volume_id):
+ LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
+ return {'id': volume_id, 'availability_zone': 'nova:host_3'}
+
+ self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_3')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_3')
+
+ def test_vsa_sched_create_single_non_vsa_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ global global_volume
+ global_volume = {}
+ global_volume['volume_type_id'] = None
+
+ self.assertRaises(driver.NoValidHost,
+ self.sched.schedule_create_volume,
+ self.context,
+ 123,
+ availability_zone=None)
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_2')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_2')
+
+
+class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
+
+ def setUp(self):
+ super(VsaSchedulerTestCaseMostAvail, self).setUp(
+ FakeVsaMostAvailCapacityScheduler())
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ name = 'name_' + str(drive_ix)
+ volume_types.create(self.context, name,
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': name,
+ 'drive_type': 'type_' + str(drive_ix),
+ 'drive_size': 1 + 100 * (drive_ix)})
+ self.created_types_lst.append(name)
+ volume_type = volume_types.get_volume_type_by_name(self.context, name)
+
+ global_volume['volume_type_id'] = volume_type['id']
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_9')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_9')
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self._print_service_states()
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')
+
+ cur = self._get_service_states()
+ for host in ['host_9', 'host_8', 'host_7']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
index 06cc498ac..aaa633adc 100644
--- a/nova/tests/test_adminapi.py
+++ b/nova/tests/test_adminapi.py
@@ -38,8 +38,6 @@ class AdminApiTestCase(test.TestCase):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
- self.conn = rpc.create_connection()
-
# set up our cloud
self.api = admin.AdminController()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 0793784f8..3fe6a9b42 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -38,6 +38,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
+from nova.compute import vm_states
from nova.image import fake
@@ -51,8 +52,6 @@ class CloudTestCase(test.TestCase):
self.flags(connection_type='fake',
stub_network=True)
- self.conn = rpc.create_connection()
-
# set up our cloud
self.cloud = cloud.CloudController()
@@ -86,13 +85,6 @@ class CloudTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
- def tearDown(self):
- networks = db.project_get_networks(self.context, self.project_id,
- associate=False)
- for network in networks:
- db.network_disassociate(self.context, network['id'])
- super(CloudTestCase, self).tearDown()
-
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user_id, name)
@@ -494,8 +486,11 @@ class CloudTestCase(test.TestCase):
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context, inst1.id)
result = self.cloud.describe_instances(self.context)
- result = result['reservationSet'][0]['instancesSet']
- self.assertEqual(result[0]['instanceId'],
+ result1 = result['reservationSet'][0]['instancesSet']
+ self.assertEqual(result1[0]['instanceId'],
+ ec2utils.id_to_ec2_id(inst1.id))
+ result2 = result['reservationSet'][1]['instancesSet']
+ self.assertEqual(result2[0]['instanceId'],
ec2utils.id_to_ec2_id(inst2.id))
def _block_device_mapping_create(self, instance_id, mappings):
@@ -1163,7 +1158,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
- """Wait for an stopping instance to be a given state"""
+ """Wait for a stopped instance to reach a given state"""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
@@ -1174,12 +1169,16 @@ class CloudTestCase(test.TestCase):
def _wait_for_running(self, instance_id):
def is_running(info):
- return info['state_description'] == 'running'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+ return vm_state == vm_states.ACTIVE and task_state == None
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
- return info['state_description'] == 'stopped'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+ return vm_state == vm_states.STOPPED and task_state == None
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
@@ -1562,7 +1561,7 @@ class CloudTestCase(test.TestCase):
'id': 0,
'root_device_name': '/dev/sdh',
'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
- 'state_description': 'stopping',
+ 'vm_state': vm_states.STOPPED,
'instance_type': {'name': 'fake_type'},
'kernel_id': 1,
'ramdisk_id': 2,
@@ -1606,7 +1605,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
- 'instanceInitiatedShutdownBehavior': 'stop'})
+ 'instanceInitiatedShutdownBehavior': 'stopped'})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 6659b81eb..766a7da9b 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -24,6 +24,7 @@ from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
+from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -763,8 +764,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
@@ -795,8 +796,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
self.compute.db = dbmock
@@ -841,8 +842,8 @@ class ComputeTestCase(test.TestCase):
c = context.get_admin_context()
instance_id = self._create_instance()
i_ref = db.instance_get(c, instance_id)
- db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
- 'state': power_state.PAUSED})
+ db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_id': instance_id})
@@ -903,7 +904,7 @@ class ComputeTestCase(test.TestCase):
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
+ self.assertEqual(power_state.NOSTATE, instances[0]['power_state'])
def test_get_all_by_name_regexp(self):
"""Test searching instances by name (display_name)"""
@@ -1323,25 +1324,28 @@ class ComputeTestCase(test.TestCase):
"""Test searching instances by state"""
c = context.get_admin_context()
- instance_id1 = self._create_instance({'state': power_state.SHUTDOWN})
+ instance_id1 = self._create_instance({
+ 'power_state': power_state.SHUTDOWN,
+ })
instance_id2 = self._create_instance({
- 'id': 2,
- 'state': power_state.RUNNING})
+ 'id': 2,
+ 'power_state': power_state.RUNNING,
+ })
instance_id3 = self._create_instance({
- 'id': 10,
- 'state': power_state.RUNNING})
-
+ 'id': 10,
+ 'power_state': power_state.RUNNING,
+ })
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SUSPENDED})
+ search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SHUTDOWN})
+ search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0].id, instance_id1)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.RUNNING})
+ search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_ids = [instance.id for instance in instances]
self.assertTrue(instance_id2 in instance_ids)
@@ -1349,7 +1353,7 @@ class ComputeTestCase(test.TestCase):
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
- search_opts={'state': [power_state.SHUTDOWN,
+ search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
new file mode 100644
index 000000000..b2507fa59
--- /dev/null
+++ b/nova/tests/test_context.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import test
+
+
+class ContextTestCase(test.TestCase):
+
+ def test_request_context_sets_is_admin(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['admin', 'weasel'])
+ self.assertEquals(ctxt.is_admin, True)
+
+ def test_request_context_sets_is_admin_upcase(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['Admin', 'weasel'])
+ self.assertEquals(ctxt.is_admin, True)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 038c07f40..60d7abd8c 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -91,5 +91,7 @@ class DbApiTestCase(test.TestCase):
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context, inst1.id)
result = db.instance_get_all_by_filters(self.context.elevated(), {})
- self.assertEqual(1, len(result))
+ self.assertEqual(2, len(result))
self.assertEqual(result[0].id, inst2.id)
+ self.assertEqual(result[1].id, inst1.id)
+ self.assertTrue(result[1].deleted)
diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py
index 04c1b5598..e1ba4aafb 100644
--- a/nova/tests/test_ipv6.py
+++ b/nova/tests/test_ipv6.py
@@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase):
def test_to_global_with_bad_prefix(self):
bad_prefix = '82'
self.assertRaises(TypeError, ipv6.to_global,
- bad_prefix,
+ bad_prefix,
'2001:db8::216:3eff:fe33:4455',
'test')
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 6a213b4f0..8c6775b29 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -34,6 +34,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.compute import power_state
+from nova.compute import vm_states
from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall
@@ -674,8 +675,9 @@ class LibvirtConnTestCase(test.TestCase):
# Preparing data
self.compute = utils.import_object(FLAGS.compute_manager)
- instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
- 'state_description': 'running'}
+ instance_dict = {'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['id'],
instance_dict)
@@ -713,8 +715,8 @@ class LibvirtConnTestCase(test.TestCase):
self.compute.rollback_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
- self.assertTrue(instance_ref['state_description'] == 'running')
- self.assertTrue(instance_ref['state'] == power_state.RUNNING)
+ self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
+ self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
volume_ref = db.volume_get(self.context, volume_ref['id'])
self.assertTrue(volume_ref['status'] == 'in-use')
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 0b8539442..25ff940f0 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -371,6 +371,22 @@ class VlanNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
+ def test_cant_associate_associated_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake_floating_ip_get_by_address(context, address):
+ return {'address': '10.10.10.10',
+ 'fixed_ip': {'address': '10.0.0.1'}}
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+ fake_floating_ip_get_by_address)
+
+ self.assertRaises(exception.FloatingIpAlreadyInUse,
+ self.network.associate_floating_ip,
+ ctxt,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
class CommonNetworkTestCase(test.TestCase):
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index ba9c0a859..6b4454747 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -22,168 +22,16 @@ Unit Tests for remote procedure calls using queue
from nova import context
from nova import log as logging
from nova import rpc
-from nova import test
+from nova.tests import test_rpc_common
LOG = logging.getLogger('nova.tests.rpc')
-class RpcTestCase(test.TestCase):
+class RpcTestCase(test_rpc_common._BaseRpcTestCase):
def setUp(self):
+ self.rpc = rpc
super(RpcTestCase, self).setUp()
- self.conn = rpc.create_connection(True)
- self.receiver = TestReceiver()
- self.consumer = rpc.create_consumer(self.conn,
- 'test',
- self.receiver,
- False)
- self.consumer.attach_to_eventlet()
- self.context = context.get_admin_context()
- def test_call_succeed(self):
- value = 42
- result = rpc.call(self.context, 'test', {"method": "echo",
- "args": {"value": value}})
- self.assertEqual(value, result)
-
- def test_call_succeed_despite_multiple_returns(self):
- value = 42
- result = rpc.call(self.context, 'test', {"method": "echo_three_times",
- "args": {"value": value}})
- self.assertEqual(value + 2, result)
-
- def test_call_succeed_despite_multiple_returns_yield(self):
- value = 42
- result = rpc.call(self.context, 'test',
- {"method": "echo_three_times_yield",
- "args": {"value": value}})
- self.assertEqual(value + 2, result)
-
- def test_multicall_succeed_once(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo",
- "args": {"value": value}})
- for i, x in enumerate(result):
- if i > 0:
- self.fail('should only receive one response')
- self.assertEqual(value + i, x)
-
- def test_multicall_succeed_three_times(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo_three_times",
- "args": {"value": value}})
- for i, x in enumerate(result):
- self.assertEqual(value + i, x)
-
- def test_multicall_succeed_three_times_yield(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo_three_times_yield",
- "args": {"value": value}})
- for i, x in enumerate(result):
- self.assertEqual(value + i, x)
-
- def test_context_passed(self):
- """Makes sure a context is passed through rpc call."""
- value = 42
- result = rpc.call(self.context,
- 'test', {"method": "context",
- "args": {"value": value}})
- self.assertEqual(self.context.to_dict(), result)
-
- def test_call_exception(self):
- """Test that exception gets passed back properly.
-
- rpc.call returns a RemoteError object. The value of the
- exception is converted to a string, so we convert it back
- to an int in the test.
-
- """
- value = 42
- self.assertRaises(rpc.RemoteError,
- rpc.call,
- self.context,
- 'test',
- {"method": "fail",
- "args": {"value": value}})
- try:
- rpc.call(self.context,
- 'test',
- {"method": "fail",
- "args": {"value": value}})
- self.fail("should have thrown rpc.RemoteError")
- except rpc.RemoteError as exc:
- self.assertEqual(int(exc.value), value)
-
- def test_nested_calls(self):
- """Test that we can do an rpc.call inside another call."""
- class Nested(object):
- @staticmethod
- def echo(context, queue, value):
- """Calls echo in the passed queue"""
- LOG.debug(_("Nested received %(queue)s, %(value)s")
- % locals())
- # TODO: so, it will replay the context and use the same REQID?
- # that's bizarre.
- ret = rpc.call(context,
- queue,
- {"method": "echo",
- "args": {"value": value}})
- LOG.debug(_("Nested return %s"), ret)
- return value
-
- nested = Nested()
- conn = rpc.create_connection(True)
- consumer = rpc.create_consumer(conn,
- 'nested',
- nested,
- False)
- consumer.attach_to_eventlet()
- value = 42
- result = rpc.call(self.context,
- 'nested', {"method": "echo",
- "args": {"queue": "test",
- "value": value}})
- self.assertEqual(value, result)
-
-
-class TestReceiver(object):
- """Simple Proxy class so the consumer has methods to call.
-
- Uses static methods because we aren't actually storing any state.
-
- """
-
- @staticmethod
- def echo(context, value):
- """Simply returns whatever value is sent in."""
- LOG.debug(_("Received %s"), value)
- return value
-
- @staticmethod
- def context(context, value):
- """Returns dictionary version of context."""
- LOG.debug(_("Received %s"), context)
- return context.to_dict()
-
- @staticmethod
- def echo_three_times(context, value):
- context.reply(value)
- context.reply(value + 1)
- context.reply(value + 2)
-
- @staticmethod
- def echo_three_times_yield(context, value):
- yield value
- yield value + 1
- yield value + 2
-
- @staticmethod
- def fail(context, value):
- """Raises an exception with the value sent in."""
- raise Exception(value)
+ def tearDown(self):
+ super(RpcTestCase, self).tearDown()
diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py
deleted file mode 100644
index 2215a908b..000000000
--- a/nova/tests/test_rpc_amqp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Openstack, LLC.
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests For RPC AMQP.
-"""
-
-from nova import context
-from nova import log as logging
-from nova import rpc
-from nova.rpc import amqp
-from nova import test
-
-
-LOG = logging.getLogger('nova.tests.rpc')
-
-
-class RpcAMQPTestCase(test.TestCase):
- def setUp(self):
- super(RpcAMQPTestCase, self).setUp()
- self.conn = rpc.create_connection(True)
- self.receiver = TestReceiver()
- self.consumer = rpc.create_consumer(self.conn,
- 'test',
- self.receiver,
- False)
- self.consumer.attach_to_eventlet()
- self.context = context.get_admin_context()
-
- def test_connectionpool_single(self):
- """Test that ConnectionPool recycles a single connection."""
- conn1 = amqp.ConnectionPool.get()
- amqp.ConnectionPool.put(conn1)
- conn2 = amqp.ConnectionPool.get()
- amqp.ConnectionPool.put(conn2)
- self.assertEqual(conn1, conn2)
-
-
-class TestReceiver(object):
- """Simple Proxy class so the consumer has methods to call.
-
- Uses static methods because we aren't actually storing any state.
-
- """
-
- @staticmethod
- def echo(context, value):
- """Simply returns whatever value is sent in."""
- LOG.debug(_("Received %s"), value)
- return value
-
- @staticmethod
- def context(context, value):
- """Returns dictionary version of context."""
- LOG.debug(_("Received %s"), context)
- return context.to_dict()
-
- @staticmethod
- def echo_three_times(context, value):
- context.reply(value)
- context.reply(value + 1)
- context.reply(value + 2)
-
- @staticmethod
- def echo_three_times_yield(context, value):
- yield value
- yield value + 1
- yield value + 2
-
- @staticmethod
- def fail(context, value):
- """Raises an exception with the value sent in."""
- raise Exception(value)
diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py
new file mode 100644
index 000000000..57cdebf4f
--- /dev/null
+++ b/nova/tests/test_rpc_carrot.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using carrot
+"""
+
+from nova import context
+from nova import log as logging
+from nova.rpc import impl_carrot
+from nova.tests import test_rpc_common
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase):
+ def setUp(self):
+ self.rpc = impl_carrot
+ super(RpcCarrotTestCase, self).setUp()
+
+ def tearDown(self):
+ super(RpcCarrotTestCase, self).tearDown()
+
+ def test_connectionpool_single(self):
+ """Test that ConnectionPool recycles a single connection."""
+ conn1 = self.rpc.ConnectionPool.get()
+ self.rpc.ConnectionPool.put(conn1)
+ conn2 = self.rpc.ConnectionPool.get()
+ self.rpc.ConnectionPool.put(conn2)
+ self.assertEqual(conn1, conn2)
diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py
new file mode 100644
index 000000000..4ab4e8a0e
--- /dev/null
+++ b/nova/tests/test_rpc_common.py
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls shared between all implementations
+"""
+
+from nova import context
+from nova import log as logging
+from nova.rpc.common import RemoteError
+from nova import test
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class _BaseRpcTestCase(test.TestCase):
+ def setUp(self):
+ super(_BaseRpcTestCase, self).setUp()
+ self.conn = self.rpc.create_connection(True)
+ self.receiver = TestReceiver()
+ self.conn.create_consumer('test', self.receiver, False)
+ self.conn.consume_in_thread()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.conn.close()
+ super(_BaseRpcTestCase, self).tearDown()
+
+ def test_call_succeed(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test', {"method": "echo",
+ "args": {"value": value}})
+ self.assertEqual(value, result)
+
+ def test_call_succeed_despite_multiple_returns(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test',
+ {"method": "echo_three_times",
+ "args": {"value": value}})
+ self.assertEqual(value + 2, result)
+
+ def test_call_succeed_despite_multiple_returns_yield(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test',
+ {"method": "echo_three_times_yield",
+ "args": {"value": value}})
+ self.assertEqual(value + 2, result)
+
+ def test_multicall_succeed_once(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ if i > 0:
+ self.fail('should only receive one response')
+ self.assertEqual(value + i, x)
+
+ def test_multicall_succeed_three_times(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo_three_times",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ self.assertEqual(value + i, x)
+
+ def test_multicall_succeed_three_times_yield(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo_three_times_yield",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ self.assertEqual(value + i, x)
+
+ def test_context_passed(self):
+ """Makes sure a context is passed through rpc call."""
+ value = 42
+ result = self.rpc.call(self.context,
+ 'test', {"method": "context",
+ "args": {"value": value}})
+ self.assertEqual(self.context.to_dict(), result)
+
+ def test_call_exception(self):
+ """Test that exception gets passed back properly.
+
+ rpc.call returns a RemoteError object. The value of the
+ exception is converted to a string, so we convert it back
+ to an int in the test.
+
+ """
+ value = 42
+ self.assertRaises(RemoteError,
+ self.rpc.call,
+ self.context,
+ 'test',
+ {"method": "fail",
+ "args": {"value": value}})
+ try:
+ self.rpc.call(self.context,
+ 'test',
+ {"method": "fail",
+ "args": {"value": value}})
+ self.fail("should have thrown RemoteError")
+ except RemoteError as exc:
+ self.assertEqual(int(exc.value), value)
+
+ def test_nested_calls(self):
+ """Test that we can do an rpc.call inside another call."""
+ class Nested(object):
+ @staticmethod
+ def echo(context, queue, value):
+ """Calls echo in the passed queue"""
+ LOG.debug(_("Nested received %(queue)s, %(value)s")
+ % locals())
+ # TODO: so, it will replay the context and use the same REQID?
+ # that's bizarre.
+ ret = self.rpc.call(context,
+ queue,
+ {"method": "echo",
+ "args": {"value": value}})
+ LOG.debug(_("Nested return %s"), ret)
+ return value
+
+ nested = Nested()
+ conn = self.rpc.create_connection(True)
+ conn.create_consumer('nested', nested, False)
+ conn.consume_in_thread()
+ value = 42
+ result = self.rpc.call(self.context,
+ 'nested', {"method": "echo",
+ "args": {"queue": "test",
+ "value": value}})
+ conn.close()
+ self.assertEqual(value, result)
+
+
+class TestReceiver(object):
+ """Simple Proxy class so the consumer has methods to call.
+
+ Uses static methods because we aren't actually storing any state.
+
+ """
+
+ @staticmethod
+ def echo(context, value):
+ """Simply returns whatever value is sent in."""
+ LOG.debug(_("Received %s"), value)
+ return value
+
+ @staticmethod
+ def context(context, value):
+ """Returns dictionary version of context."""
+ LOG.debug(_("Received %s"), context)
+ return context.to_dict()
+
+ @staticmethod
+ def echo_three_times(context, value):
+ context.reply(value)
+ context.reply(value + 1)
+ context.reply(value + 2)
+
+ @staticmethod
+ def echo_three_times_yield(context, value):
+ yield value
+ yield value + 1
+ yield value + 2
+
+ @staticmethod
+ def fail(context, value):
+ """Raises an exception with the value sent in."""
+ raise Exception(value)
diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py
new file mode 100644
index 000000000..101ed14af
--- /dev/null
+++ b/nova/tests/test_rpc_kombu.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using kombu
+"""
+
+from nova import context
+from nova import log as logging
+from nova import test
+from nova.rpc import impl_kombu
+from nova.tests import test_rpc_common
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase):
+ def setUp(self):
+ self.rpc = impl_kombu
+ super(RpcKombuTestCase, self).setUp()
+
+ def tearDown(self):
+ super(RpcKombuTestCase, self).tearDown()
+
+ def test_reusing_connection(self):
+ """Test that reusing a connection returns same one."""
+ conn_context = self.rpc.create_connection(new=False)
+ conn1 = conn_context.connection
+ conn_context.close()
+ conn_context = self.rpc.create_connection(new=False)
+ conn2 = conn_context.connection
+ conn_context.close()
+ self.assertEqual(conn1, conn2)
+
+ def test_topic_send_receive(self):
+ """Test sending to a topic exchange/queue"""
+
+ conn = self.rpc.create_connection()
+ message = 'topic test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_topic_consumer('a_topic', _callback)
+ conn.topic_send('a_topic', message)
+ conn.consume(limit=1)
+ conn.close()
+
+ self.assertEqual(self.received_message, message)
+
+ def test_direct_send_receive(self):
+ """Test sending to a direct exchange/queue"""
+ conn = self.rpc.create_connection()
+ message = 'direct test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_direct_consumer('a_direct', _callback)
+ conn.direct_send('a_direct', message)
+ conn.consume(limit=1)
+ conn.close()
+
+ self.assertEqual(self.received_message, message)
+
+ @test.skip_test("kombu memory transport seems buggy with fanout queues "
+ "as this test passes when you use rabbit (fake_rabbit=False)")
+ def test_fanout_send_receive(self):
+ """Test sending to a fanout exchange and consuming from 2 queues"""
+
+ conn = self.rpc.create_connection()
+ conn2 = self.rpc.create_connection()
+ message = 'fanout test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_fanout_consumer('a_fanout', _callback)
+ conn2.declare_fanout_consumer('a_fanout', _callback)
+ conn.fanout_send('a_fanout', message)
+
+ conn.consume(limit=1)
+ conn.close()
+ self.assertEqual(self.received_message, message)
+
+ self.received_message = None
+ conn2.consume(limit=1)
+ conn2.close()
+ self.assertEqual(self.received_message, message)
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index 64f11fa45..3482ff6a0 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -40,6 +40,5 @@ class IsolationTestCase(test.TestCase):
connection = rpc.create_connection(new=True)
proxy = NeverCalled()
- consumer = rpc.create_consumer(connection, 'compute',
- proxy, fanout=False)
- consumer.attach_to_eventlet()
+ connection.create_consumer('compute', proxy, fanout=False)
+ connection.consume_in_thread()
diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py
new file mode 100644
index 000000000..3d2d2de13
--- /dev/null
+++ b/nova/tests/test_vsa.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import stubout
+
+from xml.etree import ElementTree
+from xml.etree.ElementTree import Element, SubElement
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import vsa
+from nova import volume
+from nova.volume import volume_types
+from nova.vsa import utils as vsa_utils
+
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa')
+
+
+class VsaTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+
+ FLAGS.quota_volumes = 100
+ FLAGS.quota_gigabytes = 10000
+
+ self.context = context.get_admin_context()
+
+ volume_types.create(self.context,
+ 'SATA_500_7200',
+ extra_specs={'type': 'vsa_drive',
+ 'drive_name': 'SATA_500_7200',
+ 'drive_type': 'SATA',
+ 'drive_size': '500',
+ 'drive_rpm': '7200'})
+
+ def fake_show_by_name(meh, context, name):
+ if name == 'wrong_image_name':
+ LOG.debug(_("Test: Emulate wrong VSA name. Raise"))
+ raise exception.ImageNotFound
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaTestCase, self).tearDown()
+
+ def test_vsa_create_delete_defaults(self):
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['display_name'], param['display_name'])
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_delete_check_in_db(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ vsa_ref = self.vsa_api.create(self.context)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1) + 1)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+ vsa_list3 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list3), len(vsa_list2) - 1)
+
+ def test_vsa_create_delete_high_vc_count(self):
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_wrong_image_name(self):
+ param = {'image_name': 'wrong_image_name'}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_db_error(self):
+
+ def fake_vsa_create(context, options):
+ LOG.debug(_("Test: Emulate DB error. Raise"))
+ raise exception.Error
+
+ self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create)
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context)
+
+ def test_vsa_create_wrong_storage_params(self):
+ vsa_list1 = self.vsa_api.get_all(self.context)
+ param = {'storage': [{'stub': 1}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+ vsa_list2 = self.vsa_api.get_all(self.context)
+ self.assertEqual(len(vsa_list2), len(vsa_list1))
+
+ param = {'storage': [{'drive_name': 'wrong name'}]}
+ self.assertRaises(exception.ApiError,
+ self.vsa_api.create, self.context, **param)
+
+ def test_vsa_create_with_storage(self, multi_vol_creation=True):
+ """Test creation of VSA with BE storage"""
+
+ FLAGS.vsa_multi_vol_creation = multi_vol_creation
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 3)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ param = {'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}],
+ 'shared': True}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.assertEqual(vsa_ref['vol_count'], 15)
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_create_with_storage_single_volumes(self):
+ self.test_vsa_create_with_storage(multi_vol_creation=False)
+
+ def test_vsa_update(self):
+ vsa_ref = self.vsa_api.create(self.context)
+
+ param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
+
+ param = {'vc_count': 2}
+ vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
+ self.assertEqual(vsa_ref['vc_count'], 2)
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
+
+ def test_vsa_generate_user_data(self):
+
+ FLAGS.vsa_multi_vol_creation = False
+ param = {'display_name': 'VSA name test',
+ 'display_description': 'VSA desc test',
+ 'vc_count': 2,
+ 'storage': [{'drive_name': 'SATA_500_7200',
+ 'num_drives': 3}]}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ volumes = self.vsa_api.get_all_vsa_drives(self.context,
+ vsa_ref['id'])
+
+ user_data = vsa_utils.generate_user_data(vsa_ref, volumes)
+ user_data = base64.b64decode(user_data)
+
+ LOG.debug(_("Test: user_data = %s"), user_data)
+
+ elem = ElementTree.fromstring(user_data)
+ self.assertEqual(elem.findtext('name'),
+ param['display_name'])
+ self.assertEqual(elem.findtext('description'),
+ param['display_description'])
+ self.assertEqual(elem.findtext('vc_count'),
+ str(param['vc_count']))
+
+ self.vsa_api.delete(self.context, vsa_ref['id'])
diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
new file mode 100644
index 000000000..b7cd4e840
--- /dev/null
+++ b/nova/tests/test_vsa_volumes.py
@@ -0,0 +1,136 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+from nova import exception
+from nova import flags
+from nova import vsa
+from nova import volume
+from nova import db
+from nova import context
+from nova import test
+from nova import log as logging
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa.volumes')
+
+
+class VsaVolumesTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VsaVolumesTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
+ self.context = context.get_admin_context()
+
+ self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context)
+
+ def fake_show_by_name(meh, context, name):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.image.fake._FakeImageService,
+ 'show_by_name',
+ fake_show_by_name)
+
+ param = {'display_name': 'VSA name test'}
+ vsa_ref = self.vsa_api.create(self.context, **param)
+ self.vsa_id = vsa_ref['id']
+
+ def tearDown(self):
+ if self.vsa_id:
+ self.vsa_api.delete(self.context, self.vsa_id)
+ self.stubs.UnsetAll()
+ super(VsaVolumesTestCase, self).tearDown()
+
+ def _default_volume_param(self):
+ return {
+ 'size': 1,
+ 'snapshot_id': None,
+ 'name': 'Test volume name',
+ 'description': 'Test volume desc name',
+ 'volume_type': self.default_vol_type,
+ 'metadata': {'from_vsa_id': self.vsa_id}
+ }
+
+ def _get_all_volumes_by_vsa(self):
+ return self.volume_api.get_all(self.context,
+ search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}})
+
+ def test_vsa_volume_create_delete(self):
+        """ Check if volume is properly created and deleted. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(1, len(vols2))
+ volume_ref = vols2[0]
+
+ self.assertEqual(volume_ref['display_name'],
+ volume_param['name'])
+ self.assertEqual(volume_ref['display_description'],
+ volume_param['description'])
+ self.assertEqual(volume_ref['size'],
+ volume_param['size'])
+ self.assertEqual(volume_ref['status'],
+ 'creating')
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'available'})
+ self.volume_api.delete(self.context, volume_ref['id'])
+
+ vols3 = self._get_all_volumes_by_vsa()
+        self.assertEqual(1, len(vols3))
+ volume_ref = vols3[0]
+ self.assertEqual(volume_ref['status'],
+ 'deleting')
+
+ def test_vsa_volume_delete_nonavail_volume(self):
+        """ Check volume deletion in different states. """
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ self.volume_api.update(self.context,
+ volume_ref['id'], {'status': 'in-use'})
+ self.assertRaises(exception.ApiError,
+ self.volume_api.delete,
+ self.context, volume_ref['id'])
+
+ def test_vsa_volume_delete_vsa_with_volumes(self):
+        """ Check volume deletion in different states. """
+
+ vols1 = self._get_all_volumes_by_vsa()
+ for i in range(3):
+ volume_param = self._default_volume_param()
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ vols2 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1) + 3, len(vols2))
+
+ self.vsa_api.delete(self.context, self.vsa_id)
+
+ vols3 = self._get_all_volumes_by_vsa()
+ self.assertEqual(len(vols1), len(vols3))
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 2f0559366..45dad3516 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -16,7 +16,6 @@
"""Test suite for XenAPI."""
-import eventlet
import functools
import json
import os
@@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
- def test_parallel_builds(self):
- stubs.stubout_loopingcall_delay(self.stubs)
-
- def _do_build(id, proj, user, *args):
- values = {
- 'id': id,
- 'project_id': proj,
- 'user_id': user,
- 'image_ref': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'architecture': 'x86-64'}
- network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
- {'broadcast': '192.168.0.255',
- 'dns': ['192.168.0.1'],
- 'gateway': '192.168.0.1',
- 'gateway6': 'dead:beef::1',
- 'ip6s': [{'enabled': '1',
- 'ip': 'dead:beef::dcad:beff:feef:0',
- 'netmask': '64'}],
- 'ips': [{'enabled': '1',
- 'ip': '192.168.0.100',
- 'netmask': '255.255.255.0'}],
- 'label': 'fake',
- 'mac': 'DE:AD:BE:EF:00:00',
- 'rxtx_cap': 3})]
- instance = db.instance_create(self.context, values)
- self.conn.spawn(self.context, instance, network_info)
-
- gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id)
- gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id)
- gt1.wait()
- gt2.wait()
-
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index afd672c7a..0d896239a 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -23,6 +23,8 @@ import time
from nova import db
from nova import utils
+from nova.compute import task_states
+from nova.compute import vm_states
def stub_out_db_instance_api(stubs):
@@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs):
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index a6a1febd6..647a4c1df 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -18,6 +18,8 @@
import eventlet
import json
+import random
+
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
@@ -191,6 +193,7 @@ class FakeSessionForVMTests(fake.SessionBase):
vm['power_state'] = 'Running'
vm['is_a_template'] = False
vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
def VM_snapshot(self, session_ref, vm_ref, label):
status = "Running"
@@ -290,6 +293,7 @@ class FakeSessionForMigrationTests(fake.SessionBase):
vm['power_state'] = 'Running'
vm['is_a_template'] = False
vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
def stub_out_migration_methods(stubs):