summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/openstack/contrib/test_createserverext.py13
-rw-r--r--nova/tests/api/openstack/contrib/test_volumes.py14
-rw-r--r--nova/tests/api/openstack/test_extensions.py3
-rw-r--r--nova/tests/api/openstack/test_server_actions.py4
-rw-r--r--nova/tests/api/openstack/test_servers.py183
-rw-r--r--nova/tests/integrated/api/client.py19
-rw-r--r--nova/tests/integrated/test_servers.py36
-rw-r--r--nova/tests/scheduler/test_abstract_scheduler.py75
-rw-r--r--nova/tests/scheduler/test_least_cost_scheduler.py2
-rw-r--r--nova/tests/scheduler/test_scheduler.py487
-rw-r--r--nova/tests/scheduler/test_vsa_scheduler.py30
-rw-r--r--nova/tests/test_compute.py153
-rw-r--r--nova/tests/test_quota.py17
13 files changed, 766 insertions, 270 deletions
diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py
index d2fac26c7..9921b54a3 100644
--- a/nova/tests/api/openstack/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/contrib/test_createserverext.py
@@ -25,6 +25,7 @@ import webob
from nova import db
from nova import exception
from nova import flags
+from nova import rpc
from nova import test
import nova.api.openstack
from nova.tests.api.openstack import fakes
@@ -118,13 +119,15 @@ class CreateserverextTest(test.TestCase):
if 'user_data' in kwargs:
self.user_data = kwargs['user_data']
- return [{'id': '1234', 'display_name': 'fakeinstance',
+ resv_id = None
+
+ return ([{'id': '1234', 'display_name': 'fakeinstance',
'uuid': FAKE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': "",
'updated_at': "",
- 'progress': 0}]
+ 'progress': 0}], resv_id)
def set_admin_password(self, *args, **kwargs):
pass
@@ -133,10 +136,9 @@ class CreateserverextTest(test.TestCase):
self.stubs.Set(nova.compute, 'API',
self._make_stub_method(compute_api))
self.stubs.Set(
- nova.api.openstack.create_instance_helper.CreateInstanceHelper,
+ nova.api.openstack.servers.Controller,
'_get_kernel_ramdisk_from_image',
self._make_stub_method((1, 1)))
-
return compute_api
def _setup_mock_network_api(self):
@@ -399,7 +401,8 @@ class CreateserverextTest(test.TestCase):
self._setup_mock_network_api()
body_dict = self._create_security_group_request_dict(security_groups)
request = self._get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app())
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
self.assertEquals(response.status_int, 202)
def test_get_server_by_id_verify_security_groups_json(self):
diff --git a/nova/tests/api/openstack/contrib/test_volumes.py b/nova/tests/api/openstack/contrib/test_volumes.py
index f61e25d12..9a24b9a54 100644
--- a/nova/tests/api/openstack/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/contrib/test_volumes.py
@@ -29,8 +29,12 @@ FLAGS = flags.FLAGS
def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
+ global _block_device_mapping_seen
+ _block_device_mapping_seen = kwargs.get('block_device_mapping')
+
inst_type = instance_types.get_instance_type_by_flavor_id(2)
- return [{'id': 1,
+ resv_id = None
+ return ([{'id': 1,
'display_name': 'test_server',
'uuid': fake_gen_uuid(),
'instance_type': dict(inst_type),
@@ -42,7 +46,7 @@ def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
'progress': 0
- }]
+ }], resv_id)
def fake_get_instance_nw_info(cls, context, instance):
@@ -73,6 +77,8 @@ class BootFromVolumeTest(test.TestCase):
delete_on_termination=False,
)]
))
+ global _block_device_mapping_seen
+ _block_device_mapping_seen = None
req = webob.Request.blank('/v1.1/fake/os-volumes_boot')
req.method = 'POST'
req.body = json.dumps(body)
@@ -85,3 +91,7 @@ class BootFromVolumeTest(test.TestCase):
self.assertEqual(u'test_server', server['name'])
self.assertEqual(3, int(server['image']['id']))
self.assertEqual(FLAGS.password_length, len(server['adminPass']))
+ self.assertEqual(len(_block_device_mapping_seen), 1)
+ self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
+ self.assertEqual(_block_device_mapping_seen[0]['device_name'],
+ '/dev/vda')
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index a5c6fe65a..4f66f5405 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -87,6 +87,7 @@ class ExtensionControllerTest(test.TestCase):
self.ext_list = [
"Createserverext",
"DeferredDelete",
+ "DiskConfig",
"FlavorExtraSpecs",
"FlavorExtraData",
"Floating_ips",
@@ -102,7 +103,7 @@ class ExtensionControllerTest(test.TestCase):
"VirtualInterfaces",
"Volumes",
"VolumeTypes",
- "DiskConfig",
+ "Zones",
]
self.ext_list.sort()
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 3811fcf0f..04b603237 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -9,7 +9,7 @@ from nova import context
from nova import utils
from nova import exception
from nova import flags
-from nova.api.openstack import create_instance_helper
+from nova.api.openstack import servers
from nova.compute import vm_states
from nova.compute import instance_types
import nova.db.api
@@ -971,7 +971,7 @@ class ServerActionsTestV11(test.TestCase):
class TestServerActionXMLDeserializerV11(test.TestCase):
def setUp(self):
- self.deserializer = create_instance_helper.ServerXMLDeserializerV11()
+ self.deserializer = servers.ServerXMLDeserializerV11()
def tearDown(self):
pass
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 29be7a812..fa67db688 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -33,7 +33,6 @@ from nova import flags
from nova import test
from nova import utils
import nova.api.openstack
-from nova.api.openstack import create_instance_helper
from nova.api.openstack import servers
from nova.api.openstack import xmlutil
import nova.compute.api
@@ -1562,10 +1561,15 @@ class ServersTest(test.TestCase):
def _setup_for_create_instance(self):
"""Shared implementation for tests below that create instance"""
+
+ self.instance_cache_num = 0
+ self.instance_cache = {}
+
def instance_create(context, inst):
inst_type = instance_types.get_instance_type_by_flavor_id(3)
image_ref = 'http://localhost/images/2'
- return {'id': 1,
+ self.instance_cache_num += 1
+ instance = {'id': self.instance_cache_num,
'display_name': 'server_test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
@@ -1574,11 +1578,32 @@ class ServersTest(test.TestCase):
'image_ref': image_ref,
'user_id': 'fake',
'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": self.config_drive,
"progress": 0
}
+ self.instance_cache[instance['id']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache[instance_id]
+
+ def rpc_call_wrapper(context, topic, msg):
+ """Stub out the scheduler creating the instance entry"""
+ if topic == FLAGS.scheduler_topic and \
+ msg['method'] == 'run_instance':
+ request_spec = msg['args']['request_spec']
+ num_instances = request_spec.get('num_instances', 1)
+ instances = []
+ for x in xrange(num_instances):
+ instances.append(instance_create(context,
+ request_spec['instance_properties']))
+ return instances
def server_update(context, id, params):
return instance_create(context, id)
@@ -1601,18 +1626,20 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'project_get_networks',
project_get_networks)
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
+ self.stubs.Set(nova.db.api, 'instance_get', instance_get)
self.stubs.Set(nova.rpc, 'cast', fake_method)
- self.stubs.Set(nova.rpc, 'call', fake_method)
+ self.stubs.Set(nova.rpc, 'call', rpc_call_wrapper)
self.stubs.Set(nova.db.api, 'instance_update', server_update)
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.stubs.Set(
- nova.api.openstack.create_instance_helper.CreateInstanceHelper,
- "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
+ servers.Controller,
+ "_get_kernel_ramdisk_from_image",
+ kernel_ramdisk_mapping)
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
- def _test_create_instance_helper(self):
+ def _test_create_instance(self):
self._setup_for_create_instance()
body = dict(server=dict(
@@ -1636,7 +1663,7 @@ class ServersTest(test.TestCase):
self.assertEqual(FAKE_UUID, server['uuid'])
def test_create_instance(self):
- self._test_create_instance_helper()
+ self._test_create_instance()
def test_create_instance_has_uuid(self):
"""Tests at the db-layer instead of API layer since that's where the
@@ -1648,51 +1675,134 @@ class ServersTest(test.TestCase):
expected = FAKE_UUID
self.assertEqual(instance['uuid'], expected)
- def test_create_instance_via_zones(self):
- """Server generated ReservationID"""
+ def test_create_multiple_instances(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
self._setup_for_create_instance()
- self.flags(allow_admin_api=True)
- body = dict(server=dict(
- name='server_test', imageId=3, flavorId=2,
- metadata={'hello': 'world', 'open': 'stack'},
- personality={}))
- req = webob.Request.blank('/v1.0/zones/boot')
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ 'min_count': 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ 'personality': []
+ }
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ body = json.loads(res.body)
+ self.assertIn('server', body)
- reservation_id = json.loads(res.body)['reservation_id']
- self.assertEqual(res.status_int, 200)
+ def test_create_multiple_instances_resv_id_return(self):
+ """Test creating multiple instances with asking for
+ reservation_id
+ """
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ 'min_count': 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ 'personality': [],
+ 'return_reservation_id': True
+ }
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ body = json.loads(res.body)
+ reservation_id = body.get('reservation_id')
self.assertNotEqual(reservation_id, "")
self.assertNotEqual(reservation_id, None)
self.assertTrue(len(reservation_id) > 1)
- def test_create_instance_via_zones_with_resid(self):
- """User supplied ReservationID"""
+ def test_create_instance_with_user_supplied_reservation_id(self):
+ """Non-admin supplied reservation_id should be ignored."""
self._setup_for_create_instance()
- self.flags(allow_admin_api=True)
- body = dict(server=dict(
- name='server_test', imageId=3, flavorId=2,
- metadata={'hello': 'world', 'open': 'stack'},
- personality={}, reservation_id='myresid'))
- req = webob.Request.blank('/v1.0/zones/boot')
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ 'personality': [],
+ 'reservation_id': 'myresid',
+ 'return_reservation_id': True
+ }
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ res_body = json.loads(res.body)
+ self.assertIn('reservation_id', res_body)
+ self.assertNotEqual(res_body['reservation_id'], 'myresid')
+ def test_create_instance_with_admin_supplied_reservation_id(self):
+ """Admin supplied reservation_id should be honored."""
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/123/images/2'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ 'personality': [],
+ 'reservation_id': 'myresid',
+ 'return_reservation_id': True
+ }
+ }
+
+ req = webob.Request.blank('/v1.1/123/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ context = nova.context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ res = req.get_response(fakes.wsgi_app(fake_auth_context=context))
+ self.assertEqual(res.status_int, 202)
reservation_id = json.loads(res.body)['reservation_id']
- self.assertEqual(res.status_int, 200)
self.assertEqual(reservation_id, "myresid")
def test_create_instance_no_key_pair(self):
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
- self._test_create_instance_helper()
+ self._test_create_instance()
def test_create_instance_no_name(self):
self._setup_for_create_instance()
@@ -2782,7 +2892,7 @@ class TestServerStatus(test.TestCase):
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
def setUp(self):
- self.deserializer = create_instance_helper.ServerXMLDeserializer()
+ self.deserializer = servers.ServerXMLDeserializer()
def test_minimal_request(self):
serial_request = """
@@ -3068,7 +3178,7 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase):
def setUp(self):
super(TestServerCreateRequestXMLDeserializerV11, self).setUp()
- self.deserializer = create_instance_helper.ServerXMLDeserializerV11()
+ self.deserializer = servers.ServerXMLDeserializerV11()
def test_minimal_request(self):
serial_request = """
@@ -3543,10 +3653,12 @@ class TestServerInstanceCreation(test.TestCase):
else:
self.injected_files = None
- return [{'id': '1234', 'display_name': 'fakeinstance',
+ resv_id = None
+
+ return ([{'id': '1234', 'display_name': 'fakeinstance',
'user_id': 'fake',
'project_id': 'fake',
- 'uuid': FAKE_UUID}]
+ 'uuid': FAKE_UUID}], resv_id)
def set_admin_password(self, *args, **kwargs):
pass
@@ -3559,8 +3671,9 @@ class TestServerInstanceCreation(test.TestCase):
compute_api = MockComputeAPI()
self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
self.stubs.Set(
- nova.api.openstack.create_instance_helper.CreateInstanceHelper,
- '_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
+ servers.Controller,
+ '_get_kernel_ramdisk_from_image',
+ make_stub_method((1, 1)))
return compute_api
def _create_personality_request_dict(self, personality_files):
@@ -3821,8 +3934,8 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
@staticmethod
def _get_k_r(image_meta):
"""Rebinding function to a shorter name for convenience"""
- kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. \
- _do_get_kernel_ramdisk_from_image(image_meta)
+ kernel_id, ramdisk_id = servers.Controller.\
+ _do_get_kernel_ramdisk_from_image(image_meta)
return kernel_id, ramdisk_id
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
index 67c35fe6b..c942b0108 100644
--- a/nova/tests/integrated/api/client.py
+++ b/nova/tests/integrated/api/client.py
@@ -16,6 +16,7 @@
import json
import httplib
+import urllib
import urlparse
from nova import log as logging
@@ -100,7 +101,7 @@ class TestOpenStackClient(object):
relative_url = parsed_url.path
if parsed_url.query:
- relative_url = relative_url + parsed_url.query
+ relative_url = relative_url + "?" + parsed_url.query
LOG.info(_("Doing %(method)s on %(relative_url)s") % locals())
if body:
LOG.info(_("Body: %s") % body)
@@ -205,12 +206,24 @@ class TestOpenStackClient(object):
def get_server(self, server_id):
return self.api_get('/servers/%s' % server_id)['server']
- def get_servers(self, detail=True):
+ def get_servers(self, detail=True, search_opts=None):
rel_url = '/servers/detail' if detail else '/servers'
+
+ if search_opts is not None:
+ qparams = {}
+ for opt, val in search_opts.iteritems():
+ qparams[opt] = val
+ if qparams:
+ query_string = "?%s" % urllib.urlencode(qparams)
+ rel_url += query_string
return self.api_get(rel_url)['servers']
def post_server(self, server):
- return self.api_post('/servers', server)['server']
+ response = self.api_post('/servers', server)
+ if 'reservation_id' in response:
+ return response
+ else:
+ return response['server']
def put_server(self, server_id, server):
return self.api_put('/servers/%s' % server_id, server)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index ca3eb7340..4bfad93b1 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -438,6 +438,42 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# Cleanup
self._delete_server(server_id)
+ def test_create_multiple_servers(self):
+ """Creates multiple servers and checks for reservation_id"""
+
+ # Create 2 servers, setting 'return_reservation_id, which should
+ # return a reservation_id
+ server = self._build_minimal_create_server_request()
+ server['min_count'] = 2
+ server['return_reservation_id'] = True
+ post = {'server': server}
+ response = self.api.post_server(post)
+ self.assertIn('reservation_id', response)
+ reservation_id = response['reservation_id']
+ self.assertNotIn(reservation_id, ['', None])
+
+ # Create 1 more server, which should not return a reservation_id
+ server = self._build_minimal_create_server_request()
+ post = {'server': server}
+ created_server = self.api.post_server(post)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # lookup servers created by the first request.
+ servers = self.api.get_servers(detail=True,
+ search_opts={'reservation_id': reservation_id})
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ # The server from the 2nd request should not be there.
+ self.assertEqual(found_server, None)
+ # Should have found 2 servers.
+ self.assertEqual(len(server_map), 2)
+
+ # Cleanup
+ self._delete_server(created_server_id)
+ for server_id in server_map.iterkeys():
+ self._delete_server(server_id)
+
if __name__ == "__main__":
unittest.main()
diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py
index 5549ea453..da25f1544 100644
--- a/nova/tests/scheduler/test_abstract_scheduler.py
+++ b/nova/tests/scheduler/test_abstract_scheduler.py
@@ -20,6 +20,7 @@ import json
import nova.db
+from nova import context
from nova import exception
from nova import rpc
from nova import test
@@ -102,7 +103,7 @@ def fake_empty_call_zone_method(context, method, specs, zones):
was_called = False
-def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
+def fake_provision_resource(context, item, request_spec, kwargs):
global was_called
was_called = True
@@ -118,8 +119,7 @@ def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
was_called = True
-def fake_provision_resource_from_blob(context, item, instance_id,
- request_spec, kwargs):
+def fake_provision_resource_from_blob(context, item, request_spec, kwargs):
global was_called
was_called = True
@@ -185,7 +185,7 @@ class AbstractSchedulerTestCase(test.TestCase):
zm = FakeZoneManager()
sched.set_zone_manager(zm)
- fake_context = {}
+ fake_context = context.RequestContext('user', 'project')
build_plan = sched.select(fake_context,
{'instance_type': {'memory_mb': 512},
'num_instances': 4})
@@ -229,9 +229,10 @@ class AbstractSchedulerTestCase(test.TestCase):
zm = FakeEmptyZoneManager()
sched.set_zone_manager(zm)
- fake_context = {}
+ fake_context = context.RequestContext('user', 'project')
+ request_spec = {}
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
- fake_context, 1,
+ fake_context, request_spec,
dict(host_filter=None, instance_type={}))
def test_schedule_do_not_schedule_with_hint(self):
@@ -250,8 +251,8 @@ class AbstractSchedulerTestCase(test.TestCase):
'blob': "Non-None blob data",
}
- result = sched.schedule_run_instance(None, 1, request_spec)
- self.assertEquals(None, result)
+ instances = sched.schedule_run_instance(None, request_spec)
+ self.assertTrue(instances)
self.assertTrue(was_called)
def test_provision_resource_local(self):
@@ -263,7 +264,7 @@ class AbstractSchedulerTestCase(test.TestCase):
fake_provision_resource_locally)
request_spec = {'hostname': "foo"}
- sched._provision_resource(None, request_spec, 1, request_spec, {})
+ sched._provision_resource(None, request_spec, request_spec, {})
self.assertTrue(was_called)
def test_provision_resource_remote(self):
@@ -275,7 +276,7 @@ class AbstractSchedulerTestCase(test.TestCase):
fake_provision_resource_from_blob)
request_spec = {}
- sched._provision_resource(None, request_spec, 1, request_spec, {})
+ sched._provision_resource(None, request_spec, request_spec, {})
self.assertTrue(was_called)
def test_provision_resource_from_blob_empty(self):
@@ -285,7 +286,7 @@ class AbstractSchedulerTestCase(test.TestCase):
request_spec = {}
self.assertRaises(abstract_scheduler.InvalidBlob,
sched._provision_resource_from_blob,
- None, {}, 1, {}, {})
+ None, {}, {}, {})
def test_provision_resource_from_blob_with_local_blob(self):
"""
@@ -303,20 +304,21 @@ class AbstractSchedulerTestCase(test.TestCase):
# return fake instances
return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'}
- def fake_rpc_cast(*args, **kwargs):
+ def fake_cast_to_compute_host(*args, **kwargs):
pass
self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_local_info)
+ self.stubs.Set(driver, 'cast_to_compute_host',
+ fake_cast_to_compute_host)
self.stubs.Set(compute_api.API,
'create_db_entry_for_new_instance',
fake_create_db_entry_for_new_instance)
- self.stubs.Set(rpc, 'cast', fake_rpc_cast)
build_plan_item = {'blob': "Non-None blob data"}
request_spec = {'image': {}, 'instance_properties': {}}
- sched._provision_resource_from_blob(None, build_plan_item, 1,
+ sched._provision_resource_from_blob(None, build_plan_item,
request_spec, {})
self.assertTrue(was_called)
@@ -335,7 +337,7 @@ class AbstractSchedulerTestCase(test.TestCase):
request_spec = {'blob': "Non-None blob data"}
- sched._provision_resource_from_blob(None, request_spec, 1,
+ sched._provision_resource_from_blob(None, request_spec,
request_spec, {})
self.assertTrue(was_called)
@@ -352,7 +354,7 @@ class AbstractSchedulerTestCase(test.TestCase):
request_spec = {'child_blob': True, 'child_zone': True}
- sched._provision_resource_from_blob(None, request_spec, 1,
+ sched._provision_resource_from_blob(None, request_spec,
request_spec, {})
self.assertTrue(was_called)
@@ -386,7 +388,7 @@ class AbstractSchedulerTestCase(test.TestCase):
zm.service_states = {}
sched.set_zone_manager(zm)
- fake_context = {}
+ fake_context = context.RequestContext('user', 'project')
build_plan = sched.select(fake_context,
{'instance_type': {'memory_mb': 512},
'num_instances': 4})
@@ -394,6 +396,45 @@ class AbstractSchedulerTestCase(test.TestCase):
# 0 from local zones, 12 from remotes
self.assertEqual(12, len(build_plan))
+ def test_run_instance_non_admin(self):
+ """Test creating an instance locally using run_instance, passing
+ a non-admin context. DB actions should work."""
+ sched = FakeAbstractScheduler()
+
+ def fake_cast_to_compute_host(*args, **kwargs):
+ pass
+
+ def fake_zone_get_all_zero(context):
+ # make sure this is called with admin context, even though
+ # we're using user context below
+ self.assertTrue(context.is_admin)
+ return []
+
+ self.stubs.Set(driver, 'cast_to_compute_host',
+ fake_cast_to_compute_host)
+ self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all_zero)
+
+ zm = FakeZoneManager()
+ sched.set_zone_manager(zm)
+
+ fake_context = context.RequestContext('user', 'project')
+
+ request_spec = {
+ 'image': {'properties': {}},
+ 'security_group': [],
+ 'instance_properties': {
+ 'project_id': fake_context.project_id,
+ 'user_id': fake_context.user_id},
+ 'instance_type': {'memory_mb': 256},
+ 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter'
+ }
+
+ instances = sched.schedule_run_instance(fake_context, request_spec)
+ self.assertEqual(len(instances), 1)
+ self.assertFalse(instances[0].get('_is_precooked', False))
+ nova.db.instance_destroy(fake_context, instances[0]['id'])
+
class BaseSchedulerTestCase(test.TestCase):
"""Test case for Base Scheduler."""
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index af58de527..b8847a2bf 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -134,7 +134,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
expected = []
for idx, (hostname, services) in enumerate(hosts):
- caps = copy.deepcopy(services["compute"])
+ caps = copy.deepcopy(services)
# Costs are normalized so over 10 hosts, each host with increasing
# free ram will cost 1/N more. Since the lowest cost host has some
# free ram, we add in the 1/N for the base_cost
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 3a10e9287..77db3520b 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -35,10 +35,13 @@ from nova import service
from nova import test
from nova import rpc
from nova import utils
+from nova.db.sqlalchemy import models
from nova.scheduler import api
from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
+from nova.scheduler.simple import SimpleScheduler
+from nova.scheduler.zone import ZoneScheduler
from nova.compute import power_state
from nova.compute import vm_states
@@ -53,17 +56,86 @@ FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-class FakeContext(object):
- auth_token = None
+def _create_instance_dict(**kwargs):
+ """Create a dictionary for a test instance"""
+ inst = {}
+ # NOTE(jk0): If an integer is passed as the image_ref, the image
+ # service will use the default image service (in this case, the fake).
+ inst['image_ref'] = '1'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = kwargs.get('user_id', 'admin')
+ inst['project_id'] = kwargs.get('project_id', 'fake')
+ inst['instance_type_id'] = '1'
+ if 'host' in kwargs:
+ inst['host'] = kwargs.get('host')
+ inst['vcpus'] = kwargs.get('vcpus', 1)
+ inst['memory_mb'] = kwargs.get('memory_mb', 20)
+ inst['local_gb'] = kwargs.get('local_gb', 30)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
+ inst['task_state'] = kwargs.get('task_state', None)
+ inst['availability_zone'] = kwargs.get('availability_zone', None)
+ inst['ami_launch_index'] = 0
+ inst['launched_on'] = kwargs.get('launched_on', 'dummy')
+ return inst
+
+
+def _create_volume():
+ """Create a test volume"""
+ vol = {}
+ vol['size'] = 1
+ vol['availability_zone'] = 'test'
+ ctxt = context.get_admin_context()
+ return db.volume_create(ctxt, vol)['id']
+
+
+def _create_instance(**kwargs):
+ """Create a test instance"""
+ ctxt = context.get_admin_context()
+ return db.instance_create(ctxt, _create_instance_dict(**kwargs))
+
+
+def _create_instance_from_spec(spec):
+ return _create_instance(**spec['instance_properties'])
+
+
+def _create_request_spec(**kwargs):
+ return dict(instance_properties=_create_instance_dict(**kwargs))
+
+
+def _fake_cast_to_compute_host(context, host, method, **kwargs):
+ global _picked_host
+ _picked_host = host
+
+
+def _fake_cast_to_volume_host(context, host, method, **kwargs):
+ global _picked_host
+ _picked_host = host
+
+
+def _fake_create_instance_db_entry(simple_self, context, request_spec):
+ instance = _create_instance_from_spec(request_spec)
+ global instance_ids
+ instance_ids.append(instance['id'])
+ return instance
+
+
+class FakeContext(context.RequestContext):
+ def __init__(self, *args, **kwargs):
+ super(FakeContext, self).__init__('user', 'project', **kwargs)
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
- def schedule(context, topic, *args, **kwargs):
- return 'fallback_host'
+ def schedule(self, context, topic, method, *args, **kwargs):
+ host = 'fallback_host'
+ driver.cast_to_host(context, topic, host, method, **kwargs)
- def schedule_named_method(context, topic, num):
- return 'named_host'
+ def schedule_named_method(self, context, num=None):
+ topic = 'topic'
+ host = 'named_host'
+ method = 'named_method'
+ driver.cast_to_host(context, topic, host, method, num=num)
class SchedulerTestCase(test.TestCase):
@@ -89,31 +161,16 @@ class SchedulerTestCase(test.TestCase):
return db.service_get(ctxt, s_ref['id'])
- def _create_instance(self, **kwargs):
- """Create a test instance"""
- ctxt = context.get_admin_context()
- inst = {}
- inst['user_id'] = 'admin'
- inst['project_id'] = kwargs.get('project_id', 'fake')
- inst['host'] = kwargs.get('host', 'dummy')
- inst['vcpus'] = kwargs.get('vcpus', 1)
- inst['memory_mb'] = kwargs.get('memory_mb', 10)
- inst['local_gb'] = kwargs.get('local_gb', 20)
- inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
- inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
- inst['task_state'] = kwargs.get('task_state', None)
- return db.instance_create(ctxt, inst)
-
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
ctxt = context.get_admin_context()
rpc.cast(ctxt,
- 'topic.fallback_host',
+ 'fake_topic.fallback_host',
{'method': 'noexist',
'args': {'num': 7}})
self.mox.ReplayAll()
- scheduler.noexist(ctxt, 'topic', num=7)
+ scheduler.noexist(ctxt, 'fake_topic', num=7)
def test_named_method(self):
scheduler = manager.SchedulerManager()
@@ -173,8 +230,8 @@ class SchedulerTestCase(test.TestCase):
scheduler = manager.SchedulerManager()
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
- i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
- i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
+ i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
+ i_ref2 = _create_instance(project_id='p-02', vcpus=3,
host=s_ref['host'])
result = scheduler.show_host_resources(ctxt, s_ref['host'])
@@ -197,7 +254,10 @@ class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
def setUp(self):
super(ZoneSchedulerTestCase, self).setUp()
- self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler')
+ self.flags(
+ scheduler_driver='nova.scheduler.multi.MultiScheduler',
+ compute_scheduler_driver='nova.scheduler.zone.ZoneScheduler',
+ volume_scheduler_driver='nova.scheduler.zone.ZoneScheduler')
def _create_service_model(self, **kwargs):
service = db.sqlalchemy.models.Service()
@@ -214,7 +274,7 @@ class ZoneSchedulerTestCase(test.TestCase):
def test_with_two_zones(self):
scheduler = manager.SchedulerManager()
- ctxt = context.get_admin_context()
+ ctxt = context.RequestContext('user', 'project')
service_list = [self._create_service_model(id=1,
host='host1',
zone='zone1'),
@@ -230,66 +290,53 @@ class ZoneSchedulerTestCase(test.TestCase):
self._create_service_model(id=5,
host='host5',
zone='zone2')]
+
+ request_spec = _create_request_spec(availability_zone='zone1')
+
+ fake_instance = _create_instance_dict(
+ **request_spec['instance_properties'])
+ fake_instance['id'] = 100
+ fake_instance['uuid'] = FAKE_UUID
+
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(db, 'instance_update')
+ # Assumes we're testing with MultiScheduler
+ compute_sched_driver = scheduler.driver.drivers['compute']
+ self.mox.StubOutWithMock(compute_sched_driver,
+ 'create_instance_db_entry')
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+
arg = IgnoreArg()
db.service_get_all_by_topic(arg, arg).AndReturn(service_list)
- self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- rpc.cast(ctxt,
+ compute_sched_driver.create_instance_db_entry(arg,
+ request_spec).AndReturn(fake_instance)
+ db.instance_update(arg, 100, {'host': 'host1', 'scheduled_at': arg})
+ rpc.cast(arg,
'compute.host1',
{'method': 'run_instance',
- 'args': {'instance_id': 'i-ffffffff',
- 'availability_zone': 'zone1'}})
+ 'args': {'instance_id': 100}})
self.mox.ReplayAll()
scheduler.run_instance(ctxt,
'compute',
- instance_id='i-ffffffff',
- availability_zone='zone1')
+ request_spec=request_spec)
class SimpleDriverTestCase(test.TestCase):
"""Test case for simple driver"""
def setUp(self):
super(SimpleDriverTestCase, self).setUp()
+ simple_scheduler = 'nova.scheduler.simple.SimpleScheduler'
self.flags(connection_type='fake',
- stub_network=True,
- max_cores=4,
- max_gigabytes=4,
- network_manager='nova.network.manager.FlatManager',
- volume_driver='nova.volume.driver.FakeISCSIDriver',
- scheduler_driver='nova.scheduler.simple.SimpleScheduler')
+ stub_network=True,
+ max_cores=4,
+ max_gigabytes=4,
+ network_manager='nova.network.manager.FlatManager',
+ volume_driver='nova.volume.driver.FakeISCSIDriver',
+ scheduler_driver='nova.scheduler.multi.MultiScheduler',
+ compute_scheduler_driver=simple_scheduler,
+ volume_scheduler_driver=simple_scheduler)
self.scheduler = manager.SchedulerManager()
self.context = context.get_admin_context()
- self.user_id = 'fake'
- self.project_id = 'fake'
-
- def _create_instance(self, **kwargs):
- """Create a test instance"""
- inst = {}
- # NOTE(jk0): If an integer is passed as the image_ref, the image
- # service will use the default image service (in this case, the fake).
- inst['image_ref'] = '1'
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['instance_type_id'] = '1'
- inst['vcpus'] = kwargs.get('vcpus', 1)
- inst['ami_launch_index'] = 0
- inst['availability_zone'] = kwargs.get('availability_zone', None)
- inst['host'] = kwargs.get('host', 'dummy')
- inst['memory_mb'] = kwargs.get('memory_mb', 20)
- inst['local_gb'] = kwargs.get('local_gb', 30)
- inst['launched_on'] = kwargs.get('launghed_on', 'dummy')
- inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
- inst['task_state'] = kwargs.get('task_state', None)
- inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
- return db.instance_create(self.context, inst)['id']
-
- def _create_volume(self):
- """Create a test volume"""
- vol = {}
- vol['size'] = 1
- vol['availability_zone'] = 'test'
- return db.volume_create(self.context, vol)['id']
def _create_compute_service(self, **kwargs):
"""Create a compute service."""
@@ -369,14 +416,30 @@ class SimpleDriverTestCase(test.TestCase):
'compute',
FLAGS.compute_manager)
compute2.start()
- instance_id1 = self._create_instance()
- compute1.run_instance(self.context, instance_id1)
- instance_id2 = self._create_instance()
- host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id2)
- self.assertEqual(host, 'host2')
- compute1.terminate_instance(self.context, instance_id1)
- db.instance_destroy(self.context, instance_id2)
+
+ global instance_ids
+ instance_ids = []
+ instance_ids.append(_create_instance()['id'])
+ compute1.run_instance(self.context, instance_ids[0])
+
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec()
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+
+ self.assertEqual(_picked_host, 'host2')
+ self.assertEqual(len(instance_ids), 2)
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].get('_is_precooked', False), False)
+
+ compute1.terminate_instance(self.context, instance_ids[0])
+ compute2.terminate_instance(self.context, instance_ids[1])
compute1.kill()
compute2.kill()
@@ -392,14 +455,27 @@ class SimpleDriverTestCase(test.TestCase):
'compute',
FLAGS.compute_manager)
compute2.start()
- instance_id1 = self._create_instance()
- compute1.run_instance(self.context, instance_id1)
- instance_id2 = self._create_instance(availability_zone='nova:host1')
- host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id2)
- self.assertEqual('host1', host)
- compute1.terminate_instance(self.context, instance_id1)
- db.instance_destroy(self.context, instance_id2)
+
+ global instance_ids
+ instance_ids = []
+ instance_ids.append(_create_instance()['id'])
+ compute1.run_instance(self.context, instance_ids[0])
+
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec(availability_zone='nova:host1')
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+ self.assertEqual(_picked_host, 'host1')
+ self.assertEqual(len(instance_ids), 2)
+
+ compute1.terminate_instance(self.context, instance_ids[0])
+ compute1.terminate_instance(self.context, instance_ids[1])
compute1.kill()
compute2.kill()
@@ -414,12 +490,21 @@ class SimpleDriverTestCase(test.TestCase):
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
- instance_id2 = self._create_instance(availability_zone='nova:host1')
+
+ global instance_ids
+ instance_ids = []
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec(availability_zone='nova:host1')
self.assertRaises(driver.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
- instance_id2)
- db.instance_destroy(self.context, instance_id2)
+ request_spec)
compute1.kill()
def test_will_schedule_on_disabled_host_if_specified_no_queue(self):
@@ -430,11 +515,22 @@ class SimpleDriverTestCase(test.TestCase):
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
- instance_id2 = self._create_instance(availability_zone='nova:host1')
- host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id2)
- self.assertEqual('host1', host)
- db.instance_destroy(self.context, instance_id2)
+
+ global instance_ids
+ instance_ids = []
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec(availability_zone='nova:host1')
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+ self.assertEqual(_picked_host, 'host1')
+ self.assertEqual(len(instance_ids), 1)
+ compute1.terminate_instance(self.context, instance_ids[0])
compute1.kill()
def test_too_many_cores_no_queue(self):
@@ -452,17 +548,17 @@ class SimpleDriverTestCase(test.TestCase):
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
compute1.run_instance(self.context, instance_id)
instance_ids1.append(instance_id)
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
compute2.run_instance(self.context, instance_id)
instance_ids2.append(instance_id)
- instance_id = self._create_instance()
+ request_spec = _create_request_spec()
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
- instance_id)
+ request_spec)
for instance_id in instance_ids1:
compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
@@ -481,13 +577,19 @@ class SimpleDriverTestCase(test.TestCase):
'nova-volume',
'volume',
FLAGS.volume_manager)
+
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_volume_host', _fake_cast_to_volume_host)
+
volume2.start()
- volume_id1 = self._create_volume()
+ volume_id1 = _create_volume()
volume1.create_volume(self.context, volume_id1)
- volume_id2 = self._create_volume()
- host = self.scheduler.driver.schedule_create_volume(self.context,
- volume_id2)
- self.assertEqual(host, 'host2')
+ volume_id2 = _create_volume()
+ self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(_picked_host, 'host2')
volume1.delete_volume(self.context, volume_id1)
db.volume_destroy(self.context, volume_id2)
@@ -514,17 +616,30 @@ class SimpleDriverTestCase(test.TestCase):
compute2.kill()
def test_least_busy_host_gets_instance(self):
- """Ensures the host with less cores gets the next one"""
+ """Ensures the host with less cores gets the next one w/ Simple"""
compute1 = self.start_service('compute', host='host1')
compute2 = self.start_service('compute', host='host2')
- instance_id1 = self._create_instance()
- compute1.run_instance(self.context, instance_id1)
- instance_id2 = self._create_instance()
- host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id2)
- self.assertEqual(host, 'host2')
- compute1.terminate_instance(self.context, instance_id1)
- db.instance_destroy(self.context, instance_id2)
+
+ global instance_ids
+ instance_ids = []
+ instance_ids.append(_create_instance()['id'])
+ compute1.run_instance(self.context, instance_ids[0])
+
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec()
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+ self.assertEqual(_picked_host, 'host2')
+ self.assertEqual(len(instance_ids), 2)
+
+ compute1.terminate_instance(self.context, instance_ids[0])
+ compute2.terminate_instance(self.context, instance_ids[1])
compute1.kill()
compute2.kill()
@@ -532,41 +647,64 @@ class SimpleDriverTestCase(test.TestCase):
"""Ensures if you set availability_zone it launches on that zone"""
compute1 = self.start_service('compute', host='host1')
compute2 = self.start_service('compute', host='host2')
- instance_id1 = self._create_instance()
- compute1.run_instance(self.context, instance_id1)
- instance_id2 = self._create_instance(availability_zone='nova:host1')
- host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id2)
- self.assertEqual('host1', host)
- compute1.terminate_instance(self.context, instance_id1)
- db.instance_destroy(self.context, instance_id2)
+
+ global instance_ids
+ instance_ids = []
+ instance_ids.append(_create_instance()['id'])
+ compute1.run_instance(self.context, instance_ids[0])
+
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec(availability_zone='nova:host1')
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+ self.assertEqual(_picked_host, 'host1')
+ self.assertEqual(len(instance_ids), 2)
+
+ compute1.terminate_instance(self.context, instance_ids[0])
+ compute1.terminate_instance(self.context, instance_ids[1])
compute1.kill()
compute2.kill()
- def test_wont_sechedule_if_specified_host_is_down(self):
+ def test_wont_schedule_if_specified_host_is_down(self):
compute1 = self.start_service('compute', host='host1')
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
now = utils.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
- instance_id2 = self._create_instance(availability_zone='nova:host1')
+ request_spec = _create_request_spec(availability_zone='nova:host1')
self.assertRaises(driver.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
- instance_id2)
- db.instance_destroy(self.context, instance_id2)
+ request_spec)
compute1.kill()
def test_will_schedule_on_disabled_host_if_specified(self):
compute1 = self.start_service('compute', host='host1')
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
- instance_id2 = self._create_instance(availability_zone='nova:host1')
- host = self.scheduler.driver.schedule_run_instance(self.context,
- instance_id2)
- self.assertEqual('host1', host)
- db.instance_destroy(self.context, instance_id2)
+
+ global instance_ids
+ instance_ids = []
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _fake_create_instance_db_entry)
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec(availability_zone='nova:host1')
+ instances = self.scheduler.driver.schedule_run_instance(
+ self.context, request_spec)
+ self.assertEqual(_picked_host, 'host1')
+ self.assertEqual(len(instance_ids), 1)
+ compute1.terminate_instance(self.context, instance_ids[0])
compute1.kill()
def test_too_many_cores(self):
@@ -576,18 +714,30 @@ class SimpleDriverTestCase(test.TestCase):
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
compute1.run_instance(self.context, instance_id)
instance_ids1.append(instance_id)
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
compute2.run_instance(self.context, instance_id)
instance_ids2.append(instance_id)
- instance_id = self._create_instance()
+
+ def _create_instance_db_entry(simple_self, context, request_spec):
+ self.fail(_("Shouldn't try to create DB entry when at "
+ "max cores"))
+ self.stubs.Set(SimpleScheduler,
+ 'create_instance_db_entry', _create_instance_db_entry)
+
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_compute_host', _fake_cast_to_compute_host)
+
+ request_spec = _create_request_spec()
+
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
- instance_id)
- db.instance_destroy(self.context, instance_id)
+ request_spec)
for instance_id in instance_ids1:
compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
@@ -599,12 +749,18 @@ class SimpleDriverTestCase(test.TestCase):
"""Ensures the host with less gigabytes gets the next one"""
volume1 = self.start_service('volume', host='host1')
volume2 = self.start_service('volume', host='host2')
- volume_id1 = self._create_volume()
+
+ global _picked_host
+ _picked_host = None
+ self.stubs.Set(driver,
+ 'cast_to_volume_host', _fake_cast_to_volume_host)
+
+ volume_id1 = _create_volume()
volume1.create_volume(self.context, volume_id1)
- volume_id2 = self._create_volume()
- host = self.scheduler.driver.schedule_create_volume(self.context,
- volume_id2)
- self.assertEqual(host, 'host2')
+ volume_id2 = _create_volume()
+ self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(_picked_host, 'host2')
volume1.delete_volume(self.context, volume_id1)
db.volume_destroy(self.context, volume_id2)
volume1.kill()
@@ -617,13 +773,13 @@ class SimpleDriverTestCase(test.TestCase):
volume_ids1 = []
volume_ids2 = []
for index in xrange(FLAGS.max_gigabytes):
- volume_id = self._create_volume()
+ volume_id = _create_volume()
volume1.create_volume(self.context, volume_id)
volume_ids1.append(volume_id)
- volume_id = self._create_volume()
+ volume_id = _create_volume()
volume2.create_volume(self.context, volume_id)
volume_ids2.append(volume_id)
- volume_id = self._create_volume()
+ volume_id = _create_volume()
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_create_volume,
self.context,
@@ -636,13 +792,13 @@ class SimpleDriverTestCase(test.TestCase):
volume2.kill()
def test_scheduler_live_migration_with_volume(self):
- """scheduler_live_migration() works correctly as expected.
+ """schedule_live_migration() works correctly as expected.
Also, checks instance state is changed from 'running' -> 'migrating'.
"""
- instance_id = self._create_instance()
+ instance_id = _create_instance(host='dummy')['id']
i_ref = db.instance_get(self.context, instance_id)
dic = {'instance_id': instance_id, 'size': 1}
v_ref = db.volume_create(self.context, dic)
@@ -680,7 +836,8 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_src_check_instance_not_running(self):
"""The instance given by instance_id is not running."""
- instance_id = self._create_instance(power_state=power_state.NOSTATE)
+ instance_id = _create_instance(
+ power_state=power_state.NOSTATE)['id']
i_ref = db.instance_get(self.context, instance_id)
try:
@@ -695,7 +852,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_src_check_volume_node_not_alive(self):
"""Raise exception when volume node is not alive."""
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
dic = {'instance_id': instance_id, 'size': 1}
v_ref = db.volume_create(self.context, {'instance_id': instance_id,
@@ -715,7 +872,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_src_check_compute_node_not_alive(self):
"""Confirms src-compute node is alive."""
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
t = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
@@ -730,7 +887,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_src_check_works_correctly(self):
"""Confirms this method finishes with no error."""
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
@@ -743,7 +900,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_dest_check_not_alive(self):
"""Confirms exception raises in case dest host does not exist."""
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
t = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
@@ -758,7 +915,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_dest_check_service_same_host(self):
"""Confirms exceptioin raises in case dest and src is same host."""
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
@@ -771,9 +928,9 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_dest_check_service_lack_memory(self):
"""Confirms exception raises when dest doesn't have enough memory."""
- instance_id = self._create_instance()
- instance_id2 = self._create_instance(host='somewhere',
- memory_mb=12)
+ instance_id = _create_instance()['id']
+ instance_id2 = _create_instance(host='somewhere',
+ memory_mb=12)['id']
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host='somewhere')
@@ -787,9 +944,9 @@ class SimpleDriverTestCase(test.TestCase):
def test_block_migration_dest_check_service_lack_disk(self):
"""Confirms exception raises when dest doesn't have enough disk."""
- instance_id = self._create_instance()
- instance_id2 = self._create_instance(host='somewhere',
- local_gb=70)
+ instance_id = _create_instance()['id']
+ instance_id2 = _create_instance(host='somewhere',
+ local_gb=70)['id']
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host='somewhere')
@@ -803,7 +960,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_dest_check_service_works_correctly(self):
"""Confirms method finishes with no error."""
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host='somewhere',
memory_mb_used=5)
@@ -821,7 +978,7 @@ class SimpleDriverTestCase(test.TestCase):
dest = 'dummydest'
# mocks for live_migration_common_check()
- instance_id = self._create_instance()
+ instance_id = _create_instance()['id']
i_ref = db.instance_get(self.context, instance_id)
t1 = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
@@ -855,7 +1012,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_common_check_service_different_hypervisor(self):
"""Original host and dest host has different hypervisor type."""
dest = 'dummydest'
- instance_id = self._create_instance()
+ instance_id = _create_instance(host='dummy')['id']
i_ref = db.instance_get(self.context, instance_id)
# compute service for destination
@@ -880,7 +1037,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_common_check_service_different_version(self):
"""Original host and dest host has different hypervisor version."""
dest = 'dummydest'
- instance_id = self._create_instance()
+ instance_id = _create_instance(host='dummy')['id']
i_ref = db.instance_get(self.context, instance_id)
# compute service for destination
@@ -904,10 +1061,10 @@ class SimpleDriverTestCase(test.TestCase):
db.service_destroy(self.context, s_ref2['id'])
def test_live_migration_common_check_checking_cpuinfo_fail(self):
- """Raise excetion when original host doen't have compatible cpu."""
+ """Raise exception when original host doesn't have compatible cpu."""
dest = 'dummydest'
- instance_id = self._create_instance()
+ instance_id = _create_instance(host='dummy')['id']
i_ref = db.instance_get(self.context, instance_id)
# compute service for destination
@@ -927,7 +1084,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.ReplayAll()
try:
- self.scheduler.driver._live_migration_common_check(self.context,
+ driver._live_migration_common_check(self.context,
i_ref,
dest,
False)
@@ -1021,7 +1178,6 @@ class FakeResource(object):
class ZoneRedirectTest(test.TestCase):
def setUp(self):
super(ZoneRedirectTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
self.stubs.Set(db, 'instance_get_by_uuid',
@@ -1029,7 +1185,6 @@ class ZoneRedirectTest(test.TestCase):
self.flags(enable_zone_routing=True)
def tearDown(self):
- self.stubs.UnsetAll()
super(ZoneRedirectTest, self).tearDown()
def test_trap_found_locally(self):
@@ -1257,12 +1412,10 @@ class FakeNovaClientZones(object):
class CallZoneMethodTest(test.TestCase):
def setUp(self):
super(CallZoneMethodTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
self.stubs.Set(novaclient, 'Client', FakeNovaClientZones)
def tearDown(self):
- self.stubs.UnsetAll()
super(CallZoneMethodTest, self).tearDown()
def test_call_zone_method(self):
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
index 37964f00d..802946e1b 100644
--- a/nova/tests/scheduler/test_vsa_scheduler.py
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -22,6 +22,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.volume import volume_types
@@ -37,6 +38,10 @@ scheduled_volume = {}
global_volume = {}
+def fake_rpc_cast(*args, **kwargs):
+ pass
+
+
class FakeVsaLeastUsedScheduler(
vsa_sched.VsaSchedulerLeastUsedHost):
# No need to stub anything at the moment
@@ -170,12 +175,10 @@ class VsaSchedulerTestCase(test.TestCase):
LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
locals())
LOG.debug(_("\t vol=%(vol)s"), locals())
- pass
def _fake_vsa_update(self, context, vsa_id, values):
LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
"values=%(values)s"), locals())
- pass
def _fake_volume_create(self, context, options):
LOG.debug(_("Test: Volume create: %s"), options)
@@ -196,7 +199,6 @@ class VsaSchedulerTestCase(test.TestCase):
"values=%(values)s"), locals())
global scheduled_volume
scheduled_volume = {'id': volume_id, 'host': values['host']}
- pass
def _fake_service_get_by_args(self, context, host, binary):
return "service"
@@ -209,7 +211,6 @@ class VsaSchedulerTestCase(test.TestCase):
def setUp(self, sched_class=None):
super(VsaSchedulerTestCase, self).setUp()
- self.stubs = stubout.StubOutForTesting()
self.context = context.get_admin_context()
if sched_class is None:
@@ -220,6 +221,7 @@ class VsaSchedulerTestCase(test.TestCase):
self.host_num = 10
self.drive_type_num = 5
+ self.stubs.Set(rpc, 'cast', fake_rpc_cast)
self.stubs.Set(self.sched,
'_get_service_states', self._fake_get_service_states)
self.stubs.Set(self.sched,
@@ -234,8 +236,6 @@ class VsaSchedulerTestCase(test.TestCase):
def tearDown(self):
for name in self.created_types_lst:
volume_types.purge(self.context, name)
-
- self.stubs.UnsetAll()
super(VsaSchedulerTestCase, self).tearDown()
def test_vsa_sched_create_volumes_simple(self):
@@ -333,6 +333,8 @@ class VsaSchedulerTestCase(test.TestCase):
self.stubs.Set(self.sched,
'_get_service_states', self._fake_get_service_states)
self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+ self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+ self.stubs.Set(rpc, 'cast', fake_rpc_cast)
self.sched.schedule_create_volumes(self.context,
request_spec,
@@ -467,10 +469,9 @@ class VsaSchedulerTestCase(test.TestCase):
self.stubs.Set(self.sched,
'service_is_up', self._fake_service_is_up_True)
- host = self.sched.schedule_create_volume(self.context,
- 123, availability_zone=None)
+ self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
- self.assertEqual(host, 'host_3')
self.assertEqual(scheduled_volume['id'], 123)
self.assertEqual(scheduled_volume['host'], 'host_3')
@@ -514,10 +515,9 @@ class VsaSchedulerTestCase(test.TestCase):
global_volume['volume_type_id'] = volume_type['id']
global_volume['size'] = 0
- host = self.sched.schedule_create_volume(self.context,
- 123, availability_zone=None)
+ self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
- self.assertEqual(host, 'host_2')
self.assertEqual(scheduled_volume['id'], 123)
self.assertEqual(scheduled_volume['host'], 'host_2')
@@ -529,7 +529,6 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
FakeVsaMostAvailCapacityScheduler())
def tearDown(self):
- self.stubs.UnsetAll()
super(VsaSchedulerTestCaseMostAvail, self).tearDown()
def test_vsa_sched_create_single_volume(self):
@@ -558,10 +557,9 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
global_volume['volume_type_id'] = volume_type['id']
global_volume['size'] = 0
- host = self.sched.schedule_create_volume(self.context,
- 123, availability_zone=None)
+ self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
- self.assertEqual(host, 'host_9')
self.assertEqual(scheduled_volume['id'], 123)
self.assertEqual(scheduled_volume['host'], 'host_9')
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 86ffc18a6..4da551c93 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -28,6 +28,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova.scheduler import driver as scheduler_driver
from nova import rpc
from nova import test
from nova import utils
@@ -56,6 +57,38 @@ class FakeTime(object):
self.counter += t
+orig_rpc_call = rpc.call
+orig_rpc_cast = rpc.cast
+
+
+def rpc_call_wrapper(context, topic, msg, do_cast=True):
+ """Stub out the scheduler creating the instance entry"""
+ if topic == FLAGS.scheduler_topic and \
+ msg['method'] == 'run_instance':
+ request_spec = msg['args']['request_spec']
+ scheduler = scheduler_driver.Scheduler
+ num_instances = request_spec.get('num_instances', 1)
+ instances = []
+ for x in xrange(num_instances):
+ instance = scheduler().create_instance_db_entry(
+ context, request_spec)
+ encoded = scheduler_driver.encode_instance(instance)
+ instances.append(encoded)
+ return instances
+ else:
+ if do_cast:
+ orig_rpc_cast(context, topic, msg)
+ else:
+ return orig_rpc_call(context, topic, msg)
+
+
+def rpc_cast_wrapper(context, topic, msg):
+ """Stub out the scheduler creating the instance entry in
+ the reservation_id case.
+ """
+ rpc_call_wrapper(context, topic, msg, do_cast=True)
+
+
def nop_report_driver_status(self):
pass
@@ -80,6 +113,8 @@ class ComputeTestCase(test.TestCase):
'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.stubs.Set(rpc, 'call', rpc_call_wrapper)
+ self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
def _create_instance(self, params=None):
"""Create a test instance"""
@@ -142,7 +177,7 @@ class ComputeTestCase(test.TestCase):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
- ref = self.compute_api.create(self.context,
+ (ref, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None, **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
@@ -152,7 +187,7 @@ class ComputeTestCase(test.TestCase):
def test_create_instance_associates_security_groups(self):
"""Make sure create associates security groups"""
group = self._create_group()
- ref = self.compute_api.create(
+ (ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
@@ -212,7 +247,7 @@ class ComputeTestCase(test.TestCase):
('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
('hello_server', 'hello-server')]
for display_name, hostname in cases:
- ref = self.compute_api.create(self.context,
+ (ref, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
display_name=display_name)
try:
@@ -224,7 +259,7 @@ class ComputeTestCase(test.TestCase):
"""Make sure destroying disassociates security groups"""
group = self._create_group()
- ref = self.compute_api.create(
+ (ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
@@ -240,7 +275,7 @@ class ComputeTestCase(test.TestCase):
"""Make sure destroying security groups disassociates instances"""
group = self._create_group()
- ref = self.compute_api.create(
+ (ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
@@ -1398,6 +1433,84 @@ class ComputeTestCase(test.TestCase):
'swap'),
swap_size)
+ def test_reservation_id_one_instance(self):
+        """Verify that a built instance's reservation_id matches
+        the reservation_id returned from create."""
+ (refs, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None)
+ try:
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0]['reservation_id'], resv_id)
+ finally:
+ db.instance_destroy(self.context, refs[0]['id'])
+
+ def test_reservation_ids_two_instances(self):
+        """Verify that building 2 instances at once returns a
+        reservation_id equal to the reservation_id set on both
+        instances.
+ """
+ (refs, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ min_count=2, max_count=2)
+ try:
+ self.assertEqual(len(refs), 2)
+ self.assertNotEqual(resv_id, None)
+ finally:
+ for instance in refs:
+ self.assertEqual(instance['reservation_id'], resv_id)
+ db.instance_destroy(self.context, instance['id'])
+
+ def test_reservation_ids_two_instances_no_wait(self):
+        """Verify that building 2 instances at once, without waiting
+        for instance IDs, returns a reservation_id equal to the
+        reservation_id set on both instances.
+ """
+ (refs, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ min_count=2, max_count=2, wait_for_instances=False)
+ try:
+ self.assertEqual(refs, None)
+ self.assertNotEqual(resv_id, None)
+ finally:
+ instances = self.compute_api.get_all(self.context,
+ search_opts={'reservation_id': resv_id})
+ self.assertEqual(len(instances), 2)
+ for instance in instances:
+ self.assertEqual(instance['reservation_id'], resv_id)
+ db.instance_destroy(self.context, instance['id'])
+
+ def test_create_with_specified_reservation_id(self):
+        """Verify that building instances with an explicitly
+        specified reservation_id sets that reservation_id
+        on each instance.
+ """
+
+ # We need admin context to be able to specify our own
+ # reservation_ids.
+ context = self.context.elevated()
+ # 1 instance
+ (refs, resv_id) = self.compute_api.create(context,
+ instance_types.get_default_instance_type(), None,
+ min_count=1, max_count=1, reservation_id='meow')
+ try:
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(resv_id, 'meow')
+ finally:
+ self.assertEqual(refs[0]['reservation_id'], resv_id)
+ db.instance_destroy(self.context, refs[0]['id'])
+
+ # 2 instances
+ (refs, resv_id) = self.compute_api.create(context,
+ instance_types.get_default_instance_type(), None,
+ min_count=2, max_count=2, reservation_id='woof')
+ try:
+ self.assertEqual(len(refs), 2)
+ self.assertEqual(resv_id, 'woof')
+ finally:
+ for instance in refs:
+ self.assertEqual(instance['reservation_id'], resv_id)
+ db.instance_destroy(self.context, instance['id'])
+
class ComputeTestMinRamMinDisk(test.TestCase):
def setUp(self):
@@ -1405,6 +1518,8 @@ class ComputeTestMinRamMinDisk(test.TestCase):
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
self.context = context.RequestContext('fake', 'fake')
+ self.stubs.Set(rpc, 'call', rpc_call_wrapper)
+ self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
self.fake_image = {
'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -1425,10 +1540,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):
# Now increase the inst_type memory and make sure all is fine.
inst_type['memory_mb'] = 2
- ref = self.compute_api.create(self.context, inst_type, None)
- self.assertTrue(ref)
-
- db.instance_destroy(self.context, ref[0]['id'])
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
def test_create_with_too_little_disk(self):
"""Test an instance type with too little disk space"""
@@ -1447,10 +1561,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):
# Now increase the inst_type disk space and make sure all is fine.
inst_type['local_gb'] = 2
- ref = self.compute_api.create(self.context, inst_type, None)
- self.assertTrue(ref)
-
- db.instance_destroy(self.context, ref[0]['id'])
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
def test_create_just_enough_ram_and_disk(self):
"""Test an instance type with just enough ram and disk space"""
@@ -1466,10 +1579,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):
return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
- ref = self.compute_api.create(self.context, inst_type, None)
- self.assertTrue(ref)
-
- db.instance_destroy(self.context, ref[0]['id'])
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
def test_create_with_no_ram_and_disk_reqs(self):
"""Test an instance type with no min_ram or min_disk"""
@@ -1482,7 +1594,6 @@ class ComputeTestMinRamMinDisk(test.TestCase):
return copy(self.fake_image)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
- ref = self.compute_api.create(self.context, inst_type, None)
- self.assertTrue(ref)
-
- db.instance_destroy(self.context, ref[0]['id'])
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, None)
+ db.instance_destroy(self.context, refs[0]['id'])
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index f4b481ebe..35d48dd62 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -21,9 +21,11 @@ from nova import context
from nova import db
from nova import flags
from nova import quota
+from nova import rpc
from nova import test
from nova import volume
from nova.compute import instance_types
+from nova.scheduler import driver as scheduler_driver
FLAGS = flags.FLAGS
@@ -51,6 +53,21 @@ class QuotaTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id,
self.project_id,
True)
+ orig_rpc_call = rpc.call
+
+ def rpc_call_wrapper(context, topic, msg):
+ """Stub out the scheduler creating the instance entry"""
+ if topic == FLAGS.scheduler_topic and \
+ msg['method'] == 'run_instance':
+ scheduler = scheduler_driver.Scheduler
+ instance = scheduler().create_instance_db_entry(
+ context,
+ msg['args']['request_spec'])
+ return [scheduler_driver.encode_instance(instance)]
+ else:
+ return orig_rpc_call(context, topic, msg)
+
+ self.stubs.Set(rpc, 'call', rpc_call_wrapper)
def _create_instance(self, cores=2):
"""Create a test instance"""