-rw-r--r--  nova/scheduler/driver.py                           5
-rw-r--r--  nova/tests/scheduler/fakes.py                      5
-rw-r--r--  nova/tests/scheduler/test_chance_scheduler.py    146
-rw-r--r--  nova/tests/scheduler/test_multi_scheduler.py     107
-rw-r--r--  nova/tests/scheduler/test_scheduler.py          2534
-rw-r--r--  nova/tests/scheduler/test_vsa_scheduler.py       135
-rw-r--r--  nova/tests/test_utils.py                          34
7 files changed, 1387 insertions, 1579 deletions
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index a0d718d5f..f9fd59ff7 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -344,7 +344,7 @@ class Scheduler(object):
# Checking original host (where the instance was launched) exists.
try:
oservice_refs = db.service_get_all_compute_by_host(context,
- instance_ref['launched_on'])
+ instance_ref['host'])
except exception.NotFound:
raise exception.SourceHostUnavailable()
oservice_ref = oservice_refs[0]['compute_node'][0]
@@ -406,7 +406,6 @@ class Scheduler(object):
:param dest: destination host
"""
-
# Getting total available memory of host
avail = self._get_compute_info(context, dest, 'memory_mb')
@@ -466,7 +465,7 @@ class Scheduler(object):
instance_ref['host'])
ret = rpc.call(context, topic,
{"method": 'get_instance_disk_info',
- "args": {'instance_name': instance_ref.name}})
+ "args": {'instance_name': instance_ref['name']}})
disk_infos = utils.loads(ret)
except rpc.RemoteError:
LOG.exception(_("host %(dest)s is not compatible with "
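Two distinct fixes land in driver.py above. The live-migration source check now resolves the instance's current host rather than 'launched_on', which goes stale once an instance has migrated; and the RPC call now builds its args with mapping-style access, since instance refs are dict-like rows rather than plain objects. A minimal illustration of the access fix (plain Python, not part of the patch):

# Illustrative only: a plain dict stands in for an instance ref.
instance_ref = {'name': 'instance-000001', 'host': 'host1'}

assert instance_ref['name'] == 'instance-000001'  # mapping access works
try:
    instance_ref.name      # attribute access breaks on a plain dict
except AttributeError:
    pass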
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 0b8391a4d..f607b44fa 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -94,6 +94,11 @@ class FakeHostState(host_manager.HostState):
setattr(self, key, val)
+class FakeComputeAPI(object):
+ def create_db_entry_for_new_instance(self, *args, **kwargs):
+ pass
+
+
def mox_host_manager_db_calls(mox, context):
mox.StubOutWithMock(db, 'compute_node_get_all')
mox.StubOutWithMock(db, 'instance_get_all')
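FakeComputeAPI gives the scheduler tests a no-op stand-in for nova.compute.api.API, so scheduling paths that create instance DB entries never reach real compute code. The test cases later in this change install it before constructing their driver via self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI); a standalone sketch of the same wiring, assuming the nova test tree is importable:

# Minimal sketch: replace the class the scheduler driver instantiates,
# which is what stubs.Set effects inside the setUp() methods below.
from nova.compute import api as compute_api
from nova.tests.scheduler import fakes

compute_api.API = fakes.FakeComputeAPI
assert compute_api.API().create_db_entry_for_new_instance() is None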
diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py
index b6a91cdf5..8f82ea16f 100644
--- a/nova/tests/scheduler/test_chance_scheduler.py
+++ b/nova/tests/scheduler/test_chance_scheduler.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
@@ -16,24 +18,29 @@
Tests For Chance Scheduler.
"""
-from nova import test
+import random
+
+from nova import context
+from nova import exception
+from nova.scheduler import driver
from nova.scheduler import chance
+from nova.tests.scheduler import test_scheduler
-class ChanceSchedulerTestCase(test.TestCase):
+class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Chance Scheduler."""
- def test_filter_hosts_avoid_matches(self):
+ driver_cls = chance.ChanceScheduler
+
+ def test_filter_hosts_avoid(self):
"""Test to make sure _filter_hosts() filters original hosts if
avoid_original_host is True."""
- sched = chance.ChanceScheduler()
-
hosts = ['host1', 'host2', 'host3']
request_spec = dict(instance_properties=dict(host='host2'))
filter_properties = {'ignore_hosts': ['host2']}
- filtered = sched._filter_hosts(request_spec, hosts,
+ filtered = self.driver._filter_hosts(request_spec, hosts,
filter_properties=filter_properties)
self.assertEqual(filtered, ['host1', 'host3'])
@@ -41,12 +48,133 @@ class ChanceSchedulerTestCase(test.TestCase):
"""Test to make sure _filter_hosts() does not filter original
hosts if avoid_original_host is False."""
- sched = chance.ChanceScheduler()
-
hosts = ['host1', 'host2', 'host3']
request_spec = dict(instance_properties=dict(host='host2'))
filter_properties = {'ignore_hosts': []}
- filtered = sched._filter_hosts(request_spec, hosts,
+ filtered = self.driver._filter_hosts(request_spec, hosts,
filter_properties=filter_properties)
self.assertEqual(filtered, hosts)
+
+ def test_basic_schedule_run_instance(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ fake_args = (1, 2, 3)
+ fake_kwargs = {'fake_kwarg1': 'fake_value1',
+ 'fake_kwarg2': 'fake_value2'}
+ instance_opts = {'fake_opt1': 'meow'}
+ request_spec = {'num_instances': 2,
+ 'instance_properties': instance_opts}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+ instance1_encoded = {'uuid': 'fake-uuid1', '_is_precooked': False}
+ instance2_encoded = {'uuid': 'fake-uuid2', '_is_precooked': False}
+
+ # create_instance_db_entry() usually does this, but we're
+ # stubbing it.
+ def _add_uuid1(ctxt, request_spec):
+ request_spec['instance_properties']['uuid'] = 'fake-uuid1'
+
+ def _add_uuid2(ctxt, request_spec):
+ request_spec['instance_properties']['uuid'] = 'fake-uuid2'
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'random')
+ self.mox.StubOutWithMock(self.driver, 'create_instance_db_entry')
+ self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
+ self.mox.StubOutWithMock(driver, 'encode_instance')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ # instance 1
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
+ ['host1', 'host2', 'host3', 'host4'])
+ random.random().AndReturn(.5)
+ self.driver.create_instance_db_entry(ctxt,
+ request_spec).WithSideEffects(_add_uuid1).AndReturn(
+ instance1)
+ driver.cast_to_compute_host(ctxt, 'host3', 'run_instance',
+ instance_uuid=instance1['uuid'], **fake_kwargs)
+ driver.encode_instance(instance1).AndReturn(instance1_encoded)
+ # instance 2
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
+ ['host1', 'host2', 'host3', 'host4'])
+ random.random().AndReturn(.2)
+ self.driver.create_instance_db_entry(ctxt,
+ request_spec).WithSideEffects(_add_uuid2).AndReturn(
+ instance2)
+ driver.cast_to_compute_host(ctxt, 'host1', 'run_instance',
+ instance_uuid=instance2['uuid'], **fake_kwargs)
+ driver.encode_instance(instance2).AndReturn(instance2_encoded)
+
+ self.mox.ReplayAll()
+ result = self.driver.schedule_run_instance(ctxt, request_spec,
+ *fake_args, **fake_kwargs)
+ expected = [instance1_encoded, instance2_encoded]
+ self.assertEqual(result, expected)
+
+ def test_basic_schedule_run_instance_no_hosts(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ fake_args = (1, 2, 3)
+ fake_kwargs = {'fake_kwarg1': 'fake_value1',
+ 'fake_kwarg2': 'fake_value2'}
+ instance_opts = 'fake_instance_opts'
+ request_spec = {'num_instances': 2,
+ 'instance_properties': instance_opts}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+
+ # instance 1
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoValidHost,
+ self.driver.schedule_run_instance, ctxt, request_spec,
+ *fake_args, **fake_kwargs)
+
+ def test_basic_schedule_fallback(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ topic = 'fake_topic'
+ method = 'fake_method'
+ fake_args = (1, 2, 3)
+ fake_kwargs = {'fake_kwarg1': 'fake_value1',
+ 'fake_kwarg2': 'fake_value2'}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'random')
+ self.mox.StubOutWithMock(driver, 'cast_to_host')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, topic).AndReturn(
+ ['host1', 'host2', 'host3', 'host4'])
+ random.random().AndReturn(.5)
+ driver.cast_to_host(ctxt, topic, 'host3', method, **fake_kwargs)
+
+ self.mox.ReplayAll()
+ self.driver.schedule(ctxt, topic, method, *fake_args, **fake_kwargs)
+
+ def test_basic_schedule_fallback_no_hosts(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ topic = 'fake_topic'
+ method = 'fake_method'
+ fake_args = (1, 2, 3)
+ fake_kwargs = {'fake_kwarg1': 'fake_value1',
+ 'fake_kwarg2': 'fake_value2'}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, topic).AndReturn([])
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoValidHost,
+ self.driver.schedule, ctxt, topic, method,
+ *fake_args, **fake_kwargs)
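Each test above follows mox's record/replay cycle: stub the collaborators, record the exact calls with canned returns, call ReplayAll(), then exercise the code under test so any missing or unexpected call fails verification. The host expectations are consistent with a pick of hosts[int(random.random() * len(hosts))]: over four hosts, 0.5 lands on 'host3' and 0.2 on 'host1'. A self-contained sketch of the pattern (illustrative, not scheduler code):

import random

import mox

m = mox.Mox()
m.StubOutWithMock(random, 'random')
random.random().AndReturn(.5)    # record phase: expect one call
m.ReplayAll()                    # switch from record to replay

hosts = ['host1', 'host2', 'host3', 'host4']
assert hosts[int(random.random() * len(hosts))] == 'host3'

m.VerifyAll()                    # unmet expectations fail here
m.UnsetStubs()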
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
new file mode 100644
index 000000000..cefc31b7e
--- /dev/null
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -0,0 +1,107 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Multi Scheduler
+"""
+
+from nova.scheduler import driver
+from nova.scheduler import multi
+from nova.tests.scheduler import test_scheduler
+
+
+class FakeComputeScheduler(driver.Scheduler):
+ is_fake_compute = True
+
+ def schedule_run_instance(self, *args, **kwargs):
+ pass
+
+ def schedule_start_instance(self, *args, **kwargs):
+ pass
+
+ def schedule(self, *args, **kwargs):
+ pass
+
+
+class FakeVolumeScheduler(driver.Scheduler):
+ is_fake_volume = True
+
+ def schedule_create_volume(self, *args, **kwargs):
+ pass
+
+ def schedule_create_volumes(self, *args, **kwargs):
+ pass
+
+ def schedule(self, *args, **kwargs):
+ pass
+
+
+class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for multi driver"""
+
+ driver_cls = multi.MultiScheduler
+
+ def setUp(self):
+ super(MultiDriverTestCase, self).setUp()
+ base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
+ compute_cls_name = base_name % 'FakeComputeScheduler'
+ volume_cls_name = base_name % 'FakeVolumeScheduler'
+ self.flags(compute_scheduler_driver=compute_cls_name,
+ volume_scheduler_driver=volume_cls_name)
+ self._manager = multi.MultiScheduler()
+
+ def test_drivers_inited(self):
+ mgr = self._manager
+ self.assertEqual(len(mgr.drivers), 2)
+ self.assertTrue(mgr.drivers['compute'].is_fake_compute)
+ self.assertTrue(mgr.drivers['volume'].is_fake_volume)
+
+ def test_proxy_calls(self):
+ mgr = self._manager
+ compute_driver = mgr.drivers['compute']
+ volume_driver = mgr.drivers['volume']
+
+ test_methods = {compute_driver: ['run_instance', 'start_instance'],
+ volume_driver: ['create_volume', 'create_volumes']}
+
+ for driver, methods in test_methods.iteritems():
+ for method in methods:
+ mgr_func = getattr(mgr, 'schedule_' + method)
+ driver_func = getattr(driver, 'schedule_' + method)
+ self.assertEqual(mgr_func, driver_func)
+
+ def test_schedule_fallback_proxy(self):
+ mgr = self._manager
+
+ self.mox.StubOutWithMock(mgr.drivers['compute'], 'schedule')
+ self.mox.StubOutWithMock(mgr.drivers['volume'], 'schedule')
+
+ ctxt = 'fake_context'
+ method = 'fake_method'
+ fake_args = (1, 2, 3)
+ fake_kwargs = {'fake_kwarg1': 'fake_value1',
+ 'fake_kwarg2': 'fake_value2'}
+
+ mgr.drivers['compute'].schedule(ctxt, 'compute', method,
+ *fake_args, **fake_kwargs)
+ mgr.drivers['volume'].schedule(ctxt, 'volume', method,
+ *fake_args, **fake_kwargs)
+
+ self.mox.ReplayAll()
+ mgr.schedule(ctxt, 'compute', method, *fake_args, **fake_kwargs)
+ mgr.schedule(ctxt, 'volume', method, *fake_args, **fake_kwargs)
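These tests pin down MultiScheduler's contract: one sub-driver per topic, each sub-driver's schedule_* methods exposed directly on the manager (test_proxy_calls compares the bound methods for equality), and the generic schedule() fallback routed by topic. An illustrative reduction of that dispatch (not the actual multi.MultiScheduler source):

class MultiSchedulerSketch(object):
    """Hypothetical reduction of the behavior the tests verify."""

    def __init__(self, drivers):
        # e.g. {'compute': FakeComputeScheduler(),
        #       'volume': FakeVolumeScheduler()}
        self.drivers = drivers
        # Re-export each sub-driver's bound schedule_* methods so that
        # mgr.schedule_run_instance is the compute driver's own method,
        # as the bound-method equality assertion above requires.
        for drv in drivers.values():
            for name in dir(drv):
                if name.startswith('schedule_'):
                    setattr(self, name, getattr(drv, name))

    def schedule(self, context, topic, method, *args, **kwargs):
        # Fallback proxy: route the generic call by topic.
        return self.drivers[topic].schedule(context, topic, method,
                                            *args, **kwargs)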
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index b612b86ac..19591c080 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -20,1637 +20,1181 @@ Tests For Scheduler
"""
import datetime
-import mox
-
-from novaclient import v1_1 as novaclient
-from novaclient import exceptions as novaclient_exceptions
+import json
+from nova.compute import api as compute_api
+from nova.compute import power_state
+from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova import service
-from nova import test
from nova import rpc
-from nova import utils
-from nova.scheduler import api
+from nova.rpc import common as rpc_common
from nova.scheduler import driver
from nova.scheduler import manager
-from nova.scheduler.simple import SimpleScheduler
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import vm_states
+from nova import test
+from nova.tests.scheduler import fakes
+from nova import utils
FLAGS = flags.FLAGS
-flags.DECLARE('max_cores', 'nova.scheduler.simple')
-flags.DECLARE('stub_network', 'nova.compute.manager')
-flags.DECLARE('instances_path', 'nova.compute.manager')
-FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+class SchedulerManagerTestCase(test.TestCase):
+ """Test case for scheduler manager"""
+ manager_cls = manager.SchedulerManager
+ driver_cls = driver.Scheduler
+ driver_cls_name = 'nova.scheduler.driver.Scheduler'
-def _create_instance_dict(**kwargs):
- """Create a dictionary for a test instance"""
- inst = {}
- # NOTE(jk0): If an integer is passed as the image_ref, the image
- # service will use the default image service (in this case, the fake).
- inst['image_ref'] = kwargs.get('image_ref',
- 'cedef40a-ed67-4d10-800e-17455edce175')
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = kwargs.get('user_id', 'admin')
- inst['project_id'] = kwargs.get('project_id', 'fake')
- inst['instance_type_id'] = '1'
- if 'host' in kwargs:
- inst['host'] = kwargs.get('host')
- inst['vcpus'] = kwargs.get('vcpus', 1)
- inst['memory_mb'] = kwargs.get('memory_mb', 20)
- inst['local_gb'] = kwargs.get('local_gb', 30)
- inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
- inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
- inst['task_state'] = kwargs.get('task_state', None)
- inst['availability_zone'] = kwargs.get('availability_zone', None)
- inst['ami_launch_index'] = 0
- inst['launched_on'] = kwargs.get('launched_on', 'dummy')
- return inst
+ def setUp(self):
+ super(SchedulerManagerTestCase, self).setUp()
+ self.flags(scheduler_driver=self.driver_cls_name)
+ self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+ self.manager = self.manager_cls()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
+ self.fake_args = (1, 2, 3)
+ self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
+
+ def test_1_correct_init(self):
+ # Correct scheduler driver
+ manager = self.manager
+ self.assertTrue(isinstance(manager.driver, self.driver_cls))
+
+ def test_get_host_list(self):
+ expected = 'fake_hosts'
+
+ self.mox.StubOutWithMock(self.manager.driver, 'get_host_list')
+ self.manager.driver.get_host_list().AndReturn(expected)
+
+ self.mox.ReplayAll()
+ result = self.manager.get_host_list(self.context)
+ self.assertEqual(result, expected)
+
+ def test_get_zone_list(self):
+ expected = 'fake_zones'
+ self.mox.StubOutWithMock(self.manager.driver, 'get_zone_list')
+ self.manager.driver.get_zone_list().AndReturn(expected)
-def _create_volume():
- """Create a test volume"""
- vol = {}
- vol['size'] = 1
- vol['availability_zone'] = 'nova'
- ctxt = context.get_admin_context()
- return db.volume_create(ctxt, vol)['id']
+ self.mox.ReplayAll()
+ result = self.manager.get_zone_list(self.context)
+ self.assertEqual(result, expected)
+
+ def test_get_service_capabilities(self):
+ expected = 'fake_service_capabs'
+ self.mox.StubOutWithMock(self.manager.driver,
+ 'get_service_capabilities')
+ self.manager.driver.get_service_capabilities().AndReturn(
+ expected)
+
+ self.mox.ReplayAll()
+ result = self.manager.get_service_capabilities(self.context)
+ self.assertEqual(result, expected)
-def _create_instance(**kwargs):
- """Create a test instance"""
- ctxt = context.get_admin_context()
- return db.instance_create(ctxt, _create_instance_dict(**kwargs))
+ def test_update_service_capabilities(self):
+ service_name = 'fake_service'
+ host = 'fake_host'
+ self.mox.StubOutWithMock(self.manager.driver,
+ 'update_service_capabilities')
-def _create_instance_from_spec(spec):
- return _create_instance(**spec['instance_properties'])
+ # Test that omitting capabilities passes an empty dictionary
+ self.manager.driver.update_service_capabilities(service_name,
+ host, {})
+ self.mox.ReplayAll()
+ result = self.manager.update_service_capabilities(self.context,
+ service_name=service_name, host=host)
+ self.mox.VerifyAll()
+
+ self.mox.ResetAll()
+ # Test that capabilities are passed through correctly
+ capabilities = {'fake_capability': 'fake_value'}
+ self.manager.driver.update_service_capabilities(
+ service_name, host, capabilities)
+ self.mox.ReplayAll()
+ result = self.manager.update_service_capabilities(self.context,
+ service_name=service_name, host=host,
+ capabilities=capabilities)
+ def test_existing_method(self):
+ def stub_method(self, *args, **kwargs):
+ pass
+ setattr(self.manager.driver, 'schedule_stub_method', stub_method)
-def _create_request_spec(**kwargs):
- return dict(instance_properties=_create_instance_dict(**kwargs))
+ self.mox.StubOutWithMock(self.manager.driver,
+ 'schedule_stub_method')
+ self.manager.driver.schedule_stub_method(self.context,
+ *self.fake_args, **self.fake_kwargs)
+ self.mox.ReplayAll()
+ self.manager.stub_method(self.context, self.topic,
+ *self.fake_args, **self.fake_kwargs)
-def _fake_cast_to_compute_host(context, host, method, **kwargs):
- global _picked_host
- _picked_host = host
+ def test_missing_method_fallback(self):
+ self.mox.StubOutWithMock(self.manager.driver, 'schedule')
+ self.manager.driver.schedule(self.context, self.topic,
+ 'noexist', *self.fake_args, **self.fake_kwargs)
+ self.mox.ReplayAll()
+ self.manager.noexist(self.context, self.topic,
+ *self.fake_args, **self.fake_kwargs)
-def _fake_cast_to_volume_host(context, host, method, **kwargs):
- global _picked_host
- _picked_host = host
+ def test_select(self):
+ expected = 'fake_select'
+ self.mox.StubOutWithMock(self.manager.driver, 'select')
+ self.manager.driver.select(self.context,
+ *self.fake_args, **self.fake_kwargs).AndReturn(expected)
-def _fake_create_instance_db_entry(simple_self, context, request_spec):
- instance = _create_instance_from_spec(request_spec)
- global instance_uuids
- instance_uuids.append(instance['uuid'])
- request_spec['instance_properties']['uuid'] = instance['uuid']
- return instance
+ self.mox.ReplayAll()
+ result = self.manager.select(self.context, *self.fake_args,
+ **self.fake_kwargs)
+ self.assertEqual(result, expected)
+
+ def test_show_host_resources(self):
+ host = 'fake_host'
+
+ computes = [{'host': host,
+ 'compute_node': [{'vcpus': 4,
+ 'vcpus_used': 2,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 512,
+ 'local_gb': 1024,
+ 'local_gb_used': 512}]}]
+ instances = [{'project_id': 'project1',
+ 'vcpus': 1,
+ 'memory_mb': 128,
+ 'local_gb': 128},
+ {'project_id': 'project1',
+ 'vcpus': 2,
+ 'memory_mb': 256,
+ 'local_gb': 384},
+ {'project_id': 'project2',
+ 'vcpus': 2,
+ 'memory_mb': 256,
+ 'local_gb': 256}]
+
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+
+ db.service_get_all_compute_by_host(self.context, host).AndReturn(
+ computes)
+ db.instance_get_all_by_host(self.context, host).AndReturn(instances)
+ self.mox.ReplayAll()
+ result = self.manager.show_host_resources(self.context, host)
+ expected = {'usage': {'project1': {'memory_mb': 384,
+ 'vcpus': 3,
+ 'local_gb': 512},
+ 'project2': {'memory_mb': 256,
+ 'vcpus': 2,
+ 'local_gb': 256}},
+ 'resource': {'vcpus_used': 2,
+ 'local_gb_used': 512,
+ 'memory_mb': 1024,
+ 'vcpus': 4,
+ 'local_gb': 1024,
+ 'memory_mb_used': 512}}
+ self.assertDictMatch(result, expected)
+
+ def test_run_instance_exception_puts_instance_in_error_state(self):
+ """Test that a NoValidHost exception for run_instance puts
+ the instance in ERROR state and eats the exception.
+ """
-class FakeContext(context.RequestContext):
- def __init__(self, *args, **kwargs):
- super(FakeContext, self).__init__('user', 'project', **kwargs)
+ fake_instance_uuid = 'fake-instance-id'
# Make sure the method we're going to call exists
+ def stub_method(*args, **kwargs):
+ pass
-class TestDriver(driver.Scheduler):
- """Scheduler Driver for Tests"""
- def schedule(self, context, topic, method, *args, **kwargs):
- host = 'fallback_host'
- driver.cast_to_host(context, topic, host, method, **kwargs)
+ setattr(self.manager.driver, 'schedule_run_instance', stub_method)
- def schedule_named_method(self, context, num=None):
- topic = 'topic'
- host = 'named_host'
- method = 'named_method'
- driver.cast_to_host(context, topic, host, method, num=num)
+ self.mox.StubOutWithMock(self.manager.driver,
+ 'schedule_run_instance')
+ self.mox.StubOutWithMock(db, 'instance_update')
- def schedule_failing_method(self, context, instance_id):
- raise exception.NoValidHost(reason="")
+ request_spec = {'instance_properties':
+ {'uuid': fake_instance_uuid}}
+ self.fake_kwargs['request_spec'] = request_spec
+ self.manager.driver.schedule_run_instance(self.context,
+ *self.fake_args, **self.fake_kwargs).AndRaise(
+ exception.NoValidHost(reason=""))
+ db.instance_update(self.context, fake_instance_uuid,
+ {'vm_state': vm_states.ERROR})
-class SchedulerTestCase(test.TestCase):
- """Test case for scheduler"""
- def setUp(self):
- super(SchedulerTestCase, self).setUp()
- driver = 'nova.tests.scheduler.test_scheduler.TestDriver'
- self.flags(scheduler_driver=driver)
-
- def tearDown(self):
- self.stubs.UnsetAll()
- super(SchedulerTestCase, self).tearDown()
-
- def _create_compute_service(self):
- """Create compute-manager(ComputeNode and Service record)."""
- ctxt = context.get_admin_context()
- dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
- 'report_count': 0, 'availability_zone': 'dummyzone'}
- s_ref = db.service_create(ctxt, dic)
-
- dic = {'service_id': s_ref['id'],
- 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
- 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
- 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
- 'cpu_info': ''}
- db.compute_node_create(ctxt, dic)
-
- return db.service_get(ctxt, s_ref['id'])
-
- def test_fallback(self):
- scheduler = manager.SchedulerManager()
- self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- ctxt = context.get_admin_context()
- rpc.cast(ctxt,
- 'fake_topic.fallback_host',
- {'method': 'noexist',
- 'args': {'num': 7}})
- self.mox.ReplayAll()
- scheduler.noexist(ctxt, 'fake_topic', num=7)
-
- def test_named_method(self):
- scheduler = manager.SchedulerManager()
- self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- ctxt = context.get_admin_context()
- rpc.cast(ctxt,
- 'topic.named_host',
- {'method': 'named_method',
- 'args': {'num': 7}})
self.mox.ReplayAll()
- scheduler.named_method(ctxt, 'topic', num=7)
-
- def test_show_host_resources_host_not_exit(self):
- """A host given as an argument does not exists."""
-
- scheduler = manager.SchedulerManager()
- dest = 'dummydest'
- ctxt = context.get_admin_context()
-
- self.assertRaises(exception.NotFound, scheduler.show_host_resources,
- ctxt, dest)
- #TODO(bcwaldon): reimplement this functionality
- #c1 = (e.message.find(_("does not exist or is not a "
- # "compute node.")) >= 0)
-
- def _dic_is_equal(self, dic1, dic2, keys=None):
- """Compares 2 dictionary contents(Helper method)"""
- if not keys:
- keys = ['vcpus', 'memory_mb', 'local_gb',
- 'vcpus_used', 'memory_mb_used', 'local_gb_used']
-
- for key in keys:
- if not (dic1[key] == dic2[key]):
- return False
- return True
-
- def _assert_state(self, state_dict):
- """assert the instance is in the state defined by state_dict"""
- instances = db.instance_get_all(context.get_admin_context())
- self.assertEqual(len(instances), 1)
-
- if 'vm_state' in state_dict:
- self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
- if 'task_state' in state_dict:
- self.assertEqual(state_dict['task_state'],
- instances[0]['task_state'])
- if 'power_state' in state_dict:
- self.assertEqual(state_dict['power_state'],
- instances[0]['power_state'])
-
- def test_no_valid_host_exception_on_start(self):
- """check the vm goes to ERROR state if the scheduler fails.
-
- If the scheduler driver cannot allocate a host for the VM during
- start_instance, it will raise a NoValidHost exception. In this
- scenario, we have to make sure that the VM state is set to ERROR.
- """
- def NoValidHost_raiser(context, topic, *args, **kwargs):
- raise exception.NoValidHost(_("Test NoValidHost exception"))
- scheduler = manager.SchedulerManager()
- ins_ref = _create_instance(task_state=task_states.STARTING,
- vm_state=vm_states.STOPPED)
- self.stubs.Set(TestDriver, 'schedule', NoValidHost_raiser)
- ctxt = context.get_admin_context()
- scheduler.start_instance(ctxt, 'topic', instance_id=ins_ref['id'])
- # assert that the instance goes to ERROR state
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': task_states.STARTING})
-
- def test_no_valid_host_exception_on_run_with_id(self):
- """check the vm goes to ERROR state if run_instance fails"""
-
- def NoValidHost_raiser(context, topic, *args, **kwargs):
- raise exception.NoValidHost(_("Test NoValidHost exception"))
- scheduler = manager.SchedulerManager()
- ins_ref = _create_instance(task_state=task_states.STARTING,
- vm_state=vm_states.STOPPED)
- self.stubs.Set(TestDriver, 'schedule', NoValidHost_raiser)
- ctxt = context.get_admin_context()
- request_spec = {'instance_properties': {'uuid': ins_ref['uuid']}}
- scheduler.run_instance(ctxt, 'topic', request_spec=request_spec)
- # assert that the instance goes to ERROR state
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': task_states.STARTING})
-
- def test_no_valid_host_exception_on_run_without_id(self):
- """check error handler doesn't raise if instance wasn't created"""
-
- def NoValidHost_raiser(context, topic, *args, **kwargs):
- raise exception.NoValidHost(_("Test NoValidHost exception"))
- scheduler = manager.SchedulerManager()
- self.stubs.Set(TestDriver, 'schedule', NoValidHost_raiser)
- ctxt = context.get_admin_context()
- request_spec = {'instance_properties': {}}
- scheduler.run_instance(ctxt, 'topic', request_spec=request_spec)
- # No error
-
- def test_show_host_resources_no_project(self):
- """No instance are running on the given host."""
-
- scheduler = manager.SchedulerManager()
- ctxt = context.get_admin_context()
- s_ref = self._create_compute_service()
-
- result = scheduler.show_host_resources(ctxt, s_ref['host'])
-
- # result checking
- c1 = ('resource' in result and 'usage' in result)
- compute_node = s_ref['compute_node'][0]
- c2 = self._dic_is_equal(result['resource'], compute_node)
- c3 = result['usage'] == {}
- self.assertTrue(c1 and c2 and c3)
- db.service_destroy(ctxt, s_ref['id'])
-
- def test_show_host_resources_works_correctly(self):
- """Show_host_resources() works correctly as expected."""
-
- scheduler = manager.SchedulerManager()
- ctxt = context.get_admin_context()
- s_ref = self._create_compute_service()
- i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
- i_ref2 = _create_instance(project_id='p-02', vcpus=3,
- host=s_ref['host'])
-
- result = scheduler.show_host_resources(ctxt, s_ref['host'])
-
- c1 = ('resource' in result and 'usage' in result)
- compute_node = s_ref['compute_node'][0]
- c2 = self._dic_is_equal(result['resource'], compute_node)
- c3 = result['usage'].keys() == ['p-01', 'p-02']
- keys = ['vcpus', 'memory_mb', 'local_gb']
- c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
- c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys)
- self.assertTrue(c1 and c2 and c3 and c4 and c5)
-
- db.service_destroy(ctxt, s_ref['id'])
- db.instance_destroy(ctxt, i_ref1['id'])
- db.instance_destroy(ctxt, i_ref2['id'])
-
-
-class SimpleDriverTestCase(test.TestCase):
- """Test case for simple driver"""
- def setUp(self):
- super(SimpleDriverTestCase, self).setUp()
- simple_scheduler = 'nova.scheduler.simple.SimpleScheduler'
- self.flags(connection_type='fake',
- stub_network=True,
- max_cores=4,
- max_gigabytes=4,
- network_manager='nova.network.manager.FlatManager',
- volume_driver='nova.volume.driver.FakeISCSIDriver',
- scheduler_driver='nova.scheduler.multi.MultiScheduler',
- compute_scheduler_driver=simple_scheduler,
- volume_scheduler_driver=simple_scheduler)
- self.scheduler = manager.SchedulerManager()
- self.context = context.get_admin_context()
-
- def _create_compute_service(self, **kwargs):
- """Create a compute service."""
-
- dic = {'binary': 'nova-compute', 'topic': 'compute',
- 'report_count': 0, 'availability_zone': 'dummyzone'}
- dic['host'] = kwargs.get('host', 'dummy')
- s_ref = db.service_create(self.context, dic)
- if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
- t = utils.utcnow() - datetime.timedelta(0)
- dic['created_at'] = kwargs.get('created_at', t)
- dic['updated_at'] = kwargs.get('updated_at', t)
- db.service_update(self.context, s_ref['id'], dic)
-
- dic = {'service_id': s_ref['id'],
- 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
- 'vcpus_used': 16, 'local_gb_used': 10,
- 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
- 'cpu_info': ''}
- dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
- dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
- dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
- db.compute_node_create(self.context, dic)
- return db.service_get(self.context, s_ref['id'])
-
- def test_regular_user_can_schedule(self):
- """Ensures a non-admin can run an instance"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- _create_instance()
- ctxt = context.RequestContext('fake', 'fake', is_admin=False)
- global instance_uuids
- instance_uuids = []
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
- request_spec = _create_request_spec()
- self.scheduler.driver.schedule_run_instance(ctxt, request_spec)
- compute1.kill()
-
- def test_doesnt_report_disabled_hosts_as_up_no_queue(self):
- """Ensures driver doesn't find hosts before they are enabled"""
- # NOTE(vish): constructing service without create method
- # because we are going to use it without queue
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
- s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
- db.service_update(self.context, s1['id'], {'disabled': True})
- db.service_update(self.context, s2['id'], {'disabled': True})
- hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
- self.assertEqual(0, len(hosts))
- compute1.kill()
- compute2.kill()
-
- def test_reports_enabled_hosts_as_up_no_queue(self):
- """Ensures driver can find the hosts that are up"""
- # NOTE(vish): constructing service without create method
- # because we are going to use it without queue
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
- hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
- self.assertEqual(2, len(hosts))
- compute1.kill()
- compute2.kill()
-
- def test_least_busy_host_gets_instance_no_queue(self):
- """Ensures the host with less cores gets the next one"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
-
- global instance_uuids
- instance_uuids = []
- instance = _create_instance()
- instance_uuids.append(instance['uuid'])
- compute1.run_instance(self.context, instance_uuids[0])
-
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec()
- instances = self.scheduler.driver.schedule_run_instance(
- self.context, request_spec)
-
- self.assertEqual(_picked_host, 'host2')
- self.assertEqual(len(instance_uuids), 2)
- self.assertEqual(len(instances), 1)
- self.assertEqual(instances[0].get('_is_precooked', False), False)
-
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute2.terminate_instance(self.context, instance_uuids[1])
- compute1.kill()
- compute2.kill()
-
- def test_specific_host_gets_instance_no_queue(self):
- """Ensures if you set zone:host it launches on that host"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
-
- global instance_uuids
- instance_uuids = []
- instance = _create_instance()
- instance_uuids.append(instance['uuid'])
- compute1.run_instance(self.context, instance_uuids[0])
-
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec(availability_zone='nova:host1')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), 2)
-
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute1.terminate_instance(self.context, instance_uuids[1])
- compute1.kill()
- compute2.kill()
-
- def test_wont_schedule_if_specified_host_is_down_no_queue(self):
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- now = utils.utcnow()
- delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
- past = now - delta
- db.service_update(self.context, s1['id'], {'updated_at': past})
-
- global instance_uuids
- instance_uuids = []
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec(availability_zone='nova:host1')
- self.assertRaises(exception.WillNotSchedule,
- self.scheduler.driver.schedule_run_instance,
- self.context,
- request_spec)
- compute1.kill()
-
- def test_will_schedule_on_disabled_host_if_specified_no_queue(self):
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- db.service_update(self.context, s1['id'], {'disabled': True})
-
- global instance_uuids
- instance_uuids = []
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec(availability_zone='nova:host1')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), 1)
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute1.kill()
-
- def test_specific_zone_gets_instance_no_queue(self):
- """Ensures if you set availability_zone it launches on that zone"""
- self.flags(node_availability_zone='zone1')
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- self.flags(node_availability_zone='zone2')
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
-
- global instance_uuids
- instance_uuids = []
- instance = _create_instance()
- instance_uuids.append(instance['uuid'])
- compute1.run_instance(self.context, instance_uuids[0])
-
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec(availability_zone='zone1')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), 2)
-
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute1.terminate_instance(self.context, instance_uuids[1])
- compute1.kill()
- compute2.kill()
-
- def test_bad_instance_zone_fails(self):
- self.flags(node_availability_zone='zone1')
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- request_spec = _create_request_spec(availability_zone='zone2')
- try:
- self.assertRaises(exception.NoValidHost,
- self.scheduler.driver.schedule_run_instance,
- self.context,
- request_spec)
- finally:
- compute1.kill()
-
- def test_bad_volume_zone_fails(self):
- self.flags(node_availability_zone='zone1')
- volume1 = service.Service('host1',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume1.start()
- # uses 'nova' for zone
- volume_id = _create_volume()
- try:
- self.assertRaises(exception.NoValidHost,
- self.scheduler.driver.schedule_create_volume,
- self.context,
- volume_id)
- finally:
- db.volume_destroy(self.context, volume_id)
- volume1.kill()
-
- def test_too_many_cores_no_queue(self):
- """Ensures we don't go over max cores"""
- compute1 = service.Service('host1',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute1.start()
- compute2 = service.Service('host2',
- 'nova-compute',
- 'compute',
- FLAGS.compute_manager)
- compute2.start()
- instance_uuids1 = []
- instance_uuids2 = []
- for index in xrange(FLAGS.max_cores):
- instance = _create_instance()
- compute1.run_instance(self.context, instance['uuid'])
- instance_uuids1.append(instance['uuid'])
- instance = _create_instance()
- compute2.run_instance(self.context, instance['uuid'])
- instance_uuids2.append(instance['uuid'])
- request_spec = _create_request_spec()
- self.assertRaises(exception.NoValidHost,
- self.scheduler.driver.schedule_run_instance,
- self.context,
- request_spec)
- for instance_uuid in instance_uuids1:
- compute1.terminate_instance(self.context, instance_uuid)
- for instance_uuid in instance_uuids2:
- compute2.terminate_instance(self.context, instance_uuid)
- compute1.kill()
- compute2.kill()
-
- def test_least_busy_host_gets_volume_no_queue(self):
- """Ensures the host with less gigabytes gets the next one"""
- volume1 = service.Service('host1',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume1.start()
- volume2 = service.Service('host2',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
-
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_volume_host', _fake_cast_to_volume_host)
-
- volume2.start()
- volume_id1 = _create_volume()
- volume1.create_volume(self.context, volume_id1)
- volume_id2 = _create_volume()
- self.scheduler.driver.schedule_create_volume(self.context,
- volume_id2)
- self.assertEqual(_picked_host, 'host2')
- volume1.delete_volume(self.context, volume_id1)
- db.volume_destroy(self.context, volume_id2)
-
- def test_doesnt_report_disabled_hosts_as_up2(self):
- """Ensures driver doesn't find hosts before they are enabled"""
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
- s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
- db.service_update(self.context, s1['id'], {'disabled': True})
- db.service_update(self.context, s2['id'], {'disabled': True})
- hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
- self.assertEqual(0, len(hosts))
- compute1.kill()
- compute2.kill()
-
- def test_reports_enabled_hosts_as_up(self):
- """Ensures driver can find the hosts that are up"""
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
- hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
- self.assertEqual(2, len(hosts))
- compute1.kill()
- compute2.kill()
-
- def test_least_busy_host_gets_instance(self):
- """Ensures the host with less cores gets the next one w/ Simple"""
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
-
- global instance_uuids
- instance_uuids = []
- instance = _create_instance()
- instance_uuids.append(instance['uuid'])
- compute1.run_instance(self.context, instance_uuids[0])
-
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec()
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host2')
- self.assertEqual(len(instance_uuids), 2)
-
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute2.terminate_instance(self.context, instance_uuids[1])
- compute1.kill()
- compute2.kill()
-
- def test_specific_host_gets_instance(self):
- """Ensures if you set availability_zone it launches on that zone"""
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
-
- global instance_uuids
- instance_uuids = []
- instance = _create_instance()
- instance_uuids.append(instance['uuid'])
- compute1.run_instance(self.context, instance_uuids[0])
-
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec(availability_zone='nova:host1')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), 2)
-
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute1.terminate_instance(self.context, instance_uuids[1])
- compute1.kill()
- compute2.kill()
-
- def test_wont_schedule_if_specified_host_is_down(self):
- compute1 = self.start_service('compute', host='host1')
- s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- now = utils.utcnow()
- delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
- past = now - delta
- db.service_update(self.context, s1['id'], {'updated_at': past})
- request_spec = _create_request_spec(availability_zone='nova:host1')
- self.assertRaises(exception.WillNotSchedule,
- self.scheduler.driver.schedule_run_instance,
- self.context,
- request_spec)
- compute1.kill()
-
- def test_will_schedule_on_disabled_host_if_specified(self):
- compute1 = self.start_service('compute', host='host1')
- s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- db.service_update(self.context, s1['id'], {'disabled': True})
-
- global instance_uuids
- instance_uuids = []
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec(availability_zone='nova:host1')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), 1)
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute1.kill()
-
- def test_isolation_of_images(self):
- self.flags(isolated_images=['hotmess'], isolated_hosts=['host1'])
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
- instance = _create_instance()
- compute1.run_instance(self.context, instance['uuid'])
- global instance_uuids
- instance_uuids = []
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
- request_spec = _create_request_spec(image_ref='hotmess')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), 1)
- compute1.terminate_instance(self.context, instance['uuid'])
- compute1.terminate_instance(self.context, instance_uuids[0])
- compute1.kill()
- compute2.kill()
-
- def test_non_isolation_of_not_isolated_images(self):
- self.flags(isolated_images=['hotmess'], isolated_hosts=['host1'])
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
- instance = _create_instance()
- compute2.run_instance(self.context, instance['uuid'])
- global instance_uuids
- instance_uuids = []
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
- request_spec = _create_request_spec()
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host2')
- self.assertEqual(len(instance_uuids), 1)
- compute2.terminate_instance(self.context, instance['uuid'])
- compute2.terminate_instance(self.context, instance_uuids[0])
- compute1.kill()
- compute2.kill()
-
- def test_isolated_images_are_resource_bound(self):
- """Ensures we don't go over max cores"""
- self.flags(isolated_images=['hotmess'], isolated_hosts=['host1'])
- compute1 = self.start_service('compute', host='host1')
- instance_uuids1 = []
- for index in xrange(FLAGS.max_cores):
- instance = _create_instance()
- compute1.run_instance(self.context, instance['uuid'])
- instance_uuids1.append(instance['uuid'])
-
- def _create_instance_db_entry(simple_self, context, request_spec):
- self.fail(_("Shouldn't try to create DB entry when at "
- "max cores"))
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _create_instance_db_entry)
-
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec()
-
- self.assertRaises(exception.NoValidHost,
- self.scheduler.driver.schedule_run_instance,
- self.context,
- request_spec)
- for instance_uuid in instance_uuids1:
- compute1.terminate_instance(self.context, instance_uuid)
- compute1.kill()
-
- def test_isolated_images_disable_resource_checking(self):
- self.flags(isolated_images=['hotmess'], isolated_hosts=['host1'],
- skip_isolated_core_check=True)
- compute1 = self.start_service('compute', host='host1')
- global instance_uuids
- instance_uuids = []
- for index in xrange(FLAGS.max_cores):
- instance = _create_instance()
- compute1.run_instance(self.context, instance['uuid'])
- instance_uuids.append(instance['uuid'])
-
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _fake_create_instance_db_entry)
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
- request_spec = _create_request_spec(image_ref='hotmess')
- self.scheduler.driver.schedule_run_instance(self.context, request_spec)
- self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_uuids), FLAGS.max_cores + 1)
- for instance_uuid in instance_uuids:
- compute1.terminate_instance(self.context, instance_uuid)
- compute1.kill()
-
- def test_too_many_cores(self):
- """Ensures we don't go over max cores"""
- compute1 = self.start_service('compute', host='host1')
- compute2 = self.start_service('compute', host='host2')
- instance_uuids1 = []
- instance_uuids2 = []
- for index in xrange(FLAGS.max_cores):
- instance = _create_instance()
- compute1.run_instance(self.context, instance['uuid'])
- instance_uuids1.append(instance['uuid'])
- instance = _create_instance()
- compute2.run_instance(self.context, instance['uuid'])
- instance_uuids2.append(instance['uuid'])
-
- def _create_instance_db_entry(simple_self, context, request_spec):
- self.fail(_("Shouldn't try to create DB entry when at "
- "max cores"))
- self.stubs.Set(SimpleScheduler,
- 'create_instance_db_entry', _create_instance_db_entry)
-
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_compute_host', _fake_cast_to_compute_host)
-
- request_spec = _create_request_spec()
-
- self.assertRaises(exception.NoValidHost,
- self.scheduler.driver.schedule_run_instance,
- self.context,
- request_spec)
- for instance_uuid in instance_uuids1:
- compute1.terminate_instance(self.context, instance_uuid)
- for instance_uuid in instance_uuids2:
- compute2.terminate_instance(self.context, instance_uuid)
- compute1.kill()
- compute2.kill()
-
- def test_least_busy_host_gets_volume(self):
- """Ensures the host with less gigabytes gets the next one"""
- volume1 = self.start_service('volume', host='host1')
- volume2 = self.start_service('volume', host='host2')
-
- global _picked_host
- _picked_host = None
- self.stubs.Set(driver,
- 'cast_to_volume_host', _fake_cast_to_volume_host)
-
- volume_id1 = _create_volume()
- volume1.create_volume(self.context, volume_id1)
- volume_id2 = _create_volume()
- self.scheduler.driver.schedule_create_volume(self.context,
- volume_id2)
- self.assertEqual(_picked_host, 'host2')
- volume1.delete_volume(self.context, volume_id1)
- db.volume_destroy(self.context, volume_id2)
- volume1.kill()
- volume2.kill()
-
- def test_too_many_gigabytes(self):
- """Ensures we don't go over max gigabytes"""
- volume1 = self.start_service('volume', host='host1')
- volume2 = self.start_service('volume', host='host2')
- volume_ids1 = []
- volume_ids2 = []
- for index in xrange(FLAGS.max_gigabytes):
- volume_id = _create_volume()
- volume1.create_volume(self.context, volume_id)
- volume_ids1.append(volume_id)
- volume_id = _create_volume()
- volume2.create_volume(self.context, volume_id)
- volume_ids2.append(volume_id)
- volume_id = _create_volume()
- self.assertRaises(exception.NoValidHost,
- self.scheduler.driver.schedule_create_volume,
- self.context,
- volume_id)
- for volume_id in volume_ids1:
- volume1.delete_volume(self.context, volume_id)
- for volume_id in volume_ids2:
- volume2.delete_volume(self.context, volume_id)
- volume1.kill()
- volume2.kill()
-
- def test_scheduler_live_migration_with_volume(self):
- """schedule_live_migration() works correctly as expected.
-
- Also, checks instance state is changed from 'running' -> 'migrating'.
+ self.manager.run_instance(self.context, self.topic,
+ *self.fake_args, **self.fake_kwargs)
+ def test_start_instance_exception_puts_instance_in_error_state(self):
+ """Test that an NoValidHost exception for start_instance puts
+ the instance in ERROR state and eats the exception.
"""
- instance_id = _create_instance(host='dummy')['id']
- i_ref = db.instance_get(self.context, instance_id)
- dic = {'instance_id': instance_id, 'size': 1}
- v_ref = db.volume_create(self.context, dic)
-
- # cannot check 2nd argument b/c the addresses of instance object
- # is different.
- driver_i = self.scheduler.driver
- nocare = mox.IgnoreArg()
- self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
- self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
- self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
- driver_i._live_migration_src_check(nocare, nocare)
- driver_i._live_migration_dest_check(nocare, nocare,
- i_ref['host'], False, False)
- driver_i._live_migration_common_check(nocare, nocare,
- i_ref['host'], False, False)
- self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
- 'block_migration': False}
- rpc.cast(self.context,
- db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
- {"method": 'live_migration', "args": kwargs})
+ fake_instance_id = 'fake-instance-id'
+ self.fake_kwargs['instance_id'] = fake_instance_id
- self.mox.ReplayAll()
- self.scheduler.live_migration(self.context, FLAGS.compute_topic,
- instance_id=instance_id,
- dest=i_ref['host'],
- block_migration=False,
- disk_over_commit=False)
-
- i_ref = db.instance_get(self.context, instance_id)
- self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATING)
- db.instance_destroy(self.context, instance_id)
- db.volume_destroy(self.context, v_ref['id'])
-
- def test_live_migration_src_check_instance_not_running(self):
- """The instance given by instance_id is not running."""
+ # Make sure the method we're going to call exists
+ def stub_method(*args, **kwargs):
+ pass
- instance_id = _create_instance(
- power_state=power_state.NOSTATE)['id']
- i_ref = db.instance_get(self.context, instance_id)
+ setattr(self.manager.driver, 'schedule_start_instance', stub_method)
- try:
- self.scheduler.driver._live_migration_src_check(self.context,
- i_ref)
- except exception.Invalid, e:
- c = (e.message.find('is not running') > 0)
+ self.mox.StubOutWithMock(self.manager.driver,
+ 'schedule_start_instance')
+ self.mox.StubOutWithMock(db, 'instance_update')
- self.assertTrue(c)
- db.instance_destroy(self.context, instance_id)
+ self.manager.driver.schedule_start_instance(self.context,
+ *self.fake_args, **self.fake_kwargs).AndRaise(
+ exception.NoValidHost(reason=""))
+ db.instance_update(self.context, fake_instance_id,
+ {'vm_state': vm_states.ERROR})
- def test_live_migration_src_check_volume_node_not_alive(self):
- """Raise exception when volume node is not alive."""
+ self.mox.ReplayAll()
+ self.manager.start_instance(self.context, self.topic,
+ *self.fake_args, **self.fake_kwargs)
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- dic = {'instance_id': instance_id, 'size': 1}
- v_ref = db.volume_create(self.context, {'instance_id': instance_id,
- 'size': 1})
- t1 = utils.utcnow() - datetime.timedelta(1)
- dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
- 'topic': 'volume', 'report_count': 0}
- s_ref = db.service_create(self.context, dic)
- self.assertRaises(exception.VolumeServiceUnavailable,
- self.scheduler.driver.schedule_live_migration,
- self.context, instance_id, i_ref['host'])
+class SchedulerTestCase(test.TestCase):
+ """Test case for base scheduler driver class"""
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
- db.volume_destroy(self.context, v_ref['id'])
+ # So we can subclass this test and re-use its tests if needed.
+ driver_cls = driver.Scheduler
- def test_live_migration_src_check_compute_node_not_alive(self):
- """Confirms src-compute node is alive."""
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- t = utils.utcnow() - datetime.timedelta(10)
- s_ref = self._create_compute_service(created_at=t, updated_at=t,
- host=i_ref['host'])
+ def setUp(self):
+ super(SchedulerTestCase, self).setUp()
+ self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+ self.driver = self.driver_cls()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.scheduler.driver._live_migration_src_check,
- self.context, i_ref)
+ def test_get_host_list(self):
+ expected = 'fake_hosts'
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
+ self.mox.StubOutWithMock(self.driver.host_manager, 'get_host_list')
+ self.driver.host_manager.get_host_list().AndReturn(expected)
- def test_live_migration_src_check_works_correctly(self):
- """Confirms this method finishes with no error."""
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- s_ref = self._create_compute_service(host=i_ref['host'])
+ self.mox.ReplayAll()
+ result = self.driver.get_host_list()
+ self.assertEqual(result, expected)
- ret = self.scheduler.driver._live_migration_src_check(self.context,
- i_ref)
+ def test_get_zone_list(self):
+ expected = 'fake_zones'
- self.assertTrue(ret is None)
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
+ self.mox.StubOutWithMock(self.driver.zone_manager, 'get_zone_list')
+ self.driver.zone_manager.get_zone_list().AndReturn(expected)
- def test_live_migration_dest_check_not_alive(self):
- """Confirms exception raises in case dest host does not exist."""
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- t = utils.utcnow() - datetime.timedelta(10)
- s_ref = self._create_compute_service(created_at=t, updated_at=t,
- host=i_ref['host'])
+ self.mox.ReplayAll()
+ result = self.driver.get_zone_list()
+ self.assertEqual(result, expected)
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'], False, False)
+ def test_get_service_capabilities(self):
+ expected = 'fake_service_capabs'
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
+ self.mox.StubOutWithMock(self.driver.host_manager,
+ 'get_service_capabilities')
+ self.driver.host_manager.get_service_capabilities().AndReturn(
+ expected)
- def test_live_migration_dest_check_not_alive(self):
- """Confirms exception raises in case dest host does not exist."""
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- t = utils.utcnow() - datetime.timedelta(10)
- s_ref = self._create_compute_service(created_at=t, updated_at=t,
- host=i_ref['host'])
+ self.mox.ReplayAll()
+ result = self.driver.get_service_capabilities()
+ self.assertEqual(result, expected)
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'], False, False)
+ def test_update_service_capabilities(self):
+ service_name = 'fake_service'
+ host = 'fake_host'
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
+ self.mox.StubOutWithMock(self.driver.host_manager,
+ 'update_service_capabilities')
- def test_live_migration_dest_check_service_same_host(self):
- """Confirms exception raises in case dest and src is same host."""
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- s_ref = self._create_compute_service(host=i_ref['host'])
+ capabilities = {'fake_capability': 'fake_value'}
+ self.driver.host_manager.update_service_capabilities(
+ service_name, host, capabilities)
+ self.mox.ReplayAll()
+ result = self.driver.update_service_capabilities(service_name,
+ host, capabilities)
- self.assertRaises(exception.UnableToMigrateToSelf,
- self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'], False, False)
+ def test_hosts_up(self):
+ service1 = {'host': 'host1'}
+ service2 = {'host': 'host2'}
+ services = [service1, service2]
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
- def test_live_migration_dest_check_service_lack_memory(self):
- """Confirms exception raises when dest doesn't have enough memory."""
- instance_id = _create_instance()['id']
- instance_id2 = _create_instance(host='somewhere',
- memory_mb=12)['id']
- i_ref = db.instance_get(self.context, instance_id)
- s_ref = self._create_compute_service(host='somewhere')
+ db.service_get_all_by_topic(self.context,
+ self.topic).AndReturn(services)
+ utils.service_is_up(service1).AndReturn(False)
+ utils.service_is_up(service2).AndReturn(True)
- self.assertRaises(exception.MigrationError,
- self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, 'somewhere', False, False)
+ self.mox.ReplayAll()
+ result = self.driver.hosts_up(self.context, self.topic)
+ self.assertEqual(result, ['host2'])
+
+ def test_create_instance_db_entry(self):
+ base_options = {'fake_option': 'meow'}
+ image = 'fake_image'
+ instance_type = 'fake_instance_type'
+ security_group = 'fake_security_group'
+ block_device_mapping = 'fake_block_device_mapping'
+ request_spec = {'instance_properties': base_options,
+ 'image': image,
+ 'instance_type': instance_type,
+ 'security_group': security_group,
+ 'block_device_mapping': block_device_mapping}
+
+ self.mox.StubOutWithMock(self.driver.compute_api,
+ 'create_db_entry_for_new_instance')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+
+ # New entry
+ fake_instance = {'uuid': 'fake-uuid'}
+ self.driver.compute_api.create_db_entry_for_new_instance(
+ self.context, instance_type, image, base_options,
+ security_group,
+ block_device_mapping).AndReturn(fake_instance)
+ self.mox.ReplayAll()
+ instance = self.driver.create_instance_db_entry(self.context,
+ request_spec)
+ self.mox.VerifyAll()
+ self.assertEqual(instance, fake_instance)
- db.instance_destroy(self.context, instance_id)
- db.instance_destroy(self.context, instance_id2)
- db.service_destroy(self.context, s_ref['id'])
+ # Entry created by compute already
+ self.mox.ResetAll()
- def test_block_migration_dest_check_service_lack_disk(self):
- """Confirms exception raises when dest doesn't have enough disk."""
- instance_id = _create_instance()['id']
- instance_id2 = _create_instance(host='somewhere',
- local_gb=70)['id']
- i_ref = db.instance_get(self.context, instance_id)
- s_ref = self._create_compute_service(host='somewhere')
+ fake_uuid = 'fake-uuid'
+ base_options['uuid'] = fake_uuid
+ fake_instance = {'uuid': fake_uuid}
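+        # base_options now carries a uuid, so the driver treats the entry
+        # as already created by compute and simply re-fetches it by uuid.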
+ db.instance_get_by_uuid(self.context, fake_uuid).AndReturn(
+ fake_instance)
- self.assertRaises(exception.MigrationError,
- self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, 'somewhere', True, False)
-
- db.instance_destroy(self.context, instance_id)
- db.instance_destroy(self.context, instance_id2)
- db.service_destroy(self.context, s_ref['id'])
-
- def test_live_migration_dest_check_service_works_correctly(self):
- """Confirms method finishes with no error."""
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- s_ref = self._create_compute_service(host='somewhere',
- memory_mb_used=5)
-
- ret = self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- 'somewhere',
- False, False)
- self.assertTrue(ret is None)
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
-
- def test_live_migration_common_check_service_orig_not_exists(self):
- """Destination host does not exist."""
-
- dest = 'dummydest'
- # mocks for live_migration_common_check()
- instance_id = _create_instance()['id']
- i_ref = db.instance_get(self.context, instance_id)
- t1 = utils.utcnow() - datetime.timedelta(10)
- s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
- host=dest)
-
- # mocks for mounted_on_same_shared_storage()
- fpath = '/test/20110127120000'
- self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
- topic = FLAGS.compute_topic
- driver.rpc.call(mox.IgnoreArg(),
- db.queue_get_for(self.context, topic, dest),
- {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
- driver.rpc.call(mox.IgnoreArg(),
- db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
- {"method": 'check_shared_storage_test_file',
- "args": {'filename': fpath}})
- driver.rpc.call(mox.IgnoreArg(),
- db.queue_get_for(mox.IgnoreArg(), topic, dest),
- {"method": 'cleanup_shared_storage_test_file',
- "args": {'filename': fpath}})
+ self.mox.ReplayAll()
+ instance = self.driver.create_instance_db_entry(self.context,
+ request_spec)
+ self.assertEqual(instance, fake_instance)
+
+ def _live_migration_instance(self):
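+        # Minimal fake instance record with just the fields the live
+        # migration checks read: host, power state, memory/disk sizes and
+        # two attached volumes.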
+ volume1 = {'id': 31338}
+ volume2 = {'id': 31339}
+ return {'id': 31337, 'name': 'fake-instance',
+ 'host': 'fake_host1',
+ 'volumes': [volume1, volume2],
+ 'power_state': power_state.RUNNING,
+ 'memory_mb': 1024,
+ 'local_gb': 1024}
+
+ def test_live_migration_basic(self):
+ """Test basic schedule_live_migration functionality"""
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
+ self.mox.StubOutWithMock(db, 'instance_update')
+ self.mox.StubOutWithMock(db, 'volume_update')
+ self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+ self.driver._live_migration_common_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+ db.instance_update(self.context, instance['id'],
+ {'vm_state': vm_states.MIGRATING})
+
+ db.volume_update(self.context, instance['volumes'][0]['id'],
+ {'status': 'migrating'})
+ db.volume_update(self.context, instance['volumes'][1]['id'],
+ {'status': 'migrating'})
+
+ driver.cast_to_compute_host(self.context, instance['host'],
+ 'live_migration', update_db=False,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
self.mox.ReplayAll()
- #self.assertRaises(exception.SourceHostUnavailable,
- self.assertRaises(exception.FileNotFound,
- self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest, False, False)
+ self.driver.schedule_live_migration(self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
+ def test_live_migration_all_checks_pass(self):
+ """Test live migration when all checks pass."""
+
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(self.driver, '_get_compute_info')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(rpc, 'cast')
+ self.mox.StubOutWithMock(db, 'instance_update')
+ self.mox.StubOutWithMock(db, 'volume_update')
+ self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
+
+ dest = 'fake_host2'
+ block_migration = True
+ disk_over_commit = True
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ # Source checks (volume and source compute are up)
+ db.service_get_all_by_topic(self.context, 'volume').AndReturn(
+ ['fake_service'])
+ utils.service_is_up('fake_service').AndReturn(True)
+ db.service_get_all_compute_by_host(self.context,
+ instance['host']).AndReturn(['fake_service2'])
+ utils.service_is_up('fake_service2').AndReturn(True)
+
+ # Destination checks (compute is up, enough memory, disk)
+ db.service_get_all_compute_by_host(self.context,
+ dest).AndReturn(['fake_service3'])
+ utils.service_is_up('fake_service3').AndReturn(True)
+ # assert_compute_node_has_enough_memory()
+ self.driver._get_compute_info(self.context, dest,
+ 'memory_mb').AndReturn(2048)
+ db.instance_get_all_by_host(self.context, dest).AndReturn(
+ [dict(memory_mb=256), dict(memory_mb=512)])
+ # assert_compute_node_has_enough_disk()
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue1')
+ rpc.call(self.context, 'dest_queue1',
+ {'method': 'update_available_resource'})
+ self.driver._get_compute_info(self.context, dest,
+ 'disk_available_least').AndReturn(1025)
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue1')
+ rpc.call(self.context, 'src_queue1',
+ {'method': 'get_instance_disk_info',
+ 'args': {'instance_name': instance['name']}}).AndReturn(
+ json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
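+        # 1024 * (1024 ** 3) bytes is 1024 GB, which fits within the
+        # 1025 GB of 'disk_available_least' reported above.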
+
+        # Common checks (shared storage ok, same hypervisor, etc.)
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ tmp_filename = 'test-filename'
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file'}
+ ).AndReturn(tmp_filename)
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}}).AndReturn(False)
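+        # Block migration requires that storage is NOT shared, so the
+        # check returning False is the passing case here.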
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}})
+ db.service_get_all_compute_by_host(self.context, dest).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]}])
+        # src has a matching hypervisor type and version
+ db.service_get_all_compute_by_host(self.context,
+ instance['host']).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'cpu_info': 'fake_cpu_info'}]}])
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'compare_cpu',
+ 'args': {'cpu_info': 'fake_cpu_info'}}).AndReturn(True)
+
+ db.instance_update(self.context, instance['id'],
+ {'vm_state': vm_states.MIGRATING})
+ db.volume_update(self.context, instance['volumes'][0]['id'],
+ {'status': 'migrating'})
+ db.volume_update(self.context, instance['volumes'][1]['id'],
+ {'status': 'migrating'})
+
+ driver.cast_to_compute_host(self.context, instance['host'],
+ 'live_migration', update_db=False,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
+ self.mox.ReplayAll()
+ result = self.driver.schedule_live_migration(self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ self.assertEqual(result, None)
- def test_live_migration_common_check_service_different_hypervisor(self):
- """Original host and dest host has different hypervisor type."""
- dest = 'dummydest'
- instance_id = _create_instance(host='dummy')['id']
- i_ref = db.instance_get(self.context, instance_id)
+ def test_live_migration_instance_not_running(self):
+ """The instance given by instance_id is not running."""
- # compute service for destination
- s_ref = self._create_compute_service(host=i_ref['host'])
- # compute service for original host
- s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+ self.mox.StubOutWithMock(db, 'instance_get')
- # mocks
- driver = self.scheduler.driver
- self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
- driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+ dest = 'fake_host2'
+ block_migration = False
+ instance = self._live_migration_instance()
+ instance['power_state'] = power_state.NOSTATE
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidHypervisorType,
- self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest, False, False)
-
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
- db.service_destroy(self.context, s_ref2['id'])
-
- def test_live_migration_common_check_service_different_version(self):
- """Original host and dest host has different hypervisor version."""
- dest = 'dummydest'
- instance_id = _create_instance(host='dummy')['id']
- i_ref = db.instance_get(self.context, instance_id)
-
- # compute service for destination
- s_ref = self._create_compute_service(host=i_ref['host'])
- # compute service for original host
- s_ref2 = self._create_compute_service(host=dest,
- hypervisor_version=12002)
-
- # mocks
- driver = self.scheduler.driver
- self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
- driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
self.mox.ReplayAll()
- self.assertRaises(exception.DestinationHypervisorTooOld,
- self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest, False, False)
-
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
- db.service_destroy(self.context, s_ref2['id'])
-
- def test_live_migration_common_check_checking_cpuinfo_fail(self):
- """Raise exception when original host doesn't have compatible cpu."""
-
- dest = 'dummydest'
- instance_id = _create_instance(host='dummy')['id']
- i_ref = db.instance_get(self.context, instance_id)
-
- # compute service for destination
- s_ref = self._create_compute_service(host=i_ref['host'])
- # compute service for original host
- s_ref2 = self._create_compute_service(host=dest)
-
- # mocks
- driver = self.scheduler.driver
- self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
- driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
- self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
- rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
- {"method": 'compare_cpu',
- "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
- AndRaise(rpc.RemoteError(exception.InvalidCPUInfo,
- exception.InvalidCPUInfo(reason='fake')))
- self.mox.ReplayAll()
+ c = False
try:
- driver._live_migration_common_check(self.context,
- i_ref,
- dest,
- False,
- False)
- except rpc.RemoteError, e:
- c = (e.exc_type == exception.InvalidCPUInfo)
-
+ self.driver.schedule_live_migration(self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
+ except exception.Invalid, e:
+ c = (str(e).find('is not running') > 0)
self.assertTrue(c)
- db.instance_destroy(self.context, instance_id)
- db.service_destroy(self.context, s_ref['id'])
- db.service_destroy(self.context, s_ref2['id'])
- def test_exception_puts_instance_in_error_state(self):
- """Test that an exception from the scheduler puts an instance
- in the ERROR state."""
+ def test_live_migration_volume_node_not_alive(self):
+ """Raise exception when volume node is not alive."""
- scheduler = manager.SchedulerManager()
- ctxt = context.get_admin_context()
- inst = _create_instance()
- self.assertRaises(Exception, scheduler._schedule,
- 'failing_method', ctxt, 'scheduler',
- instance_id=inst['uuid'])
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
- # Refresh the instance
- inst = db.instance_get(ctxt, inst['id'])
+ dest = 'fake_host2'
+ block_migration = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+ # Volume down
+ db.service_get_all_by_topic(self.context, 'volume').AndReturn(
+ ['fake_service'])
+ utils.service_is_up('fake_service').AndReturn(False)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.VolumeServiceUnavailable,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
+
+ def test_live_migration_compute_src_not_alive(self):
+ """Raise exception when src compute node is not alive."""
+
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+ # Volume up
+ db.service_get_all_by_topic(self.context, 'volume').AndReturn(
+ ['fake_service'])
+ utils.service_is_up('fake_service').AndReturn(True)
+
+ # Compute down
+ db.service_get_all_compute_by_host(self.context,
+ instance['host']).AndReturn(['fake_service2'])
+ utils.service_is_up('fake_service2').AndReturn(False)
-class MultiDriverTestCase(SimpleDriverTestCase):
- """Test case for multi driver."""
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
- def setUp(self):
- super(MultiDriverTestCase, self).setUp()
- self.flags(connection_type='fake',
- stub_network=True,
- max_cores=4,
- max_gigabytes=4,
- network_manager='nova.network.manager.FlatManager',
- volume_driver='nova.volume.driver.FakeISCSIDriver',
- compute_scheduler_driver=('nova.scheduler.simple'
- '.SimpleScheduler'),
- volume_scheduler_driver=('nova.scheduler.simple'
- '.SimpleScheduler'),
- scheduler_driver='nova.scheduler.multi.MultiScheduler')
- self.scheduler = manager.SchedulerManager()
+ def test_live_migration_compute_dest_not_alive(self):
+ """Raise exception when dest compute node is not alive."""
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
-class FakeZone(object):
- def __init__(self, id, api_url, username, password, name='child'):
- self.id = id
- self.api_url = api_url
- self.username = username
- self.password = password
- self.name = name
+ dest = 'fake_host2'
+ block_migration = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+ self.driver._live_migration_src_check(self.context, instance)
+ db.service_get_all_compute_by_host(self.context,
+ dest).AndReturn(['fake_service3'])
+ # Compute is down
+ utils.service_is_up('fake_service3').AndReturn(False)
-ZONE_API_URL1 = "http://1.example.com"
-ZONE_API_URL2 = "http://2.example.com"
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
+ def test_live_migration_dest_check_service_same_host(self):
+ """Confirms exception raises in case dest and src is same host."""
-def zone_get_all(context):
- return [
- FakeZone(1, ZONE_API_URL1, 'bob', 'xxx'),
- FakeZone(2, ZONE_API_URL2, 'bob', 'xxx'),
- ]
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ # make dest same as src
+ dest = instance['host']
-def fake_instance_get_by_uuid(context, uuid):
- if FAKE_UUID_NOT_FOUND:
- raise exception.InstanceNotFound(instance_id=uuid)
- else:
- return {'id': 1}
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+ self.driver._live_migration_src_check(self.context, instance)
+ db.service_get_all_compute_by_host(self.context,
+ dest).AndReturn(['fake_service3'])
+ utils.service_is_up('fake_service3').AndReturn(True)
-class FakeRerouteCompute(api.reroute_compute):
- def __init__(self, method_name, id_to_return=1):
- super(FakeRerouteCompute, self).__init__(method_name)
- self.id_to_return = id_to_return
+ self.mox.ReplayAll()
+ self.assertRaises(exception.UnableToMigrateToSelf,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=False)
- def _call_child_zones(self, context, zones, function):
- return []
+ def test_live_migration_dest_check_service_lack_memory(self):
+ """Confirms exception raises when dest doesn't have enough memory."""
- def get_collection_context_and_id(self, args, kwargs):
- return ("servers", None, self.id_to_return)
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(self.driver, '_get_compute_info')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ db.service_get_all_compute_by_host(self.context,
+ dest).AndReturn(['fake_service3'])
+ utils.service_is_up('fake_service3').AndReturn(True)
+
+ self.driver._get_compute_info(self.context, dest,
+ 'memory_mb').AndReturn(2048)
+ db.instance_get_all_by_host(self.context, dest).AndReturn(
+ [dict(memory_mb=1024), dict(memory_mb=512)])
- def unmarshall_result(self, zone_responses):
- return dict(magic="found me")
+ self.mox.ReplayAll()
+ self.assertRaises(exception.MigrationError,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ def test_block_migration_dest_check_service_lack_disk(self):
+ """Confirms exception raises when dest doesn't have enough disk."""
-def go_boom(self, context, instance):
- raise exception.InstanceNotFound(instance_id=instance)
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(utils, 'service_is_up')
+ self.mox.StubOutWithMock(self.driver,
+ 'assert_compute_node_has_enough_memory')
+ self.mox.StubOutWithMock(self.driver, '_get_compute_info')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+
+ dest = 'fake_host2'
+ block_migration = True
+ disk_over_commit = True
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ db.service_get_all_compute_by_host(self.context,
+ dest).AndReturn(['fake_service3'])
+ utils.service_is_up('fake_service3').AndReturn(True)
+
+ # Enough memory
+ self.driver.assert_compute_node_has_enough_memory(self.context,
+ instance, dest)
+
+ # Not enough disk
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'update_available_resource'})
+ self.driver._get_compute_info(self.context, dest,
+ 'disk_available_least').AndReturn(1023)
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ rpc.call(self.context, 'src_queue',
+ {'method': 'get_instance_disk_info',
+ 'args': {'instance_name': instance['name']}}).AndReturn(
+ json.dumps([{'disk_size': 1024 * (1024 ** 3)}]))
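+        # The instance needs 1024 GB of disk but only 1023 GB is
+        # available on the dest, so the disk check fails.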
+ self.mox.ReplayAll()
+ self.assertRaises(exception.MigrationError,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
+ def test_live_migration_different_shared_storage_raises(self):
+ """Src and dest must have same shared storage for live migration"""
+
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(rpc, 'cast')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ tmp_filename = 'test-filename'
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file'}
+ ).AndReturn(tmp_filename)
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}}).AndReturn(False)
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}})
-def found_instance(self, context, instance):
- return dict(name='myserver')
+ self.mox.ReplayAll()
+ # FIXME(comstud): See LP891756.
+ self.assertRaises(exception.FileNotFound,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
+ def test_live_migration_same_shared_storage_okay(self):
+ """live migration works with same src and dest shared storage"""
+
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(rpc, 'cast')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ tmp_filename = 'test-filename'
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file'}
+ ).AndReturn(tmp_filename)
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}}).AndReturn(False)
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}})
+ self.mox.ReplayAll()
+ # FIXME(comstud): See LP891756.
+ self.assertRaises(exception.FileNotFound,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
+ def test_live_migration_different_hypervisor_type_raises(self):
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(rpc, 'cast')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ tmp_filename = 'test-filename'
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file'}
+ ).AndReturn(tmp_filename)
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}}).AndReturn(True)
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}})
+ db.service_get_all_compute_by_host(self.context, dest).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]}])
+ # different hypervisor type
+ db.service_get_all_compute_by_host(self.context,
+ instance['host']).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'not-xen',
+ 'hypervisor_version': 1}]}])
-class FakeResource(object):
- def __init__(self, attribute_dict):
- for k, v in attribute_dict.iteritems():
- setattr(self, k, v)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidHypervisorType,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
+ def test_live_migration_dest_hypervisor_version_older_raises(self):
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(rpc, 'cast')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ tmp_filename = 'test-filename'
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file'}
+ ).AndReturn(tmp_filename)
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}}).AndReturn(True)
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}})
+ db.service_get_all_compute_by_host(self.context, dest).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]}])
+ # newer hypervisor version for src
+ db.service_get_all_compute_by_host(self.context,
+ instance['host']).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 2}]}])
+ self.mox.ReplayAll()
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
+    def test_live_migration_dest_host_incompatible_cpu_raises(self):
+ self.mox.StubOutWithMock(db, 'instance_get')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(rpc, 'cast')
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ db.instance_get(self.context, instance['id']).AndReturn(instance)
+
+ self.driver._live_migration_src_check(self.context, instance)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest, block_migration, disk_over_commit)
+
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ instance['host']).AndReturn('src_queue')
+ tmp_filename = 'test-filename'
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'create_shared_storage_test_file'}
+ ).AndReturn(tmp_filename)
+ rpc.call(self.context, 'src_queue',
+ {'method': 'check_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}}).AndReturn(True)
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'cleanup_shared_storage_test_file',
+ 'args': {'filename': tmp_filename}})
+ db.service_get_all_compute_by_host(self.context, dest).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]}])
+ db.service_get_all_compute_by_host(self.context,
+ instance['host']).AndReturn(
+ [{'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'cpu_info': 'fake_cpu_info'}]}])
+ db.queue_get_for(self.context, FLAGS.compute_topic,
+ dest).AndReturn('dest_queue')
+ rpc.call(self.context, 'dest_queue',
+ {'method': 'compare_cpu',
+ 'args': {'cpu_info': 'fake_cpu_info'}}).AndRaise(
+ rpc.RemoteError())
+
+ self.mox.ReplayAll()
+ self.assertRaises(rpc_common.RemoteError,
+ self.driver.schedule_live_migration, self.context,
+ instance_id=instance['id'], dest=dest,
+ block_migration=block_migration)
- def pause(self):
- pass
+class SchedulerDriverModuleTestCase(test.TestCase):
+ """Test case for scheduler driver module methods"""
-class ZoneRedirectTest(test.TestCase):
def setUp(self):
- super(ZoneRedirectTest, self).setUp()
+ super(SchedulerDriverModuleTestCase, self).setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+
+ def test_cast_to_volume_host_update_db_with_volume_id(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'volume_id': 31337,
+ 'extra_arg': 'meow'}
+ queue = 'fake_queue'
+
+ self.mox.StubOutWithMock(utils, 'utcnow')
+ self.mox.StubOutWithMock(db, 'volume_update')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
+
+ utils.utcnow().AndReturn('fake-now')
+ db.volume_update(self.context, 31337,
+ {'host': host, 'scheduled_at': 'fake-now'})
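+        # update_db=True with a volume_id present stamps the volume row
+        # with the chosen host and the scheduling time before the cast.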
+ db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
- self.stubs.Set(db, 'zone_get_all', zone_get_all)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid)
- self.flags(enable_zone_routing=True)
+ self.mox.ReplayAll()
+ driver.cast_to_volume_host(self.context, host, method,
+ update_db=True, **fake_kwargs)
- def tearDown(self):
- super(ZoneRedirectTest, self).tearDown()
+ def test_cast_to_volume_host_update_db_without_volume_id(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+ queue = 'fake_queue'
- def test_trap_found_locally(self):
- decorator = FakeRerouteCompute("foo")
- try:
- result = decorator(found_instance)(None, None, 1)
- except api.RedirectResult, e:
- self.fail(_("Successful database hit should succeed"))
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
- def test_trap_not_found_locally_id_passed(self):
- """When an integer ID is not found locally, we cannot reroute to
- another zone, so just return InstanceNotFound exception
- """
- decorator = FakeRerouteCompute("foo")
- self.assertRaises(exception.InstanceNotFound,
- decorator(go_boom), None, None, 1)
+ db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
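+        # No volume_id in the kwargs, so no db.volume_update is expected
+        # even though update_db=True.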
- def test_trap_not_found_locally_uuid_passed(self):
- """When a UUID is found, if the item isn't found locally, we should
- try to reroute to a child zone to see if they have it
- """
- decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND)
- try:
- result = decorator(go_boom)(None, None, 1)
- self.fail(_("Should have rerouted."))
- except api.RedirectResult, e:
- self.assertEquals(e.results['magic'], 'found me')
-
- def test_routing_flags(self):
- self.flags(enable_zone_routing=False)
- decorator = FakeRerouteCompute("foo")
- self.assertRaises(exception.InstanceNotFound, decorator(go_boom),
- None, None, 1)
-
- def test_get_collection_context_and_id(self):
- decorator = api.reroute_compute("foo")
- self.assertEquals(decorator.get_collection_context_and_id(
- (None, 10, 20), {}), ("servers", 10, 20))
- self.assertEquals(decorator.get_collection_context_and_id(
- (None, 11,), dict(instance_id=21)), ("servers", 11, 21))
- self.assertEquals(decorator.get_collection_context_and_id(
- (None,), dict(context=12, instance_id=22)), ("servers", 12, 22))
-
- def test_unmarshal_single_server(self):
- decorator = api.reroute_compute("foo")
- decorator.item_uuid = 'fake_uuid'
- result = decorator.unmarshall_result([])
- self.assertEquals(decorator.unmarshall_result([]), None)
- self.assertEquals(decorator.unmarshall_result(
- [FakeResource(dict(a=1, b=2)), ]),
- dict(server=dict(a=1, b=2)))
- self.assertEquals(decorator.unmarshall_result(
- [FakeResource(dict(a=1, _b=2)), ]),
- dict(server=dict(a=1,)))
- self.assertEquals(decorator.unmarshall_result(
- [FakeResource(dict(a=1, manager=2)), ]),
- dict(server=dict(a=1,)))
- self.assertEquals(decorator.unmarshall_result(
- [FakeResource(dict(_a=1, manager=2)), ]),
- dict(server={}))
-
- def test_one_zone_down_no_instances(self):
-
- def _fake_issue_novaclient_command(nova, zone, *args, **kwargs):
- return None
-
- class FakeNovaClientWithFailure(object):
- def __init__(self, username, password, method, api_url,
- token=None, region_name=None):
- self.api_url = api_url
-
- def authenticate(self):
- if self.api_url == ZONE_API_URL2:
- raise novaclient_exceptions.BadRequest('foo')
-
- self.stubs.Set(api, '_issue_novaclient_command',
- _fake_issue_novaclient_command)
- self.stubs.Set(api.novaclient, 'Client', FakeNovaClientWithFailure)
-
- @api.reroute_compute("get")
- def do_get(self, context, uuid):
- pass
+ self.mox.ReplayAll()
+ driver.cast_to_volume_host(self.context, host, method,
+ update_db=True, **fake_kwargs)
- try:
- do_get(None, FakeContext(), FAKE_UUID)
- self.fail("Should have got redirect exception.")
- except api.RedirectResult, e:
- self.assertTrue(isinstance(e.results, exception.ZoneRequestError))
-
- def test_one_zone_down_got_instance(self):
-
- def _fake_issue_novaclient_command(nova, zone, *args, **kwargs):
- class FakeServer(object):
- def __init__(self):
- self.id = FAKE_UUID
- self.test = '1234'
- return FakeServer()
-
- class FakeNovaClientWithFailure(object):
- def __init__(self, username, password, method, api_url,
- token=None, region_name=None):
- self.api_url = api_url
-
- def authenticate(self):
- if self.api_url == ZONE_API_URL2:
- raise novaclient_exceptions.BadRequest('foo')
-
- self.stubs.Set(api, '_issue_novaclient_command',
- _fake_issue_novaclient_command)
- self.stubs.Set(api.novaclient, 'Client', FakeNovaClientWithFailure)
-
- @api.reroute_compute("get")
- def do_get(self, context, uuid):
- pass
+ def test_cast_to_volume_host_no_update_db(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+ queue = 'fake_queue'
- try:
- do_get(None, FakeContext(), FAKE_UUID)
- except api.RedirectResult, e:
- results = e.results
- self.assertIn('server', results)
- self.assertEqual(results['server']['id'], FAKE_UUID)
- self.assertEqual(results['server']['test'], '1234')
- except Exception, e:
- self.fail(_("RedirectResult should have been raised: %s" % e))
- else:
- self.fail(_("RedirectResult should have been raised"))
-
- def test_zones_up_no_instances(self):
-
- def _fake_issue_novaclient_command(nova, zone, *args, **kwargs):
- return None
-
- class FakeNovaClientNoFailure(object):
- def __init__(self, username, password, method, api_url,
- token=None, region_name=None):
- pass
-
- def authenticate(self):
- return
-
- self.stubs.Set(api, '_issue_novaclient_command',
- _fake_issue_novaclient_command)
- self.stubs.Set(api.novaclient, 'Client', FakeNovaClientNoFailure)
-
- @api.reroute_compute("get")
- def do_get(self, context, uuid):
- pass
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
- try:
- do_get(None, FakeContext(), FAKE_UUID)
- self.fail("Expected redirect exception")
- except api.RedirectResult, e:
- self.assertEquals(e.results, None)
+ db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
+ self.mox.ReplayAll()
+ driver.cast_to_volume_host(self.context, host, method,
+ update_db=False, **fake_kwargs)
+
+ def test_cast_to_compute_host_update_db_with_instance_id(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'instance_id': 31337,
+ 'extra_arg': 'meow'}
+ queue = 'fake_queue'
+
+ self.mox.StubOutWithMock(utils, 'utcnow')
+ self.mox.StubOutWithMock(db, 'instance_update')
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
+
+ utils.utcnow().AndReturn('fake-now')
+ db.instance_update(self.context, 31337,
+ {'host': host, 'scheduled_at': 'fake-now'})
+ db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
-class FakeServerCollection(object):
- def get(self, instance_id):
- return FakeResource(dict(a=10, b=20))
+ self.mox.ReplayAll()
+ driver.cast_to_compute_host(self.context, host, method,
+ update_db=True, **fake_kwargs)
- def find(self, name):
- return FakeResource(dict(a=11, b=22))
+ def test_cast_to_compute_host_update_db_without_instance_id(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+ queue = 'fake_queue'
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
-class FakeEmptyServerCollection(object):
- def get(self, f):
- raise novaclient_exceptions.NotFound(1)
+ db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
- def find(self, name):
- raise novaclient_exceptions.NotFound(2)
+ self.mox.ReplayAll()
+ driver.cast_to_compute_host(self.context, host, method,
+ update_db=True, **fake_kwargs)
+ def test_cast_to_compute_host_no_update_db(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+ queue = 'fake_queue'
-class FakeNovaClient(object):
- def __init__(self, collection, *args, **kwargs):
- self.servers = collection
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
+ db.queue_get_for(self.context, 'compute', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
-class DynamicNovaClientTest(test.TestCase):
- def test_issue_novaclient_command_found(self):
- zone = FakeZone(1, 'http://example.com', 'bob', 'xxx')
- self.assertEquals(api._issue_novaclient_command(
- FakeNovaClient(FakeServerCollection()),
- zone, "servers", "get", 100).a, 10)
+ self.mox.ReplayAll()
+ driver.cast_to_compute_host(self.context, host, method,
+ update_db=False, **fake_kwargs)
- self.assertEquals(api._issue_novaclient_command(
- FakeNovaClient(FakeServerCollection()),
- zone, "servers", "find", name="test").b, 22)
+ def test_cast_to_network_host(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+ queue = 'fake_queue'
- self.assertEquals(api._issue_novaclient_command(
- FakeNovaClient(FakeServerCollection()),
- zone, "servers", "pause", 100), None)
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
- def test_issue_novaclient_command_not_found(self):
- zone = FakeZone(1, 'http://example.com', 'bob', 'xxx')
- try:
- api._issue_novaclient_command(FakeNovaClient(
- FakeEmptyServerCollection()), zone, "servers", "get", 100)
- self.fail("Expected NotFound exception")
- except novaclient_exceptions.NotFound, e:
- pass
+ db.queue_get_for(self.context, 'network', host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
- try:
- api._issue_novaclient_command(FakeNovaClient(
- FakeEmptyServerCollection()), zone, "servers", "any", "name")
- self.fail("Expected NotFound exception")
- except novaclient_exceptions.NotFound, e:
- pass
+ self.mox.ReplayAll()
+ driver.cast_to_network_host(self.context, host, method,
+ update_db=True, **fake_kwargs)
+
+ def test_cast_to_host_compute_topic(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+
+ self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
+ driver.cast_to_compute_host(self.context, host, method,
+ update_db=False, **fake_kwargs)
+ self.mox.ReplayAll()
+ driver.cast_to_host(self.context, 'compute', host, method,
+ update_db=False, **fake_kwargs)
-class FakeZonesProxy(object):
- def do_something(self, *args, **kwargs):
- return 42
+ def test_cast_to_host_volume_topic(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
- def raises_exception(self, *args, **kwargs):
- raise Exception('testing')
+ self.mox.StubOutWithMock(driver, 'cast_to_volume_host')
+ driver.cast_to_volume_host(self.context, host, method,
+ update_db=False, **fake_kwargs)
+ self.mox.ReplayAll()
+ driver.cast_to_host(self.context, 'volume', host, method,
+ update_db=False, **fake_kwargs)
-class FakeNovaClientZones(object):
- def __init__(self, *args, **kwargs):
- self.zones = FakeZonesProxy()
+ def test_cast_to_host_network_topic(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
- def authenticate(self):
- pass
+ self.mox.StubOutWithMock(driver, 'cast_to_network_host')
+ driver.cast_to_network_host(self.context, host, method,
+ update_db=False, **fake_kwargs)
+ self.mox.ReplayAll()
+ driver.cast_to_host(self.context, 'network', host, method,
+ update_db=False, **fake_kwargs)
-class CallZoneMethodTest(test.TestCase):
- def setUp(self):
- super(CallZoneMethodTest, self).setUp()
- self.stubs.Set(db, 'zone_get_all', zone_get_all)
- self.stubs.Set(novaclient, 'Client', FakeNovaClientZones)
-
- def tearDown(self):
- super(CallZoneMethodTest, self).tearDown()
-
- def test_call_zone_method(self):
- context = FakeContext()
- method = 'do_something'
- results = api.call_zone_method(context, method)
- self.assertEqual(len(results), 2)
- self.assertIn((1, 42), results)
- self.assertIn((2, 42), results)
-
- def test_call_zone_method_not_present(self):
- context = FakeContext()
- method = 'not_present'
- self.assertRaises(AttributeError, api.call_zone_method,
- context, method)
-
- def test_call_zone_method_generates_exception(self):
- context = FakeContext()
- method = 'raises_exception'
- self.assertRaises(Exception, api.call_zone_method, context, method)
+ def test_cast_to_host_unknown_topic(self):
+ host = 'fake_host1'
+ method = 'fake_method'
+ fake_kwargs = {'extra_arg': 'meow'}
+ topic = 'unknown'
+ queue = 'fake_queue'
+
+ self.mox.StubOutWithMock(db, 'queue_get_for')
+ self.mox.StubOutWithMock(rpc, 'cast')
+
+ db.queue_get_for(self.context, topic, host).AndReturn(queue)
+ rpc.cast(self.context, queue,
+ {'method': method,
+ 'args': fake_kwargs})
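+        # Topics without a dedicated helper fall through to a plain cast
+        # with no db update.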
+
+ self.mox.ReplayAll()
+ driver.cast_to_host(self.context, topic, host, method,
+ update_db=False, **fake_kwargs)
+
+ def test_encode_instance(self):
+ instance = {'id': 31337,
+ 'test_arg': 'meow'}
+
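+        # local=True: only the id is sent and '_is_precooked' is False,
+        # so the receiver is expected to look the instance up itself.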
+ result = driver.encode_instance(instance, True)
+ expected = {'id': instance['id'], '_is_precooked': False}
+ self.assertDictMatch(result, expected)
+ # Orig dict not changed
+ self.assertNotEqual(result, instance)
+
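+        # local=False: the whole dict is embedded and flagged precooked.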
+ result = driver.encode_instance(instance, False)
+ expected = {}
+ expected.update(instance)
+ expected['_is_precooked'] = True
+ self.assertDictMatch(result, expected)
+ # Orig dict not changed
+ self.assertNotEqual(result, instance)
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
index a7cd2358a..f4cab42ad 100644
--- a/nova/tests/scheduler/test_vsa_scheduler.py
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -13,18 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import nova
-
from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
+from nova.scheduler import vsa as vsa_sched
from nova import test
+from nova.tests.scheduler import test_scheduler
from nova import utils
from nova.volume import volume_types
-from nova.scheduler import vsa as vsa_sched
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.scheduler.vsa')
@@ -50,7 +50,32 @@ class FakeVsaMostAvailCapacityScheduler(
pass
-class VsaSchedulerTestCase(test.TestCase):
+class VsaSchedulerTestCase(test_scheduler.SchedulerTestCase):
+
+ driver_cls = FakeVsaLeastUsedScheduler
+
+ def setUp(self):
+ super(VsaSchedulerTestCase, self).setUp()
+
+ self.host_num = 10
+ self.drive_type_num = 5
+
+ self.stubs.Set(rpc, 'cast', fake_rpc_cast)
+ self.stubs.Set(self.driver,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(self.driver,
+ '_provision_volume', self._fake_provision_volume)
+ self.stubs.Set(db, 'vsa_update', self._fake_vsa_update)
+
+ self.stubs.Set(db, 'volume_get', self._fake_volume_get)
+ self.stubs.Set(db, 'volume_update', self._fake_volume_update)
+
+ self.created_types_lst = []
+
+ def tearDown(self):
+ for name in self.created_types_lst:
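+            # The test context is not an admin context, so elevate it
+            # before purging the volume types created during the test.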
+ volume_types.purge(self.context.elevated(), name)
+ super(VsaSchedulerTestCase, self).tearDown()
def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
volume_params = []
@@ -58,7 +83,7 @@ class VsaSchedulerTestCase(test.TestCase):
name = 'name_' + str(i)
try:
- volume_types.create(self.context, name,
+ volume_types.create(self.context.elevated(), name,
extra_specs={'type': 'vsa_drive',
'drive_name': name,
'drive_type': 'type_' + str(drive_ix),
@@ -205,35 +230,6 @@ class VsaSchedulerTestCase(test.TestCase):
def _fake_service_is_up_False(self, service):
return False
- def setUp(self, sched_class=None):
- super(VsaSchedulerTestCase, self).setUp()
- self.context = context.get_admin_context()
-
- if sched_class is None:
- self.sched = FakeVsaLeastUsedScheduler()
- else:
- self.sched = sched_class
-
- self.host_num = 10
- self.drive_type_num = 5
-
- self.stubs.Set(rpc, 'cast', fake_rpc_cast)
- self.stubs.Set(self.sched,
- '_get_service_states', self._fake_get_service_states)
- self.stubs.Set(self.sched,
- '_provision_volume', self._fake_provision_volume)
- self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
-
- self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
- self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
-
- self.created_types_lst = []
-
- def tearDown(self):
- for name in self.created_types_lst:
- volume_types.purge(self.context, name)
- super(VsaSchedulerTestCase, self).tearDown()
-
def test_vsa_sched_create_volumes_simple(self):
global scheduled_volumes
scheduled_volumes = []
@@ -245,7 +241,7 @@ class VsaSchedulerTestCase(test.TestCase):
prev = self._generate_default_service_states()
request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
@@ -271,7 +267,7 @@ class VsaSchedulerTestCase(test.TestCase):
init_num_drives=1)
request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
self.assertRaises(exception.NoValidHost,
- self.sched.schedule_create_volumes,
+ self.driver.schedule_create_volumes,
self.context,
request_spec,
availability_zone=None)
@@ -288,7 +284,7 @@ class VsaSchedulerTestCase(test.TestCase):
request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
self.assertRaises(exception.NoValidHost,
- self.sched.schedule_create_volumes,
+ self.driver.schedule_create_volumes,
self.context,
request_spec,
availability_zone=None)
@@ -311,7 +307,7 @@ class VsaSchedulerTestCase(test.TestCase):
request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
self.assertRaises(exception.NoValidHost,
- self.sched.schedule_create_volumes,
+ self.driver.schedule_create_volumes,
self.context,
request_spec,
availability_zone=None)
@@ -326,13 +322,13 @@ class VsaSchedulerTestCase(test.TestCase):
request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
self.stubs.UnsetAll()
- self.stubs.Set(self.sched,
+ self.stubs.Set(self.driver,
'_get_service_states', self._fake_get_service_states)
- self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
- self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+ self.stubs.Set(db, 'volume_create', self._fake_volume_create)
+ self.stubs.Set(db, 'volume_update', self._fake_volume_update)
self.stubs.Set(rpc, 'cast', fake_rpc_cast)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
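
After self.stubs.UnsetAll() every fake this test still needs has to be re-installed, including the module-level fake_rpc_cast that the scheduled_volumes assertions depend on. The real fake is defined near the top of test_vsa_scheduler.py and is not shown in this diff; the body below is an assumption, reconstructed only from how the tests read the globals:

    # Hypothetical reconstruction: swallow rpc.cast so no real volume
    # service is contacted, and record the placement decision that the
    # tests later assert on.
    scheduled_volumes = []

    def fake_rpc_cast(context, topic, msg):
        global scheduled_volumes
        if topic.startswith('volume.'):
            scheduled_volumes.append(
                dict(vol_id=msg['args']['volume_id'],
                     host=topic.partition('.')[2]))
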
@@ -346,7 +342,7 @@ class VsaSchedulerTestCase(test.TestCase):
init_num_drives=1)
request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
@@ -356,13 +352,13 @@ class VsaSchedulerTestCase(test.TestCase):
new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
self._print_service_states()
self.assertRaises(exception.NoValidHost,
- self.sched.schedule_create_volumes,
+ self.driver.schedule_create_volumes,
self.context,
new_request,
availability_zone=None)
@@ -379,26 +375,26 @@ class VsaSchedulerTestCase(test.TestCase):
request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
self.assertRaises(exception.HostBinaryNotFound,
- self.sched.schedule_create_volumes,
- self.context,
+ self.driver.schedule_create_volumes,
+ self.context.elevated(),
request_spec,
availability_zone="nova:host_5")
- self.stubs.Set(nova.db,
+ self.stubs.Set(db,
'service_get_by_args', self._fake_service_get_by_args)
self.stubs.Set(utils,
'service_is_up', self._fake_service_is_up_False)
self.assertRaises(exception.WillNotSchedule,
- self.sched.schedule_create_volumes,
- self.context,
+ self.driver.schedule_create_volumes,
+ self.context.elevated(),
request_spec,
availability_zone="nova:host_5")
self.stubs.Set(utils,
'service_is_up', self._fake_service_is_up_True)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context.elevated(),
request_spec,
availability_zone="nova:host_5")
@@ -419,7 +415,7 @@ class VsaSchedulerTestCase(test.TestCase):
request_spec = self._get_vol_creation_request(num_vols=3,
drive_ix=3,
size=50)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
@@ -459,13 +455,13 @@ class VsaSchedulerTestCase(test.TestCase):
LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
return {'id': volume_id, 'availability_zone': 'nova:host_3'}
- self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
- self.stubs.Set(nova.db,
- 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(db, 'volume_get', _fake_volume_get_az)
+ self.stubs.Set(db, 'service_get_by_args',
+ self._fake_service_get_by_args)
self.stubs.Set(utils,
'service_is_up', self._fake_service_is_up_True)
- self.sched.schedule_create_volume(self.context,
+ self.driver.schedule_create_volume(self.context.elevated(),
123, availability_zone=None)
self.assertEqual(scheduled_volume['id'], 123)
@@ -480,10 +476,10 @@ class VsaSchedulerTestCase(test.TestCase):
global_volume['volume_type_id'] = None
self.assertRaises(exception.NoValidHost,
- self.sched.schedule_create_volume,
- self.context,
- 123,
- availability_zone=None)
+ self.driver.schedule_create_volume,
+ self.context,
+ 123,
+ availability_zone=None)
def test_vsa_sched_create_single_volume(self):
global scheduled_volume
@@ -500,7 +496,7 @@ class VsaSchedulerTestCase(test.TestCase):
drive_ix = 2
name = 'name_' + str(drive_ix)
- volume_types.create(self.context, name,
+ volume_types.create(self.context.elevated(), name,
extra_specs={'type': 'vsa_drive',
'drive_name': name,
'drive_type': 'type_' + str(drive_ix),
@@ -511,7 +507,7 @@ class VsaSchedulerTestCase(test.TestCase):
global_volume['volume_type_id'] = volume_type['id']
global_volume['size'] = 0
- self.sched.schedule_create_volume(self.context,
+ self.driver.schedule_create_volume(self.context,
123, availability_zone=None)
self.assertEqual(scheduled_volume['id'], 123)
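
The recurring switch from self.context to self.context.elevated() in these hunks presumably reflects that volume-type create and purge are admin-only operations: the old setUp used context.get_admin_context(), while the shared base class apparently supplies a plain request context that must be promoted explicitly. A minimal illustration (the user and project names here are hypothetical):

    from nova import context
    from nova.volume import volume_types

    # A non-admin context, as the shared base setUp presumably provides.
    ctxt = context.RequestContext('fake_user', 'fake_project')
    # elevated() returns an admin copy, satisfying the admin-only API.
    volume_types.create(ctxt.elevated(), 'name_2',
                        extra_specs={'type': 'vsa_drive'})
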
@@ -520,12 +516,7 @@ class VsaSchedulerTestCase(test.TestCase):
class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
- def setUp(self):
- super(VsaSchedulerTestCaseMostAvail, self).setUp(
- FakeVsaMostAvailCapacityScheduler())
-
- def tearDown(self):
- super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+ driver_cls = FakeVsaMostAvailCapacityScheduler
def test_vsa_sched_create_single_volume(self):
global scheduled_volume
@@ -542,7 +533,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
drive_ix = 2
name = 'name_' + str(drive_ix)
- volume_types.create(self.context, name,
+ volume_types.create(self.context.elevated(), name,
extra_specs={'type': 'vsa_drive',
'drive_name': name,
'drive_type': 'type_' + str(drive_ix),
@@ -553,7 +544,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
global_volume['volume_type_id'] = volume_type['id']
global_volume['size'] = 0
- self.sched.schedule_create_volume(self.context,
+ self.driver.schedule_create_volume(self.context,
123, availability_zone=None)
self.assertEqual(scheduled_volume['id'], 123)
@@ -572,7 +563,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
self._print_service_states()
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
@@ -603,7 +594,7 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
request_spec = self._get_vol_creation_request(num_vols=3,
drive_ix=3,
size=50)
- self.sched.schedule_create_volumes(self.context,
+ self.driver.schedule_create_volumes(self.context,
request_spec,
availability_zone=None)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 7062097ff..06d84a084 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -645,3 +645,37 @@ class DeprecationTest(test.TestCase):
# Make sure that did *not* generate a warning
self.assertEqual(self.warn, None)
+
+ def test_service_is_up(self):
+ fts_func = datetime.datetime.fromtimestamp
+ fake_now = 1000
+ down_time = 5
+
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(utils, 'utcnow')
+
+ # Up (equal)
+ utils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - down_time),
+ 'created_at': fts_func(fake_now - down_time)}
+ self.mox.ReplayAll()
+ result = utils.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ utils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - down_time + 1),
+ 'created_at': fts_func(fake_now - down_time + 1)}
+ self.mox.ReplayAll()
+ result = utils.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ utils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - down_time - 1),
+ 'created_at': fts_func(fake_now - down_time - 1)}
+ self.mox.ReplayAll()
+ result = utils.service_is_up(service)
+ self.assertFalse(result)
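
For reference, the behaviour this new test pins down: a service counts as up while the time since its last heartbeat is at most service_down_time, and the boundary case where the elapsed time equals the down time is explicitly up. A self-contained sketch consistent with those assertions (the real helper lives in nova/utils.py and reads the service_down_time flag):

    import datetime

    SERVICE_DOWN_TIME = 5  # stand-in for FLAGS.service_down_time


    def utcnow():
        # Wrapped in a function so tests can stub it out, exactly as
        # the mox stub of utils.utcnow does above.
        return datetime.datetime.utcnow()


    def service_is_up(service):
        """Up iff the last heartbeat is no older than the down time."""
        last_heartbeat = service['updated_at'] or service['created_at']
        elapsed = utcnow() - last_heartbeat
        # <= rather than <: the "Up (equal)" case asserts True when
        # elapsed equals service_down_time exactly.
        return elapsed <= datetime.timedelta(seconds=SERVICE_DOWN_TIME)
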