From d8e1b8f21043f435c7c4d6f5de5119c424956b06 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 28 Oct 2011 08:48:10 -0700 Subject: flatten distributed scheduler This branch removes AbstractScheduler, BaseScheduler and LeastCostScheduler and replaces it with DistributedScheduler. Now the points of extension are handled via the --default_host_filters and --least_cost_functions only. Also, greatly simplified the zone handling logic in DistributedScheduler, mostly by removing the cryptic dicts with helper classes. Fixed up the Least Cost functions to better deal with multiple functions. (In a followup patch I will remove the normalization that occurs as this will be a problem). Tests were mostly rewritten to support this new world order. Debated removing JSONFilter since it's not accessible from the outside world, but decided to keep it as there are discussions afoot on making scheduler changes without having to redeploy code or restart services. HostFilters once again get all the host service capabilities, but now via a HostInfo class that mostly contains read-only dicts of capabilities. Virtual resource consumption is done in the DistributedScheduler class now. The filters/weighing functions don't need to worry about this. Also, weighing functions only have to return a single host and not worry about the number of instances requested. 
Change-Id: I92600a4a9c58b1add775c328a18d8f48c305861e --- nova/tests/scheduler/fake_zone_manager.py | 72 ++++ nova/tests/scheduler/test_abstract_scheduler.py | 462 --------------------- nova/tests/scheduler/test_distributed_scheduler.py | 262 ++++++++++++ nova/tests/scheduler/test_host_filter.py | 97 ++--- nova/tests/scheduler/test_least_cost.py | 109 +++++ nova/tests/scheduler/test_least_cost_scheduler.py | 116 ------ 6 files changed, 492 insertions(+), 626 deletions(-) create mode 100644 nova/tests/scheduler/fake_zone_manager.py delete mode 100644 nova/tests/scheduler/test_abstract_scheduler.py create mode 100644 nova/tests/scheduler/test_distributed_scheduler.py create mode 100644 nova/tests/scheduler/test_least_cost.py delete mode 100644 nova/tests/scheduler/test_least_cost_scheduler.py (limited to 'nova/tests') diff --git a/nova/tests/scheduler/fake_zone_manager.py b/nova/tests/scheduler/fake_zone_manager.py new file mode 100644 index 000000000..c1991d9b0 --- /dev/null +++ b/nova/tests/scheduler/fake_zone_manager.py @@ -0,0 +1,72 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Fakes For Distributed Scheduler tests. 
+""" + +from nova.scheduler import distributed_scheduler +from nova.scheduler import zone_manager + + +class FakeDistributedScheduler(distributed_scheduler.DistributedScheduler): + # No need to stub anything at the moment + pass + + +class FakeZoneManager(zone_manager.ZoneManager): + """host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0 + host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536 + host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072 + host4: free_ram_mb=8192 free_disk_gb=8192""" + + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'host_memory_free': 1073741824}, + }, + 'host2': { + 'compute': {'host_memory_free': 2147483648}, + }, + 'host3': { + 'compute': {'host_memory_free': 3221225472}, + }, + 'host4': { + 'compute': {'host_memory_free': 999999999}, + }, + } + + def get_host_list_from_db(self, context): + return [ + ('host1', dict(free_disk_gb=1024, free_ram_mb=1024)), + ('host2', dict(free_disk_gb=2048, free_ram_mb=2048)), + ('host3', dict(free_disk_gb=4096, free_ram_mb=4096)), + ('host4', dict(free_disk_gb=8192, free_ram_mb=8192)), + ] + + def _compute_node_get_all(self, context): + return [ + dict(local_gb=1024, memory_mb=1024, service=dict(host='host1')), + dict(local_gb=2048, memory_mb=2048, service=dict(host='host2')), + dict(local_gb=4096, memory_mb=4096, service=dict(host='host3')), + dict(local_gb=8192, memory_mb=8192, service=dict(host='host4')), + ] + + def _instance_get_all(self, context): + return [ + dict(local_gb=512, memory_mb=512, host='host1'), + dict(local_gb=512, memory_mb=512, host='host1'), + dict(local_gb=512, memory_mb=512, host='host2'), + dict(local_gb=1024, memory_mb=1024, host='host3'), + ] diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py deleted file mode 100644 index 08b0b9cde..000000000 --- a/nova/tests/scheduler/test_abstract_scheduler.py +++ /dev/null @@ -1,462 +0,0 @@ -# Copyright 2011 OpenStack LLC. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Abstract Scheduler. -""" - -import json - -import nova.db - -from nova import context -from nova import exception -from nova import rpc -from nova import test -from nova.compute import api as compute_api -from nova.scheduler import driver -from nova.scheduler import abstract_scheduler -from nova.scheduler import base_scheduler -from nova.scheduler import zone_manager - - -def _host_caps(multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. 
- return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - -def fake_zone_manager_service_states(num_hosts): - states = {} - for x in xrange(num_hosts): - states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} - return states - - -class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler): - # No need to stub anything at the moment - pass - - -class FakeBaseScheduler(base_scheduler.BaseScheduler): - # No need to stub anything at the moment - pass - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'host_memory_free': 1073741824}, - }, - 'host2': { - 'compute': {'host_memory_free': 2147483648}, - }, - 'host3': { - 'compute': {'host_memory_free': 3221225472}, - }, - 'host4': { - 'compute': {'host_memory_free': 999999999}, - }, - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs, zones): - return [] - - -# Hmm, I should probably be using mox for this. 
-was_called = False - - -def fake_provision_resource(context, item, request_spec, kwargs): - global was_called - was_called = True - - -def fake_ask_child_zone_to_create_instance(context, zone_info, - request_spec, kwargs): - global was_called - was_called = True - - -def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): - global was_called - was_called = True - - -def fake_provision_resource_from_blob(context, item, request_spec, kwargs): - global was_called - was_called = True - - -def fake_decrypt_blob_returns_local_info(blob): - return {'hostname': 'foooooo'} # values aren't important. - - -def fake_decrypt_blob_returns_child_info(blob): - return {'child_zone': True, - 'child_blob': True} # values aren't important. Keys are. - - -def fake_call_zone_method(context, method, specs, zones): - return [ - (1, [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - (2, [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - (3, [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -def fake_zone_get_all(context): - return [ - dict(id=1, api_url='zone1', - username='admin', password='password', - weight_offset=0.0, weight_scale=1.0), - dict(id=2, api_url='zone2', - username='admin', password='password', - weight_offset=1000.0, weight_scale=1.0), - dict(id=3, api_url='zone3', - username='admin', password='password', - weight_offset=0.0, weight_scale=1000.0), - ] - - -class AbstractSchedulerTestCase(test.TestCase): - """Test case for Abstract Scheduler.""" - - def test_abstract_scheduler(self): - """ - Create a nested set of FakeZones, try to build multiple instances - and ensure that a select call returns the appropriate build plan. 
- """ - sched = FakeAbstractScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = context.RequestContext('user', 'project') - build_plan = sched.select(fake_context, - {'instance_type': {'memory_mb': 512}, - 'num_instances': 4}) - - # 4 from local zones, 12 from remotes - self.assertEqual(16, len(build_plan)) - - hostnames = [plan_item['hostname'] - for plan_item in build_plan if 'hostname' in plan_item] - # 4 local hosts - self.assertEqual(4, len(hostnames)) - - def test_adjust_child_weights(self): - """Make sure the weights returned by child zones are - properly adjusted based on the scale/offset in the zone - db entries. - """ - sched = FakeAbstractScheduler() - child_results = fake_call_zone_method(None, None, None, None) - zones = fake_zone_get_all(None) - sched._adjust_child_weights(child_results, zones) - scaled = [130000, 131000, 132000, 3000] - for zone, results in child_results: - for item in results: - w = item['weight'] - if zone == 'zone1': # No change - self.assertTrue(w < 1000.0) - if zone == 'zone2': # Offset +1000 - self.assertTrue(w >= 1000.0 and w < 2000) - if zone == 'zone3': # Scale x1000 - self.assertEqual(scaled.pop(0), w) - - def test_empty_abstract_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. 
- """ - sched = FakeAbstractScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = context.RequestContext('user', 'project') - request_spec = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, request_spec, - dict(host_filter=None, instance_type={})) - - def test_schedule_do_not_schedule_with_hint(self): - """ - Check the local/child zone routing in the run_instance() call. - If the zone_blob hint was passed in, don't re-schedule. - """ - global was_called - sched = FakeAbstractScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource', fake_provision_resource) - request_spec = { - 'instance_properties': {}, - 'instance_type': {}, - 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', - 'blob': "Non-None blob data", - } - - instances = sched.schedule_run_instance(None, request_spec) - self.assertTrue(instances) - self.assertTrue(was_called) - - def test_provision_resource_local(self): - """Provision a resource locally or remotely.""" - global was_called - sched = FakeAbstractScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource_locally', - fake_provision_resource_locally) - - request_spec = {'hostname': "foo"} - sched._provision_resource(None, request_spec, request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_remote(self): - """Provision a resource locally or remotely.""" - global was_called - sched = FakeAbstractScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource_from_blob', - fake_provision_resource_from_blob) - - request_spec = {} - sched._provision_resource(None, request_spec, request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_empty(self): - """Provision a resource locally or remotely given no hints.""" - global 
was_called - sched = FakeAbstractScheduler() - request_spec = {} - self.assertRaises(abstract_scheduler.InvalidBlob, - sched._provision_resource_from_blob, - None, {}, {}, {}) - - def test_provision_resource_from_blob_with_local_blob(self): - """ - Provision a resource locally or remotely when blob hint passed in. - """ - global was_called - sched = FakeAbstractScheduler() - was_called = False - - def fake_create_db_entry_for_new_instance(self, context, - image, base_options, security_group, - block_device_mapping, num=1): - global was_called - was_called = True - # return fake instances - return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'} - - def fake_cast_to_compute_host(*args, **kwargs): - pass - - self.stubs.Set(sched, '_decrypt_blob', - fake_decrypt_blob_returns_local_info) - self.stubs.Set(driver, 'cast_to_compute_host', - fake_cast_to_compute_host) - self.stubs.Set(compute_api.API, - 'create_db_entry_for_new_instance', - fake_create_db_entry_for_new_instance) - - build_plan_item = {'blob': "Non-None blob data"} - request_spec = {'image': {}, 'instance_properties': {}} - - sched._provision_resource_from_blob(None, build_plan_item, - request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_with_child_blob(self): - """ - Provision a resource locally or remotely when child blob hint - passed in. - """ - global was_called - sched = FakeAbstractScheduler() - self.stubs.Set(sched, '_decrypt_blob', - fake_decrypt_blob_returns_child_info) - was_called = False - self.stubs.Set(sched, '_ask_child_zone_to_create_instance', - fake_ask_child_zone_to_create_instance) - - request_spec = {'blob': "Non-None blob data"} - - sched._provision_resource_from_blob(None, request_spec, - request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_with_immediate_child_blob(self): - """ - Provision a resource locally or remotely when blob hint passed in - from an immediate child. 
- """ - global was_called - sched = FakeAbstractScheduler() - was_called = False - self.stubs.Set(sched, '_ask_child_zone_to_create_instance', - fake_ask_child_zone_to_create_instance) - - request_spec = {'child_blob': True, 'child_zone': True} - - sched._provision_resource_from_blob(None, request_spec, - request_spec, {}) - self.assertTrue(was_called) - - def test_decrypt_blob(self): - """Test that the decrypt method works.""" - - fixture = FakeAbstractScheduler() - test_data = {"foo": "bar"} - - class StubDecryptor(object): - def decryptor(self, key): - return lambda blob: blob - - self.stubs.Set(abstract_scheduler, 'crypto', - StubDecryptor()) - - self.assertEqual(fixture._decrypt_blob(test_data), - json.dumps(test_data)) - - def test_empty_local_hosts(self): - """ - Create a nested set of FakeZones, try to build multiple instances - and ensure that a select call returns the appropriate build plan. - """ - sched = FakeAbstractScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeZoneManager() - # patch this to have no local hosts - zm.service_states = {} - sched.set_zone_manager(zm) - - fake_context = context.RequestContext('user', 'project') - build_plan = sched.select(fake_context, - {'instance_type': {'memory_mb': 512}, - 'num_instances': 4}) - - # 0 from local zones, 12 from remotes - self.assertEqual(12, len(build_plan)) - - def test_run_instance_non_admin(self): - """Test creating an instance locally using run_instance, passing - a non-admin context. 
DB actions should work.""" - sched = FakeAbstractScheduler() - - def fake_cast_to_compute_host(*args, **kwargs): - pass - - def fake_zone_get_all_zero(context): - # make sure this is called with admin context, even though - # we're using user context below - self.assertTrue(context.is_admin) - return [] - - self.stubs.Set(driver, 'cast_to_compute_host', - fake_cast_to_compute_host) - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all_zero) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = context.RequestContext('user', 'project') - - request_spec = { - 'image': {'properties': {}}, - 'security_group': [], - 'instance_properties': { - 'project_id': fake_context.project_id, - 'user_id': fake_context.user_id}, - 'instance_type': {'memory_mb': 256}, - 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter' - } - - instances = sched.schedule_run_instance(fake_context, request_spec) - self.assertEqual(len(instances), 1) - self.assertFalse(instances[0].get('_is_precooked', False)) - nova.db.instance_destroy(fake_context, instances[0]['id']) - - -class BaseSchedulerTestCase(test.TestCase): - """Test case for Base Scheduler.""" - - def test_weigh_hosts(self): - """ - Try to weigh a short list of hosts and make sure enough - entries for a larger number instances are returned. 
- """ - - sched = FakeBaseScheduler() - - # Fake out a list of hosts - zm = FakeZoneManager() - hostlist = [(host, services['compute']) - for host, services in zm.service_states.items() - if 'compute' in services] - - # Call weigh_hosts() - num_instances = len(hostlist) * 2 + len(hostlist) / 2 - instlist = sched.weigh_hosts(dict(num_instances=num_instances), - hostlist) - - # Should be enough entries to cover all instances - self.assertEqual(len(instlist), num_instances) diff --git a/nova/tests/scheduler/test_distributed_scheduler.py b/nova/tests/scheduler/test_distributed_scheduler.py new file mode 100644 index 000000000..a66fae343 --- /dev/null +++ b/nova/tests/scheduler/test_distributed_scheduler.py @@ -0,0 +1,262 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Distributed Scheduler. 
+""" + +import json + +import nova.db + +from nova import context +from nova import exception +from nova import rpc +from nova import test +from nova.compute import api as compute_api +from nova.scheduler import driver +from nova.scheduler import distributed_scheduler +from nova.scheduler import least_cost +from nova.scheduler import zone_manager +from nova.tests.scheduler import fake_zone_manager as ds_fakes + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + def get_host_list_from_db(self, context): + return [] + + def _compute_node_get_all(*args, **kwargs): + return [] + + def _instance_get_all(*args, **kwargs): + return [] + + +def fake_call_zone_method(context, method, specs, zones): + return [ + (1, [ + dict(weight=2, blob='AAAAAAA'), + dict(weight=4, blob='BBBBBBB'), + dict(weight=6, blob='CCCCCCC'), + dict(weight=8, blob='DDDDDDD'), + ]), + (2, [ + dict(weight=10, blob='EEEEEEE'), + dict(weight=12, blob='FFFFFFF'), + dict(weight=14, blob='GGGGGGG'), + dict(weight=16, blob='HHHHHHH'), + ]), + (3, [ + dict(weight=18, blob='IIIIIII'), + dict(weight=20, blob='JJJJJJJ'), + dict(weight=22, blob='KKKKKKK'), + dict(weight=24, blob='LLLLLLL'), + ]), + ] + + +def fake_zone_get_all(context): + return [ + dict(id=1, api_url='zone1', + username='admin', password='password', + weight_offset=0.0, weight_scale=1.0), + dict(id=2, api_url='zone2', + username='admin', password='password', + weight_offset=1000.0, weight_scale=1.0), + dict(id=3, api_url='zone3', + username='admin', password='password', + weight_offset=0.0, weight_scale=1000.0), + ] + + +class DistributedSchedulerTestCase(test.TestCase): + """Test case for Distributed Scheduler.""" + + def test_adjust_child_weights(self): + """Make sure the weights returned by child zones are + properly adjusted based on the scale/offset in the zone + db entries. 
+ """ + sched = ds_fakes.FakeDistributedScheduler() + child_results = fake_call_zone_method(None, None, None, None) + zones = fake_zone_get_all(None) + weighted_hosts = sched._adjust_child_weights(child_results, zones) + scaled = [130000, 131000, 132000, 3000] + for weighted_host in weighted_hosts: + w = weighted_host.weight + if weighted_host.zone == 'zone1': # No change + self.assertTrue(w < 1000.0) + if weighted_host.zone == 'zone2': # Offset +1000 + self.assertTrue(w >= 1000.0 and w < 2000) + if weighted_host.zone == 'zone3': # Scale x1000 + self.assertEqual(scaled.pop(0), w) + + def test_run_instance_no_hosts(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + def _fake_empty_call_zone_method(*args, **kwargs): + return [] + + sched = ds_fakes.FakeDistributedScheduler() + sched.zone_manager = FakeEmptyZoneManager() + self.stubs.Set(sched, '_call_zone_method', + _fake_empty_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + + fake_context = context.RequestContext('user', 'project') + request_spec = dict(instance_type=dict(memory_mb=1, local_gb=1)) + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, request_spec) + + def test_run_instance_with_blob_hint(self): + """ + Check the local/child zone routing in the run_instance() call. + If the zone_blob hint was passed in, don't re-schedule. 
+ """ + self.schedule_called = False + self.from_blob_called = False + self.locally_called = False + self.child_zone_called = False + + def _fake_schedule(*args, **kwargs): + self.schedule_called = True + return least_cost.WeightedHost(1, host='x') + + def _fake_make_weighted_host_from_blob(*args, **kwargs): + self.from_blob_called = True + return least_cost.WeightedHost(1, zone='x', blob='y') + + def _fake_provision_resource_locally(*args, **kwargs): + self.locally_called = True + return 1 + + def _fake_ask_child_zone_to_create_instance(*args, **kwargs): + self.child_zone_called = True + return 2 + + sched = ds_fakes.FakeDistributedScheduler() + self.stubs.Set(sched, '_schedule', _fake_schedule) + self.stubs.Set(sched, '_make_weighted_host_from_blob', + _fake_make_weighted_host_from_blob) + self.stubs.Set(sched, '_provision_resource_locally', + _fake_provision_resource_locally) + self.stubs.Set(sched, '_ask_child_zone_to_create_instance', + _fake_ask_child_zone_to_create_instance) + request_spec = { + 'instance_properties': {}, + 'instance_type': {}, + 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', + 'blob': "Non-None blob data", + } + + fake_context = context.RequestContext('user', 'project') + instances = sched.schedule_run_instance(fake_context, request_spec) + self.assertTrue(instances) + self.assertFalse(self.schedule_called) + self.assertTrue(self.from_blob_called) + self.assertTrue(self.child_zone_called) + self.assertFalse(self.locally_called) + self.assertEquals(instances, [2]) + + def test_run_instance_non_admin(self): + """Test creating an instance locally using run_instance, passing + a non-admin context. 
DB actions should work.""" + self.was_admin = False + + def fake_schedule(context, *args, **kwargs): + # make sure this is called with admin context, even though + # we're using user context below + self.was_admin = context.is_admin + return [] + + sched = ds_fakes.FakeDistributedScheduler() + self.stubs.Set(sched, '_schedule', fake_schedule) + + fake_context = context.RequestContext('user', 'project') + + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, {}) + self.assertTrue(self.was_admin) + + def test_schedule_bad_topic(self): + """Parameter checking.""" + sched = ds_fakes.FakeDistributedScheduler() + self.assertRaises(NotImplementedError, sched._schedule, None, "foo", + {}) + + def test_schedule_no_instance_type(self): + """Parameter checking.""" + sched = ds_fakes.FakeDistributedScheduler() + self.assertRaises(NotImplementedError, sched._schedule, None, + "compute", {}) + + def test_schedule_happy_day(self): + """_schedule() has no branching logic beyond basic input parameter + checking. 
Just make sure there's nothing glaringly wrong by doing + a happy day pass through.""" + + self.next_weight = 1.0 + + def _fake_filter_hosts(topic, request_info, unfiltered_hosts): + return unfiltered_hosts + + def _fake_weigh_hosts(request_info, hosts): + self.next_weight += 2.0 + host, hostinfo = hosts[0] + return least_cost.WeightedHost(self.next_weight, host=host, + hostinfo=hostinfo) + + sched = ds_fakes.FakeDistributedScheduler() + fake_context = context.RequestContext('user', 'project') + sched.zone_manager = ds_fakes.FakeZoneManager() + self.stubs.Set(sched, '_filter_hosts', _fake_filter_hosts) + self.stubs.Set(least_cost, 'weigh_hosts', _fake_weigh_hosts) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + instance_type = dict(memory_mb=512, local_gb=512) + request_spec = dict(num_instances=10, instance_type=instance_type) + weighted_hosts = sched._schedule(fake_context, 'compute', + request_spec) + self.assertEquals(len(weighted_hosts), 10) + for weighted_host in weighted_hosts: + # We set this up so remote hosts have even weights ... 
+ if int(weighted_host.weight) % 2 == 0: + self.assertTrue(weighted_host.zone != None) + self.assertTrue(weighted_host.host == None) + else: + self.assertTrue(weighted_host.host != None) + self.assertTrue(weighted_host.zone == None) + + def test_decrypt_blob(self): + """Test that the decrypt method works.""" + + fixture = ds_fakes.FakeDistributedScheduler() + test_data = {'weight': 1, 'host': 'x', 'blob': 'y', 'zone': 'z'} + + class StubDecryptor(object): + def decryptor(self, key): + return lambda blob: blob + + self.stubs.Set(distributed_scheduler, 'crypto', StubDecryptor()) + + weighted_host = fixture._make_weighted_host_from_blob( + json.dumps(test_data)) + self.assertTrue(isinstance(weighted_host, least_cost.WeightedHost)) + self.assertEqual(weighted_host.to_dict(), dict(weight=1, host='x', + blob='y', zone='z')) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a21f4c380..96f26b23c 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -21,11 +21,9 @@ import json import nova from nova import exception from nova import test -from nova.scheduler import host_filter - - -class FakeZoneManager: - pass +from nova.scheduler import distributed_scheduler as dist +from nova.scheduler import zone_manager +from nova.tests.scheduler import fake_zone_manager as ds_fakes class HostFilterTestCase(test.TestCase): @@ -60,18 +58,18 @@ class HostFilterTestCase(test.TestCase): default_host_filters = ['AllHostsFilter'] self.flags(default_host_filters=default_host_filters) self.instance_type = dict(name='tiny', - memory_mb=50, + memory_mb=30, vcpus=10, - local_gb=500, + local_gb=300, flavorid=1, swap=500, rxtx_quota=30000, rxtx_cap=200, extra_specs={}) self.gpu_instance_type = dict(name='tiny.gpu', - memory_mb=50, + memory_mb=30, vcpus=10, - local_gb=500, + local_gb=300, flavorid=2, swap=500, rxtx_quota=30000, @@ -79,86 +77,89 @@ class HostFilterTestCase(test.TestCase): 
extra_specs={'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}) - self.zone_manager = FakeZoneManager() + self.zone_manager = ds_fakes.FakeZoneManager() states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} + for x in xrange(4): + states['host%d' % (x + 1)] = {'compute': self._host_caps(x)} self.zone_manager.service_states = states # Add some extra capabilities to some hosts - host07 = self.zone_manager.service_states['host07']['compute'] - host07['xpu_arch'] = 'fermi' - host07['xpu_info'] = 'Tesla 2050' + host4 = self.zone_manager.service_states['host4']['compute'] + host4['xpu_arch'] = 'fermi' + host4['xpu_info'] = 'Tesla 2050' - host08 = self.zone_manager.service_states['host08']['compute'] - host08['xpu_arch'] = 'radeon' + host2 = self.zone_manager.service_states['host2']['compute'] + host2['xpu_arch'] = 'radeon' - host09 = self.zone_manager.service_states['host09']['compute'] - host09['xpu_arch'] = 'fermi' - host09['xpu_info'] = 'Tesla 2150' + host3 = self.zone_manager.service_states['host3']['compute'] + host3['xpu_arch'] = 'fermi' + host3['xpu_info'] = 'Tesla 2150' def _get_all_hosts(self): - return self.zone_manager.service_states.items() + return self.zone_manager.get_all_host_data(None).items() def test_choose_filter(self): # Test default filter ... - hfs = host_filter.choose_host_filters() + sched = dist.DistributedScheduler() + hfs = sched._choose_host_filters() hf = hfs[0] self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter') # Test valid filter ... - hfs = host_filter.choose_host_filters('InstanceTypeFilter') + hfs = sched._choose_host_filters('InstanceTypeFilter') hf = hfs[0] self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter') # Test invalid filter ... 
try: - host_filter.choose_host_filters('does not exist') + sched._choose_host_filters('does not exist') self.fail("Should not find host filter.") except exception.SchedulerHostFilterNotFound: pass def test_all_host_filter(self): - hfs = host_filter.choose_host_filters('AllHostsFilter') + sched = dist.DistributedScheduler() + hfs = sched._choose_host_filters('AllHostsFilter') hf = hfs[0] all_hosts = self._get_all_hosts() cooked = hf.instance_type_to_filter(self.instance_type) hosts = hf.filter_hosts(all_hosts, cooked) - self.assertEquals(10, len(hosts)) + self.assertEquals(4, len(hosts)) for host, capabilities in hosts: self.assertTrue(host.startswith('host')) def test_instance_type_filter(self): hf = nova.scheduler.filters.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk + # filter all hosts that can support 30 ram and 300 disk cooked = hf.instance_type_to_filter(self.instance_type) all_hosts = self._get_all_hosts() hosts = hf.filter_hosts(all_hosts, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] + self.assertEquals(3, len(hosts)) + just_hosts = [host for host, hostinfo in hosts] just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) + self.assertEquals('host4', just_hosts[2]) + self.assertEquals('host3', just_hosts[1]) + self.assertEquals('host2', just_hosts[0]) def test_instance_type_filter_extra_specs(self): hf = nova.scheduler.filters.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk + # filter all hosts that can support 30 ram and 300 disk cooked = hf.instance_type_to_filter(self.gpu_instance_type) all_hosts = self._get_all_hosts() hosts = hf.filter_hosts(all_hosts, cooked) self.assertEquals(1, len(hosts)) just_hosts = [host for host, caps in hosts] - self.assertEquals('host07', just_hosts[0]) + self.assertEquals('host4', just_hosts[0]) def test_json_filter(self): hf = nova.scheduler.filters.JsonFilter() - # 
filter all hosts that can support 50 ram and 500 disk + # filter all hosts that can support 30 ram and 300 disk cooked = hf.instance_type_to_filter(self.instance_type) all_hosts = self._get_all_hosts() hosts = hf.filter_hosts(all_hosts, cooked) - self.assertEquals(6, len(hosts)) + self.assertEquals(2, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) + self.assertEquals('host3', just_hosts[0]) + self.assertEquals('host4', just_hosts[1]) # Try some custom queries @@ -168,18 +169,18 @@ class HostFilterTestCase(test.TestCase): ['<', '$compute.disk_available', 300], ], ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700], + ['>', '$compute.host_memory_free', 30], + ['>', '$compute.disk_available', 300], ] ] cooked = json.dumps(raw) hosts = hf.filter_hosts(all_hosts, cooked) - self.assertEquals(5, len(hosts)) + self.assertEquals(3, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) + for index, host in zip([1, 2, 4], just_hosts): + self.assertEquals('host%d' % index, host) raw = ['not', ['=', '$compute.host_memory_free', 30], @@ -187,20 +188,20 @@ class HostFilterTestCase(test.TestCase): cooked = json.dumps(raw) hosts = hf.filter_hosts(all_hosts, cooked) - self.assertEquals(9, len(hosts)) + self.assertEquals(3, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) + for index, host in zip([1, 2, 4], just_hosts): + self.assertEquals('host%d' % index, host) raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) hosts = hf.filter_hosts(all_hosts, cooked) - self.assertEquals(5, len(hosts)) + self.assertEquals(2, len(hosts)) just_hosts = 
[host for host, caps in hosts] just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) + for index, host in zip([2, 4], just_hosts): + self.assertEquals('host%d' % index, host) # Try some bogus input ... raw = ['unknown command', ] diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py new file mode 100644 index 000000000..ba6cdb686 --- /dev/null +++ b/nova/tests/scheduler/test_least_cost.py @@ -0,0 +1,109 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Least Cost functions. 
+""" +from nova.scheduler import least_cost +from nova.scheduler import zone_manager +from nova import test +from nova.tests.scheduler import fake_zone_manager + + +def offset(hostinfo): + return hostinfo.free_ram_mb + 10000 + + +def scale(hostinfo): + return hostinfo.free_ram_mb * 2 + + +class LeastCostTestCase(test.TestCase): + def setUp(self): + super(LeastCostTestCase, self).setUp() + + self.zone_manager = fake_zone_manager.FakeZoneManager() + + def tearDown(self): + super(LeastCostTestCase, self).tearDown() + + def test_normalize_grid(self): + raw = [ + [1, 2, 3, 4, 5], + [10, 20, 30, 40, 50], + [100, 200, 300, 400, 500], + ] + expected = [ + [.2, .4, .6, .8, 1.0], + [.2, .4, .6, .8, 1.0], + [.2, .4, .6, .8, 1.0], + ] + + self.assertEquals(expected, least_cost.normalize_grid(raw)) + + self.assertEquals([[]], least_cost.normalize_grid([])) + self.assertEquals([[]], least_cost.normalize_grid([[]])) + + def test_weighted_sum_happy_day(self): + fn_tuples = [(1.0, offset), (1.0, scale)] + hostinfo_list = self.zone_manager.get_all_host_data(None).items() + + # host1: free_ram_mb=0 + # host2: free_ram_mb=1536 + # host3: free_ram_mb=3072 + # host4: free_ram_mb=8192 + + # [offset, scale]= + # [10000, 11536, 13072, 18192] + # [0, 768, 1536, 4096] + + # normalized = + # [ 0.55, 0.63, 0.72, 1.0] + # [ 0.0, 0.19, 0.38, 1.0] + + # adjusted [ 1.0 * x + 1.0 * y] = + # [0.55, 0.82, 1.1, 2.0] + + # so, host1 should win: + weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples) + self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01) + self.assertEqual(weighted_host.host, 'host1') + + def test_weighted_sum_single_function(self): + fn_tuples = [(1.0, offset), ] + hostinfo_list = self.zone_manager.get_all_host_data(None).items() + + # host1: free_ram_mb=0 + # host2: free_ram_mb=1536 + # host3: free_ram_mb=3072 + # host4: free_ram_mb=8192 + + # [offset, ]= + # [10000, 11536, 13072, 18192] + + # normalized = + # [ 0.55, 0.63, 0.72, 1.0] + + # so, host1 should win: + 
weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples) + self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01) + self.assertEqual(weighted_host.host, 'host1') + + def test_get_cost_functions(self): + fns = least_cost.get_cost_fns() + self.assertEquals(len(fns), 1) + weight, fn = fns[0] + self.assertEquals(weight, 1.0) + hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000) + self.assertEquals(1000, fn(hostinfo)) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py deleted file mode 100644 index 589308e38..000000000 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Tests For Least Cost Scheduler -""" -import copy - -from nova import test -from nova.scheduler import least_cost -from nova.tests.scheduler import test_abstract_scheduler - -MB = 1024 * 1024 - - -class FakeHost(object): - def __init__(self, host_id, free_ram, io): - self.id = host_id - self.free_ram = free_ram - self.io = io - - -class WeightedSumTestCase(test.TestCase): - def test_empty_domain(self): - domain = [] - weighted_fns = [] - result = least_cost.weighted_sum(domain, weighted_fns) - expected = [] - self.assertEqual(expected, result) - - def test_basic_costing(self): - hosts = [ - FakeHost(1, 512 * MB, 100), - FakeHost(2, 256 * MB, 400), - FakeHost(3, 512 * MB, 100), - ] - - weighted_fns = [ - (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* - (2, lambda h: h.io), # Avoid high I/O - ] - - costs = least_cost.weighted_sum( - domain=hosts, weighted_fns=weighted_fns) - - # Each 256 MB unit of free-ram contributes 0.5 points by way of: - # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 - # Each 100 iops of IO adds 0.5 points by way of: - # cost = 2 * (100/400) = 2 * 0.25 = 0.5 - expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) - - -class LeastCostSchedulerTestCase(test.TestCase): - def setUp(self): - super(LeastCostSchedulerTestCase, self).setUp() - - class FakeZoneManager: - pass - - zone_manager = FakeZoneManager() - - states = test_abstract_scheduler.fake_zone_manager_service_states( - num_hosts=10) - zone_manager.service_states = states - - self.sched = least_cost.LeastCostScheduler() - self.sched.zone_manager = zone_manager - - def tearDown(self): - super(LeastCostSchedulerTestCase, self).tearDown() - - def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(request_spec, hosts) - self.assertDictListMatch(weighted, expected, approx_equal=True) - - def test_no_hosts(self): - num = 1 - request_spec = {} - hosts = [] - - expected = [] - self.assertWeights(expected, num, 
request_spec, hosts) - - def test_noop_cost_fn(self): - self.flags(least_cost_scheduler_cost_functions=[ - 'nova.scheduler.least_cost.noop_cost_fn'], - noop_cost_fn_weight=1) - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - expected = [{"hostname": hostname, "weight": 1, "capabilities": caps} - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_cost_fn_weights(self): - self.flags(least_cost_scheduler_cost_functions=[ - 'nova.scheduler.least_cost.noop_cost_fn'], - noop_cost_fn_weight=2) - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - expected = [{"hostname": hostname, "weight": 2, "capabilities": caps} - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) -- cgit