From 8310be5b941931cb6b63a4d0f778cee27878095d Mon Sep 17 00:00:00 2001 From: John Herndon Date: Wed, 16 Jan 2013 16:56:44 +0000 Subject: filter_scheduler: Select from a subset of hosts. Adding a subset of best hosts to choose a host from, instead of choosing the host with the highest weight. The subset size is configurable. Setting the subset size to 1 will mimic the old behavior. DocImpact Change-Id: Icc6f8a7ca7cf992d99b872a45e241f13fa6e1536 Fixes: bug 1098635 --- nova/scheduler/filter_scheduler.py | 37 ++++++++-- nova/tests/scheduler/test_filter_scheduler.py | 100 ++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 6 deletions(-) diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 05ca62923..c9118cb22 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -19,6 +19,8 @@ You can customize this scheduler by specifying your own Host Filters and Weighing Functions. """ +import random + from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -30,6 +32,21 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) +filter_scheduler_opts = [ + cfg.IntOpt('scheduler_host_subset_size', + default=1, + help='New instances will be scheduled on a host chosen ' + 'randomly from a subset of the N best hosts. This ' + 'property defines the subset size that a host is ' + 'chosen from. A value of 1 chooses the ' + 'first host returned by the weighing functions. ' + 'This value must be at least 1. 
Any value less than 1 ' + 'will be ignored, and 1 will be used instead') +] + +CONF.register_opts(filter_scheduler_opts) + + class FilterScheduler(driver.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): @@ -314,15 +331,23 @@ class FilterScheduler(driver.Scheduler): weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) - best_host = weighed_hosts[0] - LOG.debug(_("Choosing host %(best_host)s") % locals()) - selected_hosts.append(best_host) + + scheduler_host_subset_size = CONF.scheduler_host_subset_size + if scheduler_host_subset_size > len(weighed_hosts): + scheduler_host_subset_size = len(weighed_hosts) + if scheduler_host_subset_size < 1: + scheduler_host_subset_size = 1 + + chosen_host = random.choice( + weighed_hosts[0:scheduler_host_subset_size]) + LOG.debug(_("Choosing host %(chosen_host)s") % locals()) + selected_hosts.append(chosen_host) + # Now consume the resources so the filter/weights # will change for the next instance. 
- best_host.obj.consume_from_instance(instance_properties) + chosen_host.obj.consume_from_instance(instance_properties) if update_group_hosts is True: - filter_properties['group_hosts'].append(best_host.obj.host) - + filter_properties['group_hosts'].append(chosen_host.obj.host) return selected_hosts def _assert_compute_node_has_enough_memory(self, context, diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index ffc228786..4b07581fb 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -556,3 +556,103 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): self.mox.ReplayAll() sched.schedule_run_instance(fake_context, request_spec1, None, None, None, None, filter_properties) + + def test_schedule_host_pool(self): + """Make sure the scheduler_host_subset_size property works properly.""" + + self.flags(scheduler_host_subset_size=2) + sched = fakes.FakeFilterScheduler() + + fake_context = context.RequestContext('user', 'project', + is_admin=True) + self.stubs.Set(sched.host_manager, 'get_filtered_hosts', + fake_get_filtered_hosts) + fakes.mox_host_manager_db_calls(self.mox, fake_context) + + instance_properties = {'project_id': 1, + 'root_gb': 512, + 'memory_mb': 512, + 'ephemeral_gb': 0, + 'vcpus': 1, + 'os_type': 'Linux'} + + request_spec = dict(instance_properties=instance_properties) + filter_properties = {} + self.mox.ReplayAll() + hosts = sched._schedule(self.context, request_spec, + filter_properties=filter_properties) + + # one host should be chosen + self.assertEqual(len(hosts), 1) + + def test_schedule_large_host_pool(self): + """Hosts should still be chosen if pool size + is larger than number of filtered hosts""" + + sched = fakes.FakeFilterScheduler() + + fake_context = context.RequestContext('user', 'project', + is_admin=True) + self.flags(scheduler_host_subset_size=20) + self.stubs.Set(sched.host_manager, 'get_filtered_hosts', 
+                       fake_get_filtered_hosts)
+        fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+        instance_properties = {'project_id': 1,
+                               'root_gb': 512,
+                               'memory_mb': 512,
+                               'ephemeral_gb': 0,
+                               'vcpus': 1,
+                               'os_type': 'Linux'}
+        request_spec = dict(instance_properties=instance_properties)
+        filter_properties = {}
+        self.mox.ReplayAll()
+        hosts = sched._schedule(self.context, request_spec,
+                                filter_properties=filter_properties)
+
+        # one host should be chosen
+        self.assertEqual(len(hosts), 1)
+
+    def test_schedule_chooses_best_host(self):
+        """If scheduler_host_subset_size is 1, the host with the greatest
+        weight should be returned"""
+
+        self.flags(scheduler_host_subset_size=1)
+
+        sched = fakes.FakeFilterScheduler()
+
+        fake_context = context.RequestContext('user', 'project',
+                                              is_admin=True)
+        self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+                       fake_get_filtered_hosts)
+        fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+        self.next_weight = 50
+
+        def _fake_weigh_objects(_self, functions, hosts, options):
+            this_weight = self.next_weight
+            self.next_weight = 0
+            host_state = hosts[0]
+            return [weights.WeighedHost(host_state, this_weight)]
+
+        instance_properties = {'project_id': 1,
+                               'root_gb': 512,
+                               'memory_mb': 512,
+                               'ephemeral_gb': 0,
+                               'vcpus': 1,
+                               'os_type': 'Linux'}
+
+        request_spec = dict(instance_properties=instance_properties)
+
+        self.stubs.Set(weights.HostWeightHandler,
+                       'get_weighed_objects', _fake_weigh_objects)
+
+        filter_properties = {}
+        self.mox.ReplayAll()
+        hosts = sched._schedule(self.context, request_spec,
+                                filter_properties=filter_properties)
+
+        # one host should be chosen
+        self.assertEquals(1, len(hosts))
+
+        self.assertEquals(50, hosts[0].weight)
-- cgit