-rw-r--r--    doc/source/devref/filter_scheduler.rst           7
-rw-r--r--    nova/scheduler/driver.py                         15
-rw-r--r--    nova/scheduler/filter_scheduler.py               26
-rw-r--r--    nova/scheduler/filters/affinity_filter.py        20
-rw-r--r--    nova/tests/scheduler/test_filter_scheduler.py    76
-rw-r--r--    nova/tests/scheduler/test_host_filters.py        14
6 files changed, 156 insertions, 2 deletions
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 63ed95c82..31dcfde77 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -91,6 +91,8 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
* |TypeAffinityFilter| - Only passes hosts that are not already running an
instance of the requested type.
* |AggregateTypeAffinityFilter| - limits instance_type by aggregate.
+* |GroupAntiAffinityFilter| - ensures that each instance in a group is
+  scheduled to a different host.

Now we can focus on these standard filter classes in detail. I will pass over
the simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter|,
@@ -163,6 +165,10 @@ of the set of instances uses.
the network address of the current host is in the same sub network as it was
defined in the request.
+|GroupAntiAffinityFilter| - its `host_passes` method returns `True` only if the
+host the instance would be placed on is not already running an instance of the
+group. The group is identified by a name that is passed in via a scheduler hint.
+
|JsonFilter| - this filter provides the ability to write complicated queries
for filtering on host capabilities, based on a simple JSON-like syntax.
The following operations can be used on the host state properties:
@@ -336,6 +342,7 @@ in :mod:`nova.tests.scheduler`.
.. |JsonFilter| replace:: :class:`JsonFilter <nova.scheduler.filters.json_filter.JsonFilter>`
.. |RamFilter| replace:: :class:`RamFilter <nova.scheduler.filters.ram_filter.RamFilter>`
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter <nova.scheduler.filters.affinity_filter.SimpleCIDRAffinityFilter>`
+.. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter <nova.scheduler.filters.affinity_filter.GroupAntiAffinityFilter>`
.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter <nova.scheduler.filters.affinity_filter.DifferentHostFilter>`
.. |SameHostFilter| replace:: :class:`SameHostFilter <nova.scheduler.filters.affinity_filter.SameHostFilter>`
.. |RetryFilter| replace:: :class:`RetryFilter <nova.scheduler.filters.retry_filter.RetryFilter>`
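
A small data-level sketch (not part of the patch) of how the group name travels
from the scheduler hint into the group_hosts list that GroupAntiAffinityFilter
checks. The dictionary keys mirror the patch; the contents and host names are
invented for illustration.

    # Illustrative only: the scheduler-hint shape that activates the filter.
    filter_properties = {
        'scheduler_hints': {'group': 'cats'},   # group name chosen at boot time
    }

    # _schedule() (see the filter_scheduler.py hunk below) resolves the group
    # name into the hosts that already run members of the group:
    group = filter_properties['scheduler_hints'].get('group')
    if group:
        filter_properties.setdefault('group_hosts', [])
        filter_properties['group_hosts'] += ['host1']   # e.g. result of group_hosts()

    # GroupAntiAffinityFilter.host_passes() then rejects any candidate host
    # that appears in filter_properties['group_hosts'].
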
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 226e31bba..4ad548798 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -82,13 +82,16 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
'scheduler.run_instance', notifier.ERROR, payload)
-def instance_update_db(context, instance_uuid):
+def instance_update_db(context, instance_uuid, extra_values=None):
'''Clear the host and node - set the scheduled_at field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
now = timeutils.utcnow()
values = {'host': None, 'node': None, 'scheduled_at': now}
+ if extra_values:
+ values.update(extra_values)
+
return db.instance_update(context, instance_uuid, values)
@@ -132,6 +135,16 @@ class Scheduler(object):
for service in services
if self.servicegroup_api.service_is_up(service)]
+ def group_hosts(self, context, group):
+ """Return the list of hosts that have VM's from the group."""
+
+ # The system_metadata 'group' will be filtered
+ members = db.instance_get_all_by_filters(context,
+ {'deleted': False, 'group': group})
+ return [member['host']
+ for member in members
+ if member.get('host') is not None]
+
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
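
A hedged sketch of what the two driver changes above do with the data, using
made-up instance rows; the real db.instance_get_all_by_filters() call is only
referenced in a comment, not executed.

    # Suppose the query in group_hosts() returned these (invented) rows for
    # the filters {'deleted': False, 'group': 'cats'}:
    members = [
        {'uuid': 'aaa', 'host': 'host1'},
        {'uuid': 'bbb', 'host': None},      # not yet scheduled, dropped below
    ]
    hosts = [m['host'] for m in members if m.get('host') is not None]
    assert hosts == ['host1']

    # instance_update_db() now merges caller-supplied extra_values into the
    # usual host/node/scheduled_at reset before writing the instance row:
    values = {'host': None, 'node': None, 'scheduled_at': '<now>'}
    values.update({'system_metadata': {'system': 'metadata', 'group': 'cats'}})
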
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index b35c81837..05ca62923 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -133,8 +133,17 @@ class FilterScheduler(driver.Scheduler):
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
+ # Record the requested group in the instance's system_metadata
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ group = scheduler_hints.get('group', None)
+ values = None
+ if group:
+ values = request_spec['instance_properties']['system_metadata']
+ values.update({'group': group})
+ values = {'system_metadata': values}
+
updated_instance = driver.instance_update_db(context,
- instance_uuid)
+ instance_uuid, extra_values=values)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
@@ -248,6 +257,18 @@ class FilterScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
+ # Collect the hosts already used by the requested group, if any
+ update_group_hosts = False
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ group = scheduler_hints.get('group', None)
+ if group:
+ group_hosts = self.group_hosts(elevated, group)
+ update_group_hosts = True
+ if 'group_hosts' not in filter_properties:
+ filter_properties.update({'group_hosts': []})
+ configured_hosts = filter_properties['group_hosts']
+ filter_properties['group_hosts'] = configured_hosts + group_hosts
+
config_options = self._get_configuration_options()
# check retry policy. Rather ugly use of instance_uuids[0]...
@@ -299,6 +320,9 @@ class FilterScheduler(driver.Scheduler):
# Now consume the resources so the filter/weights
# will change for the next instance.
best_host.obj.consume_from_instance(instance_properties)
+ if update_group_hosts:
+ filter_properties['group_hosts'].append(best_host.obj.host)
+
return selected_hosts
def _assert_compute_node_has_enough_memory(self, context,
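
To see why appending each chosen host to group_hosts yields anti-affinity
within a single multi-instance request, here is a standalone toy version of
the loop added to _schedule(); the candidate hosts and the "pick the first
allowed host" rule are stand-ins for the real filtering and weighing steps.

    candidates = ['host1', 'host2', 'host3']
    filter_properties = {'group_hosts': []}   # a group was requested, list starts empty
    selected = []

    for instance_uuid in ['uuid-1', 'uuid-2']:
        # what GroupAntiAffinityFilter does: drop hosts already used by the group
        allowed = [h for h in candidates if h not in filter_properties['group_hosts']]
        best = allowed[0]                     # stand-in for the weighing step
        selected.append(best)
        filter_properties['group_hosts'].append(best)

    assert selected == ['host1', 'host2']     # each instance lands on a new host
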
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index 7e51a15f2..a7e894320 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -18,8 +18,11 @@
import netaddr
from nova.compute import api as compute
+from nova.openstack.common import log as logging
from nova.scheduler import filters
+LOG = logging.getLogger(__name__)
+
class AffinityFilter(filters.BaseHostFilter):
def __init__(self):
@@ -80,3 +83,20 @@ class SimpleCIDRAffinityFilter(AffinityFilter):
# We don't have an affinity host address.
return True
+
+
+class GroupAntiAffinityFilter(AffinityFilter):
+ """Schedule the instance on a different host from a set of group
+ instances.
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ group_hosts = filter_properties.get('group_hosts') or []
+ LOG.debug(_("Group affinity: %(host)s in %(configured)s"),
+ {'host': host_state.host,
+ 'configured': group_hosts})
+ if group_hosts:
+ return host_state.host not in group_hosts
+
+ # No groups configured
+ return True
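
A minimal standalone rendering of the filter's decision, for quick
experimentation outside Nova; StubHostState is a local stand-in for the
scheduler's host_state object, not a real Nova class.

    class StubHostState(object):
        def __init__(self, host):
            self.host = host

    def host_passes(host_state, filter_properties):
        group_hosts = filter_properties.get('group_hosts') or []
        if group_hosts:
            # reject hosts that already run an instance of the group
            return host_state.host not in group_hosts
        # no group requested: every host passes
        return True

    print(host_passes(StubHostState('host1'), {'group_hosts': ['host2']}))  # True
    print(host_passes(StubHostState('host2'), {'group_hosts': ['host2']}))  # False
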
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index b4d73ec0c..ffc228786 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -40,6 +40,16 @@ def fake_get_filtered_hosts(hosts, filter_properties):
return list(hosts)
+def fake_get_group_filtered_hosts(hosts, filter_properties):
+ group_hosts = filter_properties.get('group_hosts') or []
+ if group_hosts:
+ hosts = list(hosts)
+ hosts.pop(0)
+ return hosts
+ else:
+ return list(hosts)
+
+
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
@@ -480,3 +490,69 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.MigrationError,
self.driver._assert_compute_node_has_enough_memory,
self.context, instance, dest)
+
+ def test_basic_schedule_run_instances_anti_affinity(self):
+ filter_properties = {'scheduler_hints':
+ {'group': 'cats'}}
+ # Request spec 1
+ instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
+ 'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0, 'vcpus': 1,
+ 'system_metadata': {'system': 'metadata'}}
+ request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
+ 'instance_properties': instance_opts1,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0, 'vcpus': 1}}
+ self.next_weight = 1.0
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_group_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.mox.StubOutWithMock(driver, 'instance_update_db')
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
+ self.mox.StubOutWithMock(sched, 'group_hosts')
+
+ instance1_1 = {'uuid': 'fake-uuid1-1'}
+ instance1_2 = {'uuid': 'fake-uuid1-2'}
+
+ sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
+
+ def inc_launch_index1(*args, **kwargs):
+ request_spec1['instance_properties']['launch_index'] = (
+ request_spec1['instance_properties']['launch_index'] + 1)
+
+ expected_metadata = {'system_metadata':
+ {'system': 'metadata', 'group': 'cats'}}
+ driver.instance_update_db(fake_context, instance1_1['uuid'],
+ extra_values=expected_metadata).WithSideEffects(
+ inc_launch_index1).AndReturn(instance1_1)
+ compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
+ instance=instance1_1, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
+ node='node3')
+
+ driver.instance_update_db(fake_context, instance1_2['uuid'],
+ extra_values=expected_metadata).WithSideEffects(
+ inc_launch_index1).AndReturn(instance1_2)
+ compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
+ instance=instance1_2, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
+ node='node4')
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(fake_context, request_spec1,
+ None, None, None, None, filter_properties)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 230e2ea03..edd2e0d61 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -1400,3 +1400,17 @@ class HostFiltersTestCase(test.TestCase):
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_passes(self):
+ filt_cls = self.class_map['GroupAntiAffinityFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_hosts': []}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_hosts': ['host2']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_fails(self):
+ filt_cls = self.class_map['GroupAntiAffinityFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_hosts': ['host1']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))