 doc/source/devref/filter_scheduler.rst     |  6
 nova/scheduler/filters/ram_filter.py       | 60
 nova/tests/scheduler/test_host_filters.py  | 58
 3 files changed, 119 insertions(+), 5 deletions(-)
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 1a5ed11a2..6cde0d61c 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -78,6 +78,11 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
 * |JsonFilter| - allows simple JSON-based grammar for selecting hosts.
 * |RamFilter| - filters hosts by their RAM. Only hosts with sufficient RAM
   to host the instance are passed.
+* |AggregateRamFilter| - filters hosts by RAM with a per-aggregate
+  ram_allocation_ratio setting. If no per-aggregate value is found, it falls
+  back to the global default ram_allocation_ratio. If more than one value is
+  found for a host (meaning the host is in two different aggregates with
+  different ratio settings), the minimum value is used.
 * |SimpleCIDRAffinityFilter| - allows to put a new instance on a host within
   the same IP block.
 * |DifferentHostFilter| - allows to put the instance on a different host from a
@@ -285,6 +290,7 @@ in :mod:`nova.tests.scheduler`.
.. |IsolatedHostsFilter| replace:: :class:`IsolatedHostsFilter <nova.scheduler.filters.isolated_hosts_filter>`
.. |JsonFilter| replace:: :class:`JsonFilter <nova.scheduler.filters.json_filter.JsonFilter>`
.. |RamFilter| replace:: :class:`RamFilter <nova.scheduler.filters.ram_filter.RamFilter>`
+.. |AggregateRamFilter| replace:: :class:`AggregateRamFilter <nova.scheduler.filters.ram_filter.AggregateRamFilter>`
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter <nova.scheduler.filters.affinity_filter.SimpleCIDRAffinityFilter>`
.. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter <nova.scheduler.filters.affinity_filter.GroupAntiAffinityFilter>`
.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter <nova.scheduler.filters.affinity_filter.DifferentHostFilter>`
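
The devref bullet above describes the operator-facing behavior; a brief deployment sketch may help make it concrete. The scheduler_default_filters option and the nova aggregate-set-metadata client command are the standard interfaces of this era, while the aggregate id 1 and the exact filter list are illustrative assumptions, not part of this change:

    # /etc/nova/nova.conf -- enable the per-aggregate filter and keep a
    # global fallback ratio (the filter list shown is illustrative):
    [DEFAULT]
    scheduler_default_filters = AggregateRamFilter,ComputeFilter
    ram_allocation_ratio = 1.5

    # Override the ratio for all hosts in aggregate 1 (id is hypothetical):
    $ nova aggregate-set-metadata 1 ram_allocation_ratio=2.0
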
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index 7e20e3d15..ed601ec32 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -16,21 +16,27 @@
 
 from oslo.config import cfg
 
+from nova import db
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 
 LOG = logging.getLogger(__name__)
 
-ram_allocation_ratio_opt = cfg.FloatOpt("ram_allocation_ratio",
+ram_allocation_ratio_opt = cfg.FloatOpt('ram_allocation_ratio',
         default=1.5,
-        help="virtual ram to physical ram allocation ratio")
+        help='Virtual RAM to physical RAM allocation ratio affecting all '
+             'RAM filters. This is the global ratio used by RamFilter; '
+             'AggregateRamFilter falls back to this value when no '
+             'per-aggregate setting is found.')
 
 CONF = cfg.CONF
 CONF.register_opt(ram_allocation_ratio_opt)
 
 
-class RamFilter(filters.BaseHostFilter):
-    """Ram Filter with over subscription flag."""
+class BaseRamFilter(filters.BaseHostFilter):
+
+    def _get_ram_allocation_ratio(self, host_state, filter_properties):
+        raise NotImplementedError
 
     def host_passes(self, host_state, filter_properties):
         """Only return hosts with sufficient available RAM."""
@@ -39,7 +45,10 @@ class RamFilter(filters.BaseHostFilter):
         free_ram_mb = host_state.free_ram_mb
         total_usable_ram_mb = host_state.total_usable_ram_mb
 
-        memory_mb_limit = total_usable_ram_mb * CONF.ram_allocation_ratio
+        ram_allocation_ratio = self._get_ram_allocation_ratio(host_state,
+                                                              filter_properties)
+
+        memory_mb_limit = total_usable_ram_mb * ram_allocation_ratio
         used_ram_mb = total_usable_ram_mb - free_ram_mb
         usable_ram = memory_mb_limit - used_ram_mb
         if not usable_ram >= requested_ram:
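
To make the arithmetic in host_passes concrete, here is the scenario exercised by the default-value test further below, as a self-contained Python snippet (values copied from that test):

    # Worked example of the oversubscription check in host_passes above.
    total_usable_ram_mb = 1024      # physical RAM reported by the host
    free_ram_mb = 1023              # 1 MB already consumed
    ram_allocation_ratio = 2.0      # per-aggregate override from the test
    requested_ram = 1024            # instance_type['memory_mb']

    memory_mb_limit = total_usable_ram_mb * ram_allocation_ratio   # 2048.0
    used_ram_mb = total_usable_ram_mb - free_ram_mb                # 1
    usable_ram = memory_mb_limit - used_ram_mb                     # 2047.0
    assert usable_ram >= requested_ram   # the host passes the filter
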
@@ -53,3 +62,44 @@ class RamFilter(filters.BaseHostFilter):
         # save oversubscription limit for compute node to test against:
         host_state.limits['memory_mb'] = memory_mb_limit
         return True
+
+
+class RamFilter(BaseRamFilter):
+    """Ram Filter with over subscription flag."""
+
+    def _get_ram_allocation_ratio(self, host_state, filter_properties):
+        return CONF.ram_allocation_ratio
+
+
+class AggregateRamFilter(BaseRamFilter):
+    """AggregateRamFilter with per-aggregate ram subscription flag.
+
+    Falls back to the global ratio if no per-aggregate setting is found.
+    """
+
+    def _get_ram_allocation_ratio(self, host_state, filter_properties):
+        context = filter_properties['context'].elevated()
+        # TODO(uni): DB query in filter is a performance hit, especially for
+        # systems with lots of hosts. We will need a general solution to fix
+        # all filters that make aggregate DB calls.
+        metadata = db.aggregate_metadata_get_by_host(
+            context, host_state.host, key='ram_allocation_ratio')
+        aggregate_vals = metadata.get('ram_allocation_ratio', set())
+        num_values = len(aggregate_vals)
+
+        if num_values == 0:
+            return CONF.ram_allocation_ratio
+
+        if num_values > 1:
+            LOG.warning(_("%(num_values)d ram_allocation_ratio values found; "
+                          "the minimum value will be used."),
+                        {'num_values': num_values})
+
+        try:
+            ratio = float(min(aggregate_vals))
+        except ValueError as e:
+            LOG.warning(_("Could not decode ram_allocation_ratio: '%(e)s'"),
+                        {'e': e})
+            ratio = CONF.ram_allocation_ratio
+
+        return ratio
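
The resolution rules in _get_ram_allocation_ratio are easy to check in isolation. Below is a minimal standalone sketch with the db.aggregate_metadata_get_by_host() call replaced by a plain dict (that function returns a mapping of key to a set of string values, one per matching aggregate); resolve_ratio and CONF_DEFAULT are names invented for this sketch:

    CONF_DEFAULT = 1.5  # stands in for CONF.ram_allocation_ratio

    def resolve_ratio(metadata):
        """Mimic AggregateRamFilter's ratio resolution for one host."""
        vals = metadata.get('ram_allocation_ratio', set())
        if not vals:
            return CONF_DEFAULT      # no per-aggregate setting: global flag
        try:
            # mirrors the patch: min() compares the raw strings, then casts
            return float(min(vals))
        except ValueError:
            return CONF_DEFAULT      # undecodable value: fall back

    assert resolve_ratio({}) == 1.5
    assert resolve_ratio({'ram_allocation_ratio': set(['2.0'])}) == 2.0
    assert resolve_ratio({'ram_allocation_ratio': set(['1.5', '2.0'])}) == 1.5
    assert resolve_ratio({'ram_allocation_ratio': set(['XXX'])}) == 1.5
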
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 9306615ed..9b1f127b2 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -542,6 +542,64 @@ class HostFiltersTestCase(test.NoDBTestCase):
         self.assertTrue(filt_cls.host_passes(host, filter_properties))
         self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
 
+    def test_aggregate_ram_filter_value_error(self):
+        self._stub_service_is_up(True)
+        filt_cls = self.class_map['AggregateRamFilter']()
+        self.flags(ram_allocation_ratio=1.0)
+        filter_properties = {'context': self.context,
+                             'instance_type': {'memory_mb': 1024}}
+        capabilities = {'enabled': True}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1', 'node1',
+                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
+                 'capabilities': capabilities, 'service': service})
+        self._create_aggregate_with_host(name='fake_aggregate',
+                                         hosts=['host1'],
+                                         metadata={'ram_allocation_ratio': 'XXX'})
+        # 'XXX' cannot be cast to float: falls back to the global flag
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
+
+    def test_aggregate_ram_filter_default_value(self):
+        self._stub_service_is_up(True)
+        filt_cls = self.class_map['AggregateRamFilter']()
+        self.flags(ram_allocation_ratio=1.0)
+        filter_properties = {'context': self.context,
+                             'instance_type': {'memory_mb': 1024}}
+        capabilities = {'enabled': True}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1', 'node1',
+                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+                 'capabilities': capabilities, 'service': service})
+        # False: falls back to the default flag when no aggregates are set
+        self.assertFalse(filt_cls.host_passes(host, filter_properties))
+        self._create_aggregate_with_host(name='fake_aggregate',
+                                         hosts=['host1'],
+                                         metadata={'ram_allocation_ratio': '2.0'})
+        # True: uses the ratio from the aggregate
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
+
+    def test_aggregate_ram_filter_conflict_values(self):
+        self._stub_service_is_up(True)
+        filt_cls = self.class_map['AggregateRamFilter']()
+        self.flags(ram_allocation_ratio=1.0)
+        filter_properties = {'context': self.context,
+                             'instance_type': {'memory_mb': 1024}}
+        capabilities = {'enabled': True}
+        service = {'disabled': False}
+        host = fakes.FakeHostState('host1', 'node1',
+                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+                 'capabilities': capabilities, 'service': service})
+        self._create_aggregate_with_host(name='fake_aggregate1',
+                                         hosts=['host1'],
+                                         metadata={'ram_allocation_ratio': '1.5'})
+        self._create_aggregate_with_host(name='fake_aggregate2',
+                                         hosts=['host1'],
+                                         metadata={'ram_allocation_ratio': '2.0'})
+        # uses the minimum ratio from the conflicting aggregates
+        self.assertTrue(filt_cls.host_passes(host, filter_properties))
+        self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
+
     def test_disk_filter_passes(self):
         self._stub_service_is_up(True)
         filt_cls = self.class_map['DiskFilter']()
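
The three tests above lean on a _create_aggregate_with_host helper that is not part of this diff. For readers without the full test file, here is a minimal sketch of what such a helper might look like; the helper name and keyword arguments are taken from the tests, while the body is an assumption based on the era's nova db API (aggregate_create and aggregate_host_add):

    def _create_aggregate_with_host(self, name='fake_aggregate',
                                    metadata=None, hosts=None):
        # Hypothetical sketch: create an aggregate carrying the metadata and
        # attach each host, so aggregate_metadata_get_by_host() can see it.
        ctxt = self.context.elevated()
        aggregate = db.aggregate_create(ctxt, {'name': name},
                                        metadata=metadata)
        for host in hosts or []:
            db.aggregate_host_add(ctxt, aggregate['id'], host)
        return aggregate
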