author    Chris Behrens <cbehrens@codestud.com>  2012-11-09 10:12:03 +0000
committer Chris Behrens <cbehrens@codestud.com>  2012-11-09 10:12:03 +0000
commit    7fc83893587d4e48cbfeabdfebe62aa71c66ab35
tree      d87d99f760bcbbcd73146bb2e6d4e528dad1a317 /nova
parent    ab9ee3d232e0000dd8b8f1d2623d8ac72a6c9247
Remove generic topic support from filter scheduler
The only topic that we do scheduling on is 'compute', so clean up all of
the cases where we were checking this.

Change-Id: If16eaf48a89342ca99a741c25679389e706e0e48
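In brief, the call-site simplification — a sketch distilled from the hunks
below, shown here only for orientation:

    # before: callers passed a topic even though only 'compute' was supported
    hosts = self._schedule(context, CONF.compute_topic, request_spec,
                           filter_properties, instance_uuids)
    host_states = self.host_manager.get_all_host_states(elevated, topic)

    # after: the topic argument is dropped; the scheduler is compute-only
    hosts = self._schedule(context, request_spec, filter_properties,
                           instance_uuids)
    host_states = self.host_manager.get_all_host_states(elevated)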
Diffstat (limited to 'nova')
-rw-r--r--  nova/scheduler/filter_scheduler.py             |  32
-rw-r--r--  nova/scheduler/filters/compute_filter.py       |   3
-rw-r--r--  nova/scheduler/filters/core_filter.py          |   3
-rw-r--r--  nova/scheduler/host_manager.py                 |  38
-rw-r--r--  nova/tests/scheduler/fakes.py                  |   4
-rw-r--r--  nova/tests/scheduler/test_filter_scheduler.py  |  35
-rw-r--r--  nova/tests/scheduler/test_host_filters.py      | 171
-rw-r--r--  nova/tests/scheduler/test_host_manager.py      |  37
-rw-r--r--  nova/tests/scheduler/test_least_cost.py        |   5
9 files changed, 132 insertions(+), 196 deletions(-)
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index b2802931c..776797d68 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -39,7 +39,6 @@ class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
- self.cost_function_cache = {}
self.options = scheduler_options.SchedulerOptions()
def schedule_run_instance(self, context, request_spec,
@@ -61,9 +60,8 @@ class FilterScheduler(driver.Scheduler):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)
- weighted_hosts = self._schedule(context, CONF.compute_topic,
- request_spec, filter_properties,
- instance_uuids)
+ weighted_hosts = self._schedule(context, request_spec,
+ filter_properties, instance_uuids)
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
@@ -108,8 +106,8 @@ class FilterScheduler(driver.Scheduler):
the prep_resize operation to it.
"""
- hosts = self._schedule(context, CONF.compute_topic, request_spec,
- filter_properties, [instance['uuid']])
+ hosts = self._schedule(context, request_spec, filter_properties,
+ [instance['uuid']])
if not hosts:
raise exception.NoValidHost(reason="")
host = hosts.pop(0)
@@ -220,16 +218,12 @@ class FilterScheduler(driver.Scheduler):
"instance %(instance_uuid)s") % locals()
raise exception.NoValidHost(reason=msg)
- def _schedule(self, context, topic, request_spec, filter_properties,
+ def _schedule(self, context, request_spec, filter_properties,
instance_uuids=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
- if topic != CONF.compute_topic:
- msg = _("Scheduler only understands Compute nodes (for now)")
- raise NotImplementedError(msg)
-
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
@@ -260,8 +254,7 @@ class FilterScheduler(driver.Scheduler):
# Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
- hosts = self.host_manager.get_all_host_states(
- elevated, topic)
+ hosts = self.host_manager.get_all_host_states(elevated)
selected_hosts = []
if instance_uuids:
@@ -297,15 +290,12 @@ class FilterScheduler(driver.Scheduler):
selected_hosts.sort(key=operator.attrgetter('weight'))
return selected_hosts
- def get_cost_functions(self, topic=None):
+ def get_cost_functions(self):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
- if topic is None:
- # Schedulers only support compute right now.
- topic = CONF.compute_topic
- if topic in self.cost_function_cache:
- return self.cost_function_cache[topic]
+ if getattr(self, 'cost_function_cache', None) is not None:
+ return self.cost_function_cache
cost_fns = []
for cost_fn_str in CONF.least_cost_functions:
@@ -315,7 +305,7 @@ class FilterScheduler(driver.Scheduler):
short_name = cost_fn_str
cost_fn_str = "%s.%s.%s" % (
__name__, self.__class__.__name__, short_name)
- if not (short_name.startswith('%s_' % topic) or
+ if not (short_name.startswith('compute_') or
short_name.startswith('noop')):
continue
@@ -336,5 +326,5 @@ class FilterScheduler(driver.Scheduler):
flag_name=flag_name)
cost_fns.append((weight, cost_fn))
- self.cost_function_cache[topic] = cost_fns
+ self.cost_function_cache = cost_fns
return cost_fns
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 202f8232a..019068269 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import utils
@@ -30,7 +29,7 @@ class ComputeFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Returns True for only active compute nodes"""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
+ if not instance_type:
return True
capabilities = host_state.capabilities
service = host_state.service
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 9c93df930..aec05ecc0 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -16,7 +16,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -38,7 +37,7 @@ class CoreFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
+ if not instance_type:
return True
if not host_state.vcpus_total:
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 1f4b6d956..faaf3e258 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -24,7 +24,6 @@ from nova.compute import vm_states
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -94,10 +93,9 @@ class HostState(object):
previously used and lock down access.
"""
- def __init__(self, host, topic, capabilities=None, service=None):
+ def __init__(self, host, capabilities=None, service=None):
self.host = host
- self.topic = topic
- self.update_capabilities(topic, capabilities, service)
+ self.update_capabilities(capabilities, service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
@@ -126,12 +124,12 @@ class HostState(object):
self.updated = None
- def update_capabilities(self, topic, capabilities=None, service=None):
+ def update_capabilities(self, capabilities=None, service=None):
# Read-only capability dicts
if capabilities is None:
capabilities = {}
- self.capabilities = ReadOnlyDict(capabilities.get(topic, None))
+ self.capabilities = ReadOnlyDict(capabilities)
if service is None:
service = {}
self.service = ReadOnlyDict(service)
@@ -326,28 +324,24 @@ class HostManager(object):
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
+
+ if service_name != 'compute':
+ LOG.debug(_('Ignoring %(service_name)s service update '
+ 'from %(host)s'), locals())
+ return
+
LOG.debug(_("Received %(service_name)s service update from "
- "%(host)s.") % locals())
- service_caps = self.service_states.get(host, {})
+ "%(host)s."), locals())
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
- service_caps[service_name] = capab_copy
- self.service_states[host] = service_caps
+ self.service_states[host] = capab_copy
- def get_all_host_states(self, context, topic):
+ def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
the HostManager knows about. Also, each of the consumable resources
in HostState are pre-populated and adjusted based on data in the db.
-
- Note: this can be very slow with a lot of instances.
- InstanceType table isn't required since a copy is stored
- with the instance (in case the InstanceType changed since the
- instance was created)."""
-
- if topic != CONF.compute_topic:
- raise NotImplementedError(_(
- "host_manager only implemented for 'compute'"))
+ """
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
@@ -360,10 +354,10 @@ class HostManager(object):
capabilities = self.service_states.get(host, None)
host_state = self.host_state_map.get(host)
if host_state:
- host_state.update_capabilities(topic, capabilities,
+ host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
- host_state = self.host_state_cls(host, topic,
+ host_state = self.host_state_cls(host,
capabilities=capabilities,
service=dict(service.iteritems()))
self.host_state_map[host] = host_state
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index ba255c32c..58aa2ffc7 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -96,8 +96,8 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
- def __init__(self, host, topic, attribute_dict):
- super(FakeHostState, self).__init__(host, topic)
+ def __init__(self, host, attribute_dict):
+ super(FakeHostState, self).__init__(host)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index be6bc3317..4238ad916 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -98,17 +98,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_context, request_spec, None, None, None, None, {})
self.assertTrue(self.was_admin)
- def test_schedule_bad_topic(self):
- """Parameter checking."""
- sched = fakes.FakeFilterScheduler()
- fake_context = context.RequestContext('user', 'project')
- self.assertRaises(NotImplementedError, sched._schedule, fake_context,
- "foo", {}, {})
-
def test_scheduler_includes_launch_index(self):
fake_context = context.RequestContext('user', 'project')
- fake_kwargs = {'fake_kwarg1': 'fake_value1',
- 'fake_kwarg2': 'fake_value2'}
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
@@ -129,9 +120,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, '_provision_resource')
- self.driver._schedule(fake_context, 'compute',
- request_spec, {}, ['fake-uuid1', 'fake-uuid2']
- ).AndReturn(['host1', 'host2'])
+ self.driver._schedule(fake_context, request_spec, {},
+ ['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
fake_context, 'host1',
@@ -181,8 +171,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'vcpus': 1,
'os_type': 'Linux'}}
self.mox.ReplayAll()
- weighted_hosts = sched._schedule(fake_context, 'compute',
- request_spec, {})
+ weighted_hosts = sched._schedule(fake_context, request_spec, {})
self.assertEquals(len(weighted_hosts), 10)
for weighted_host in weighted_hosts:
self.assertTrue(weighted_host.host_state is not None)
@@ -194,7 +183,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched = fakes.FakeFilterScheduler()
def _return_hosts(*args, **kwargs):
- host_state = host_manager.HostState('host2', 'compute')
+ host_state = host_manager.HostState('host2')
return [least_cost.WeightedHost(1.0, host_state=host_state)]
self.stubs.Set(sched, '_schedule', _return_hosts)
@@ -220,7 +209,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEquals(len(fns), 1)
weight, fn = fns[0]
self.assertEquals(weight, -1.0)
- hostinfo = host_manager.HostState('host', 'compute')
+ hostinfo = host_manager.HostState('host')
hostinfo.update_from_compute_node(dict(memory_mb=1000,
local_gb=0, vcpus=1, disk_available_least=1000,
free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
@@ -248,7 +237,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
@@ -263,7 +252,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
@@ -280,7 +269,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
@@ -298,7 +287,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
- 'compute', request_spec, filter_properties=filter_properties)
+ request_spec, filter_properties=filter_properties)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
@@ -318,7 +307,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
- host_state = host_manager.HostState('host', 'compute')
+ host_state = host_manager.HostState('host')
host_state.limits['vcpus'] = 5
sched._post_select_populate_filter_properties(filter_properties,
host_state)
@@ -342,14 +331,14 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
reservations = None
- host = fakes.FakeHostState('host', 'compute', {})
+ host = fakes.FakeHostState('host', {})
weighted_host = least_cost.WeightedHost(1, host)
hosts = [weighted_host]
self.mox.StubOutWithMock(sched, '_schedule')
self.mox.StubOutWithMock(sched.compute_rpcapi, 'prep_resize')
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties, [instance['uuid']]).AndReturn(hosts)
sched.compute_rpcapi.prep_resize(self.context, image, instance,
instance_type, 'host', reservations, request_spec=request_spec,
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index e789f169e..d2facbfc5 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -291,7 +291,7 @@ class HostFiltersTestCase(test.TestCase):
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
@@ -301,7 +301,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -314,7 +314,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -327,7 +327,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -340,11 +340,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host2'})
- instance_uuid = instance.uuid
-
+ host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
@@ -352,7 +348,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -365,7 +361,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -378,7 +374,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -391,11 +387,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
- instance = fakes.FakeInstance(context=self.context,
- params={'host': 'host2'})
- instance_uuid = instance.uuid
-
+ host = fakes.FakeHostState('host1', {})
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
@@ -403,7 +395,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -417,7 +409,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -431,7 +423,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
affinity_ip.append('100')
@@ -448,7 +440,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -464,7 +456,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host', 'compute',
+ host = fakes.FakeHostState('fake_host',
{'capabilities': capabilities,
'service': service})
#True since empty
@@ -490,7 +482,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_type': {'name': 'fake2'}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host', 'compute',
+ host = fakes.FakeHostState('fake_host',
{'capabilities': capabilities,
'service': service})
#True since no aggregates
@@ -509,7 +501,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -521,7 +513,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -533,7 +525,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -547,7 +539,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -560,7 +552,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -574,7 +566,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -589,7 +581,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -600,7 +592,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -611,7 +603,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -622,29 +614,18 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_on_volume(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeFilter']()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- capabilities = {'enabled': False}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'volume',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
def test_compute_filter_passes_on_no_instance_type(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -659,7 +640,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -673,7 +654,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -686,7 +667,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -699,7 +680,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'xen', 'xen')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -710,7 +691,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -722,7 +703,7 @@ class HostFiltersTestCase(test.TestCase):
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -734,7 +715,7 @@ class HostFiltersTestCase(test.TestCase):
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
assertion = self.assertTrue if passes else self.assertFalse
@@ -759,7 +740,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -779,7 +760,7 @@ class HostFiltersTestCase(test.TestCase):
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'compute', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
@@ -793,7 +774,7 @@ class HostFiltersTestCase(test.TestCase):
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
db.aggregate_host_delete(self.context.elevated(), agg2.id, 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -819,7 +800,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', 'compute', {})
+ host = fakes.FakeHostState('non-isolated', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
@@ -830,7 +811,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('isolated', 'compute', {})
+ host = fakes.FakeHostState('isolated', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
@@ -841,7 +822,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('isolated', 'compute', {})
+ host = fakes.FakeHostState('isolated', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
@@ -852,7 +833,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', 'compute', {})
+ host = fakes.FakeHostState('non-isolated', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
@@ -862,7 +843,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -874,7 +855,7 @@ class HostFiltersTestCase(test.TestCase):
'root_gb': 200,
'ephemeral_gb': 0}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 0,
'free_disk_mb': 0,
'capabilities': capabilities})
@@ -887,7 +868,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -900,7 +881,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1,
'capabilities': capabilities})
@@ -917,7 +898,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -934,7 +915,7 @@ class HostFiltersTestCase(test.TestCase):
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -962,7 +943,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
@@ -972,7 +953,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -982,7 +963,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'instance_type',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -992,7 +973,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
@@ -1002,7 +983,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1012,7 +993,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1021,7 +1002,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': {'enabled': True}})
# (operator, arguments, expected_result)
ops_to_test = [
@@ -1090,14 +1071,14 @@ class HostFiltersTestCase(test.TestCase):
'query': jsonutils.dumps(raw),
},
}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': {'enabled': True}})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': {'enabled': True}})
raw = []
@@ -1117,7 +1098,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': {'enabled': True}})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
@@ -1138,7 +1119,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'capabilities': {'enabled': True}})
raw = ['=', '$........', 1, 1]
@@ -1161,7 +1142,7 @@ class HostFiltersTestCase(test.TestCase):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
@@ -1172,7 +1153,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
@@ -1183,7 +1164,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
@@ -1194,7 +1175,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
@@ -1205,28 +1186,28 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1246,27 +1227,27 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
- host = fakes.FakeHostState('host1', 'compute', {'service': service})
+ host = fakes.FakeHostState('host1', {'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
- host = fakes.FakeHostState('host1', 'compute', {'service': service})
+ host = fakes.FakeHostState('host1', {'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
"""Test case where retry/re-scheduling is disabled"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
"""Host not previously tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
retry = dict(num_attempts=1, hosts=['host2', 'host3'])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1274,7 +1255,7 @@ class HostFiltersTestCase(test.TestCase):
def test_retry_filter_fail(self):
"""Host was already tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', {})
retry = dict(num_attempts=1, hosts=['host3', 'host1'])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1282,29 +1263,25 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', 'compute',
- {'num_io_ops': 7})
+ host = fakes.FakeHostState('host1', {'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', 'compute',
- {'num_io_ops': 8})
+ host = fakes.FakeHostState('host1', {'num_io_ops': 8})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', 'compute',
- {'num_instances': 4})
+ host = fakes.FakeHostState('host1', {'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', 'compute',
- {'num_instances': 5})
+ host = fakes.FakeHostState('host1', {'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 4d1e00852..b95803d8e 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -56,18 +56,16 @@ class HostManagerTestCase(test.TestCase):
self.host_manager.filter_classes = [ComputeFilterClass1,
ComputeFilterClass2]
- # Test 'compute' returns 1 correct function
+ # Test we return 1 correct function
filter_fns = self.host_manager._choose_host_filters(None)
self.assertEqual(len(filter_fns), 1)
self.assertEqual(filter_fns[0].__func__,
ComputeFilterClass2.host_passes.__func__)
def test_filter_hosts(self):
- topic = 'fake_topic'
-
filters = ['fake-filter1', 'fake-filter2']
- fake_host1 = host_manager.HostState('host1', topic)
- fake_host2 = host_manager.HostState('host2', topic)
+ fake_host1 = host_manager.HostState('host1')
+ fake_host2 = host_manager.HostState('host2')
hosts = [fake_host1, fake_host2]
filter_properties = {'fake_prop': 'fake_val'}
@@ -93,40 +91,31 @@ class HostManagerTestCase(test.TestCase):
self.assertDictMatch(service_states, {})
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
- timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
timestamp=1)
- host1_volume_capabs = dict(free_disk=4321, timestamp=1)
host2_compute_capabs = dict(free_memory=8756, timestamp=1)
self.mox.ReplayAll()
self.host_manager.update_service_capabilities('compute', 'host1',
host1_compute_capabs)
- self.host_manager.update_service_capabilities('volume', 'host1',
- host1_volume_capabs)
self.host_manager.update_service_capabilities('compute', 'host2',
host2_compute_capabs)
- # Make sure dictionary isn't re-assigned
- self.assertEqual(self.host_manager.service_states, service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(host1_compute_capabs['timestamp'], 1)
host1_compute_capabs['timestamp'] = 31337
- host1_volume_capabs['timestamp'] = 31338
host2_compute_capabs['timestamp'] = 31339
- expected = {'host1': {'compute': host1_compute_capabs,
- 'volume': host1_volume_capabs},
- 'host2': {'compute': host2_compute_capabs}}
+ expected = {'host1': host1_compute_capabs,
+ 'host2': host2_compute_capabs}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states(self):
context = 'fake_context'
- topic = 'compute'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
@@ -136,7 +125,7 @@ class HostManagerTestCase(test.TestCase):
host_manager.LOG.warn("No service for compute ID 5")
self.mox.ReplayAll()
- self.host_manager.get_all_host_states(context, topic)
+ self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 4)
@@ -167,7 +156,7 @@ class HostStateTestCase(test.TestCase):
# in HostManagerTestCase.test_get_all_host_states()
def test_host_state_passes_filters_passes(self):
- fake_host = host_manager.HostState('host1', 'compute')
+ fake_host = host_manager.HostState('host1')
filter_properties = {}
cls1 = ComputeFilterClass1()
@@ -184,7 +173,7 @@ class HostStateTestCase(test.TestCase):
self.assertTrue(result)
def test_host_state_passes_filters_passes_with_ignore(self):
- fake_host = host_manager.HostState('host1', 'compute')
+ fake_host = host_manager.HostState('host1')
filter_properties = {'ignore_hosts': ['host2']}
cls1 = ComputeFilterClass1()
@@ -201,7 +190,7 @@ class HostStateTestCase(test.TestCase):
self.assertTrue(result)
def test_host_state_passes_filters_fails(self):
- fake_host = host_manager.HostState('host1', 'compute')
+ fake_host = host_manager.HostState('host1')
filter_properties = {}
cls1 = ComputeFilterClass1()
@@ -218,7 +207,7 @@ class HostStateTestCase(test.TestCase):
self.assertFalse(result)
def test_host_state_passes_filters_fails_from_ignore(self):
- fake_host = host_manager.HostState('host1', 'compute')
+ fake_host = host_manager.HostState('host1')
filter_properties = {'ignore_hosts': ['host1']}
cls1 = ComputeFilterClass1()
@@ -235,7 +224,7 @@ class HostStateTestCase(test.TestCase):
self.assertFalse(result)
def test_host_state_passes_filters_skipped_from_force(self):
- fake_host = host_manager.HostState('host1', 'compute')
+ fake_host = host_manager.HostState('host1')
filter_properties = {'force_hosts': ['host1']}
cls1 = ComputeFilterClass1()
@@ -268,7 +257,7 @@ class HostStateTestCase(test.TestCase):
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
updated_at=None)
- host = host_manager.HostState("fakehost", "faketopic")
+ host = host_manager.HostState("fakehost")
host.update_from_compute_node(compute)
self.assertEqual(5, host.num_instances)
@@ -283,7 +272,7 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(42, host.num_io_ops)
def test_stat_consumption_from_instance(self):
- host = host_manager.HostState("fakehost", "faketopic")
+ host = host_manager.HostState("fakehost")
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index 64cda0b2a..e64cc9e81 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -40,8 +40,7 @@ class LeastCostTestCase(test.TestCase):
ctxt = context.get_admin_context()
fakes.mox_host_manager_db_calls(self.mox, ctxt)
self.mox.ReplayAll()
- host_states = self.host_manager.get_all_host_states(ctxt,
- 'compute')
+ host_states = self.host_manager.get_all_host_states(ctxt)
self.mox.VerifyAll()
self.mox.ResetAll()
return host_states
@@ -96,7 +95,7 @@ class TestWeightedHost(test.TestCase):
self.assertDictMatch(host.to_dict(), expected)
def test_dict_conversion_with_host_state(self):
- host_state = host_manager.HostState('somehost', 'sometopic')
+ host_state = host_manager.HostState('somehost')
host = least_cost.WeightedHost('someweight', host_state)
expected = {'weight': 'someweight',
'host': 'somehost'}