summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2012-01-18 16:42:39 +0000
committerGerrit Code Review <review@openstack.org>2012-01-18 16:42:39 +0000
commit885b9aa70da338307c37d8eba84b3bc1533058bb (patch)
tree47ecccd6e6f67586372321f22aed9d22cbf5d22c /nova/tests
parent0c2eb242580caff24dc95a9e2b3092cf0b04e958 (diff)
parentd328ddcadb24d1b1961bd05a7676bc8f54b6776f (diff)
Merge "Separate scheduler host management"
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/scheduler/fakes.py (renamed from nova/tests/scheduler/fake_zone_manager.py)62
-rw-r--r--nova/tests/scheduler/test_distributed_scheduler.py185
-rw-r--r--nova/tests/scheduler/test_host_filter.py252
-rw-r--r--nova/tests/scheduler/test_host_filters.py333
-rw-r--r--nova/tests/scheduler/test_host_manager.py360
-rw-r--r--nova/tests/scheduler/test_least_cost.py42
-rw-r--r--nova/tests/scheduler/test_zone_manager.py189
-rw-r--r--nova/tests/test_zones.py377
8 files changed, 1041 insertions, 759 deletions
diff --git a/nova/tests/scheduler/fake_zone_manager.py b/nova/tests/scheduler/fakes.py
index c1991d9b0..5fb60a206 100644
--- a/nova/tests/scheduler/fake_zone_manager.py
+++ b/nova/tests/scheduler/fakes.py
@@ -13,25 +13,52 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Fakes For Distributed Scheduler tests.
+Fakes For Scheduler tests.
"""
+from nova import db
from nova.scheduler import distributed_scheduler
+from nova.scheduler import host_manager
from nova.scheduler import zone_manager
+COMPUTE_NODES = [
+ dict(id=1, local_gb=1024, memory_mb=1024, service=dict(host='host1')),
+ dict(id=2, local_gb=2048, memory_mb=2048, service=dict(host='host2')),
+ dict(id=3, local_gb=4096, memory_mb=4096, service=dict(host='host3')),
+ dict(id=4, local_gb=8192, memory_mb=8192, service=dict(host='host4')),
+ # Broken entry
+ dict(id=5, local_gb=1024, memory_mb=1024, service=None),
+]
+
+INSTANCES = [
+ dict(local_gb=512, memory_mb=512, host='host1'),
+ dict(local_gb=512, memory_mb=512, host='host2'),
+ dict(local_gb=512, memory_mb=512, host='host2'),
+ dict(local_gb=1024, memory_mb=1024, host='host3'),
+ # Broken host
+ dict(local_gb=1024, memory_mb=1024, host=None),
+ # No matching host
+ dict(local_gb=1024, memory_mb=1024, host='host5'),
+]
+
+
class FakeDistributedScheduler(distributed_scheduler.DistributedScheduler):
- # No need to stub anything at the moment
- pass
+ def __init__(self, *args, **kwargs):
+ super(FakeDistributedScheduler, self).__init__(*args, **kwargs)
+ self.zone_manager = zone_manager.ZoneManager()
+ self.host_manager = host_manager.HostManager()
-class FakeZoneManager(zone_manager.ZoneManager):
+class FakeHostManager(host_manager.HostManager):
"""host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
host4: free_ram_mb=8192 free_disk_gb=8192"""
def __init__(self):
+ super(FakeHostManager, self).__init__()
+
self.service_states = {
'host1': {
'compute': {'host_memory_free': 1073741824},
@@ -55,18 +82,17 @@ class FakeZoneManager(zone_manager.ZoneManager):
('host4', dict(free_disk_gb=8192, free_ram_mb=8192)),
]
- def _compute_node_get_all(self, context):
- return [
- dict(local_gb=1024, memory_mb=1024, service=dict(host='host1')),
- dict(local_gb=2048, memory_mb=2048, service=dict(host='host2')),
- dict(local_gb=4096, memory_mb=4096, service=dict(host='host3')),
- dict(local_gb=8192, memory_mb=8192, service=dict(host='host4')),
- ]
- def _instance_get_all(self, context):
- return [
- dict(local_gb=512, memory_mb=512, host='host1'),
- dict(local_gb=512, memory_mb=512, host='host1'),
- dict(local_gb=512, memory_mb=512, host='host2'),
- dict(local_gb=1024, memory_mb=1024, host='host3'),
- ]
+class FakeHostState(host_manager.HostState):
+ def __init__(self, host, topic, attribute_dict):
+ super(FakeHostState, self).__init__(host, topic)
+ for (key, val) in attribute_dict.iteritems():
+ setattr(self, key, val)
+
+
+def mox_host_manager_db_calls(mox, context):
+ mox.StubOutWithMock(db, 'compute_node_get_all')
+ mox.StubOutWithMock(db, 'instance_get_all')
+
+ db.compute_node_get_all(context).AndReturn(COMPUTE_NODES)
+ db.instance_get_all(context).AndReturn(INSTANCES)
diff --git a/nova/tests/scheduler/test_distributed_scheduler.py b/nova/tests/scheduler/test_distributed_scheduler.py
index 412c981c5..05c5d18e1 100644
--- a/nova/tests/scheduler/test_distributed_scheduler.py
+++ b/nova/tests/scheduler/test_distributed_scheduler.py
@@ -18,29 +18,15 @@ Tests For Distributed Scheduler.
import json
-import nova.db
-
+from nova.compute import api as compute_api
from nova import context
+from nova import db
from nova import exception
-from nova import test
from nova.scheduler import distributed_scheduler
from nova.scheduler import least_cost
-from nova.scheduler import zone_manager
-from nova.tests.scheduler import fake_zone_manager as ds_fakes
-
-
-class FakeEmptyZoneManager(zone_manager.ZoneManager):
- def __init__(self):
- self.service_states = {}
-
- def get_host_list_from_db(self, context):
- return []
-
- def _compute_node_get_all(*args, **kwargs):
- return []
-
- def _instance_get_all(*args, **kwargs):
- return []
+from nova.scheduler import host_manager
+from nova import test
+from nova.tests.scheduler import fakes
def fake_call_zone_method(context, method, specs, zones):
@@ -80,8 +66,8 @@ def fake_zone_get_all(context):
]
-def fake_filter_hosts(topic, request_info, unfiltered_hosts, options):
- return unfiltered_hosts
+def fake_filter_hosts(hosts, filter_properties):
+ return list(hosts)
class DistributedSchedulerTestCase(test.TestCase):
@@ -92,7 +78,7 @@ class DistributedSchedulerTestCase(test.TestCase):
properly adjusted based on the scale/offset in the zone
db entries.
"""
- sched = ds_fakes.FakeDistributedScheduler()
+ sched = fakes.FakeDistributedScheduler()
child_results = fake_call_zone_method(None, None, None, None)
zones = fake_zone_get_all(None)
weighted_hosts = sched._adjust_child_weights(child_results, zones)
@@ -113,14 +99,14 @@ class DistributedSchedulerTestCase(test.TestCase):
def _fake_empty_call_zone_method(*args, **kwargs):
return []
- sched = ds_fakes.FakeDistributedScheduler()
- sched.zone_manager = FakeEmptyZoneManager()
+ sched = fakes.FakeDistributedScheduler()
self.stubs.Set(sched, '_call_zone_method',
_fake_empty_call_zone_method)
- self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
+ self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
fake_context = context.RequestContext('user', 'project')
- request_spec = dict(instance_type=dict(memory_mb=1, local_gb=1))
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': {'project_id': 1}}
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec)
@@ -150,7 +136,7 @@ class DistributedSchedulerTestCase(test.TestCase):
self.child_zone_called = True
return 2
- sched = ds_fakes.FakeDistributedScheduler()
+ sched = fakes.FakeDistributedScheduler()
self.stubs.Set(sched, '_schedule', _fake_schedule)
self.stubs.Set(sched, '_make_weighted_host_from_blob',
_fake_make_weighted_host_from_blob)
@@ -185,7 +171,7 @@ class DistributedSchedulerTestCase(test.TestCase):
self.was_admin = context.is_admin
return []
- sched = ds_fakes.FakeDistributedScheduler()
+ sched = fakes.FakeDistributedScheduler()
self.stubs.Set(sched, '_schedule', fake_schedule)
fake_context = context.RequestContext('user', 'project')
@@ -196,15 +182,16 @@ class DistributedSchedulerTestCase(test.TestCase):
def test_schedule_bad_topic(self):
"""Parameter checking."""
- sched = ds_fakes.FakeDistributedScheduler()
+ sched = fakes.FakeDistributedScheduler()
self.assertRaises(NotImplementedError, sched._schedule, None, "foo",
{})
def test_schedule_no_instance_type(self):
"""Parameter checking."""
- sched = ds_fakes.FakeDistributedScheduler()
+ sched = fakes.FakeDistributedScheduler()
+ request_spec = {'instance_properties': {}}
self.assertRaises(NotImplementedError, sched._schedule, None,
- "compute", {})
+ "compute", request_spec=request_spec)
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
@@ -218,26 +205,31 @@ class DistributedSchedulerTestCase(test.TestCase):
return least_cost.WeightedHost(self.next_weight, host=host,
hostinfo=hostinfo)
- sched = ds_fakes.FakeDistributedScheduler()
- fake_context = context.RequestContext('user', 'project')
- sched.zone_manager = ds_fakes.FakeZoneManager()
- self.stubs.Set(sched, '_filter_hosts', fake_filter_hosts)
+ sched = fakes.FakeDistributedScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'filter_hosts',
+ fake_filter_hosts)
self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
- self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
+ self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
- instance_type = dict(memory_mb=512, local_gb=512)
- request_spec = dict(num_instances=10, instance_type=instance_type)
+ request_spec = {'num_instances': 10,
+ 'instance_type': {'memory_mb': 512, 'local_gb': 512},
+ 'instance_properties': {'project_id': 1}}
+ self.mox.ReplayAll()
weighted_hosts = sched._schedule(fake_context, 'compute',
- request_spec)
+ request_spec)
+ self.mox.VerifyAll()
self.assertEquals(len(weighted_hosts), 10)
for weighted_host in weighted_hosts:
# We set this up so remote hosts have even weights ...
if int(weighted_host.weight) % 2 == 0:
self.assertTrue(weighted_host.zone is not None)
- self.assertTrue(weighted_host.host is None)
+ self.assertTrue(weighted_host.host_state is None)
else:
- self.assertTrue(weighted_host.host is not None)
+ self.assertTrue(weighted_host.host_state is not None)
self.assertTrue(weighted_host.zone is None)
def test_schedule_local_zone(self):
@@ -248,33 +240,41 @@ class DistributedSchedulerTestCase(test.TestCase):
def _fake_weighted_sum(functions, hosts, options):
self.next_weight += 2.0
- host, hostinfo = hosts[0]
- return least_cost.WeightedHost(self.next_weight, host=host,
- hostinfo=hostinfo)
+ host = hosts[0]
+ return least_cost.WeightedHost(self.next_weight, host_state=host)
- sched = ds_fakes.FakeDistributedScheduler()
- fake_context = context.RequestContext('user', 'project')
- sched.zone_manager = ds_fakes.FakeZoneManager()
- self.stubs.Set(sched, '_filter_hosts', fake_filter_hosts)
+ sched = fakes.FakeDistributedScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.stubs.Set(sched.host_manager, 'filter_hosts',
+ fake_filter_hosts)
self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
- self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
+ self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
- instance_type = dict(memory_mb=512, local_gb=512)
- request_spec = dict(num_instances=10, instance_type=instance_type,
- local_zone=True)
+ request_spec = {'num_instances': 10,
+ 'instance_type': {'memory_mb': 512, 'local_gb': 512},
+ 'instance_properties': {'project_id': 1,
+ 'memory_mb': 512,
+ 'local_gb': 512},
+ 'local_zone': True}
+ self.mox.ReplayAll()
weighted_hosts = sched._schedule(fake_context, 'compute',
request_spec)
+ self.mox.VerifyAll()
self.assertEquals(len(weighted_hosts), 10)
for weighted_host in weighted_hosts:
# There should be no remote hosts
- self.assertTrue(weighted_host.host is not None)
+ self.assertTrue(weighted_host.host_state is not None)
self.assertTrue(weighted_host.zone is None)
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
- fixture = ds_fakes.FakeDistributedScheduler()
+ fixture = fakes.FakeDistributedScheduler()
test_data = {'weight': 1, 'host': 'x', 'blob': 'y', 'zone': 'z'}
class StubDecryptor(object):
@@ -290,49 +290,42 @@ class DistributedSchedulerTestCase(test.TestCase):
blob='y', zone='z'))
def test_get_cost_functions(self):
- fixture = ds_fakes.FakeDistributedScheduler()
+ self.flags(reserved_host_memory_mb=128)
+ fixture = fakes.FakeDistributedScheduler()
fns = fixture.get_cost_functions()
self.assertEquals(len(fns), 1)
weight, fn = fns[0]
self.assertEquals(weight, 1.0)
- hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
- self.assertEquals(1000, fn(hostinfo))
-
- def test_filter_hosts_avoid(self):
- """Test to make sure _filter_hosts() filters original hosts if
- avoid_original_host is True."""
-
- def _fake_choose_host_filters():
- return []
-
- sched = ds_fakes.FakeDistributedScheduler()
- fake_context = context.RequestContext('user', 'project')
- self.stubs.Set(sched, '_choose_host_filters',
- _fake_choose_host_filters)
-
- hosts = [('host1', '1info'), ('host2', '2info'), ('host3', '3info')]
- request_spec = dict(instance_properties=dict(host='host2'),
- avoid_original_host=True)
-
- filtered = sched._filter_hosts('compute', request_spec, hosts, {})
- self.assertEqual(filtered,
- [('host1', '1info'), ('host3', '3info')])
-
- def test_filter_hosts_no_avoid(self):
- """Test to make sure _filter_hosts() does not filter original
- hosts if avoid_original_host is False."""
-
- def _fake_choose_host_filters():
- return []
-
- sched = ds_fakes.FakeDistributedScheduler()
- fake_context = context.RequestContext('user', 'project')
- self.stubs.Set(sched, '_choose_host_filters',
- _fake_choose_host_filters)
-
- hosts = [('host1', '1info'), ('host2', '2info'), ('host3', '3info')]
- request_spec = dict(instance_properties=dict(host='host2'),
- avoid_original_host=False)
-
- filtered = sched._filter_hosts('compute', request_spec, hosts, {})
- self.assertEqual(filtered, hosts)
+ hostinfo = host_manager.HostState('host', 'compute')
+ hostinfo.update_from_compute_node(dict(memory_mb=1000,
+ local_gb=0))
+ self.assertEquals(1000 - 128, fn(hostinfo, {}))
+
+ def test_populate_filter_properties(self):
+ request_spec = {'instance_properties': {}}
+ fixture = fakes.FakeDistributedScheduler()
+ filter_properties = {'ignore_hosts': []}
+ fixture.populate_filter_properties(request_spec, filter_properties)
+ self.assertEqual(len(filter_properties['ignore_hosts']), 0)
+
+ # No original host results in not ignoring
+ request_spec = {'instance_properties': {},
+ 'avoid_original_host': True}
+ fixture = fakes.FakeDistributedScheduler()
+ fixture.populate_filter_properties(request_spec, filter_properties)
+ self.assertEqual(len(filter_properties['ignore_hosts']), 0)
+
+ # Original host but avoid is False should not ignore it
+ request_spec = {'instance_properties': {'host': 'foo'},
+ 'avoid_original_host': False}
+ fixture = fakes.FakeDistributedScheduler()
+ fixture.populate_filter_properties(request_spec, filter_properties)
+ self.assertEqual(len(filter_properties['ignore_hosts']), 0)
+
+ # Original host but avoid is True should ignore it
+ request_spec = {'instance_properties': {'host': 'foo'},
+ 'avoid_original_host': True}
+ fixture = fakes.FakeDistributedScheduler()
+ fixture.populate_filter_properties(request_spec, filter_properties)
+ self.assertEqual(len(filter_properties['ignore_hosts']), 1)
+ self.assertEqual(filter_properties['ignore_hosts'][0], 'foo')
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
deleted file mode 100644
index 797ec3fe9..000000000
--- a/nova/tests/scheduler/test_host_filter.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler Host Filters.
-"""
-
-import json
-
-import nova
-from nova import exception
-from nova import test
-from nova.scheduler import distributed_scheduler as dist
-from nova.tests.scheduler import fake_zone_manager as ds_fakes
-
-
-class HostFilterTestCase(test.TestCase):
- """Test case for host filters."""
-
- def _host_caps(self, multiplier):
- # Returns host capabilities in the following way:
- # host1 = memory:free 10 (100max)
- # disk:available 100 (1000max)
- # hostN = memory:free 10 + 10N
- # disk:available 100 + 100N
- # in other words: hostN has more resources than host0
- # which means ... don't go above 10 hosts.
- return {'host_name-description': 'XenServer %s' % multiplier,
- 'host_hostname': 'xs-%s' % multiplier,
- 'host_memory_total': 100,
- 'host_memory_overhead': 10,
- 'host_memory_free': 10 + multiplier * 10,
- 'host_memory_free-computed': 10 + multiplier * 10,
- 'host_other-config': {},
- 'host_ip_address': '192.168.1.%d' % (100 + multiplier),
- 'host_cpu_info': {},
- 'disk_available': 100 + multiplier * 100,
- 'disk_total': 1000,
- 'disk_used': 0,
- 'host_uuid': 'xxx-%d' % multiplier,
- 'host_name-label': 'xs-%s' % multiplier,
- 'enabled': True}
-
- def setUp(self):
- super(HostFilterTestCase, self).setUp()
- default_host_filters = ['AllHostsFilter']
- self.flags(default_host_filters=default_host_filters,
- reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.instance_type = dict(name='tiny',
- memory_mb=30,
- vcpus=10,
- local_gb=300,
- flavorid=1,
- swap=500,
- rxtx_quota=30000,
- rxtx_cap=200,
- extra_specs={})
- self.gpu_instance_type = dict(name='tiny.gpu',
- memory_mb=30,
- vcpus=10,
- local_gb=300,
- flavorid=2,
- swap=500,
- rxtx_quota=30000,
- rxtx_cap=200,
- extra_specs={'xpu_arch': 'fermi',
- 'xpu_info': 'Tesla 2050'})
-
- self.zone_manager = ds_fakes.FakeZoneManager()
- states = {}
- for x in xrange(4):
- states['host%d' % (x + 1)] = {'compute': self._host_caps(x)}
- self.zone_manager.service_states = states
-
- # Add some extra capabilities to some hosts
- host4 = self.zone_manager.service_states['host4']['compute']
- host4['xpu_arch'] = 'fermi'
- host4['xpu_info'] = 'Tesla 2050'
-
- host2 = self.zone_manager.service_states['host2']['compute']
- host2['xpu_arch'] = 'radeon'
-
- host3 = self.zone_manager.service_states['host3']['compute']
- host3['xpu_arch'] = 'fermi'
- host3['xpu_info'] = 'Tesla 2150'
-
- def _get_all_hosts(self):
- return self.zone_manager.get_all_host_data(None).items()
-
- def test_choose_filter(self):
- # Test default filter ...
- sched = dist.DistributedScheduler()
- hfs = sched._choose_host_filters()
- hf = hfs[0]
- self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter')
- # Test valid filter ...
- hfs = sched._choose_host_filters('InstanceTypeFilter')
- hf = hfs[0]
- self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter')
- # Test invalid filter ...
- try:
- sched._choose_host_filters('does not exist')
- self.fail("Should not find host filter.")
- except exception.SchedulerHostFilterNotFound:
- pass
-
- def test_all_host_filter(self):
- sched = dist.DistributedScheduler()
- hfs = sched._choose_host_filters('AllHostsFilter')
- hf = hfs[0]
- all_hosts = self._get_all_hosts()
- cooked = hf.instance_type_to_filter(self.instance_type)
- hosts = hf.filter_hosts(all_hosts, cooked, {})
- self.assertEquals(4, len(hosts))
- for host, capabilities in hosts:
- self.assertTrue(host.startswith('host'))
-
- def test_instance_type_filter(self):
- hf = nova.scheduler.filters.InstanceTypeFilter()
- # filter all hosts that can support 30 ram and 300 disk
- cooked = hf.instance_type_to_filter(self.instance_type)
- all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked, {})
- self.assertEquals(3, len(hosts))
- just_hosts = [host for host, hostinfo in hosts]
- just_hosts.sort()
- self.assertEquals('host4', just_hosts[2])
- self.assertEquals('host3', just_hosts[1])
- self.assertEquals('host2', just_hosts[0])
-
- def test_instance_type_filter_reserved_memory(self):
- self.flags(reserved_host_memory_mb=2048)
- hf = nova.scheduler.filters.InstanceTypeFilter()
- # filter all hosts that can support 30 ram and 300 disk after
- # reserving 2048 ram
- cooked = hf.instance_type_to_filter(self.instance_type)
- all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked, {})
- self.assertEquals(2, len(hosts))
- just_hosts = [host for host, hostinfo in hosts]
- just_hosts.sort()
- self.assertEquals('host4', just_hosts[1])
- self.assertEquals('host3', just_hosts[0])
-
- def test_instance_type_filter_extra_specs(self):
- hf = nova.scheduler.filters.InstanceTypeFilter()
- # filter all hosts that can support 30 ram and 300 disk
- cooked = hf.instance_type_to_filter(self.gpu_instance_type)
- all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked, {})
- self.assertEquals(1, len(hosts))
- just_hosts = [host for host, caps in hosts]
- self.assertEquals('host4', just_hosts[0])
-
- def test_json_filter(self):
- hf = nova.scheduler.filters.JsonFilter()
- # filter all hosts that can support 30 ram and 300 disk
- cooked = hf.instance_type_to_filter(self.instance_type)
- all_hosts = self._get_all_hosts()
- hosts = hf.filter_hosts(all_hosts, cooked, {})
- self.assertEquals(2, len(hosts))
- just_hosts = [host for host, caps in hosts]
- just_hosts.sort()
- self.assertEquals('host3', just_hosts[0])
- self.assertEquals('host4', just_hosts[1])
-
- # Try some custom queries
-
- raw = ['or',
- ['and',
- ['<', '$compute.host_memory_free', 30],
- ['<', '$compute.disk_available', 300],
- ],
- ['and',
- ['>', '$compute.host_memory_free', 30],
- ['>', '$compute.disk_available', 300],
- ]
- ]
- cooked = json.dumps(raw)
- hosts = hf.filter_hosts(all_hosts, cooked, {})
-
- self.assertEquals(3, len(hosts))
- just_hosts = [host for host, caps in hosts]
- just_hosts.sort()
- for index, host in zip([1, 2, 4], just_hosts):
- self.assertEquals('host%d' % index, host)
-
- raw = ['not',
- ['=', '$compute.host_memory_free', 30],
- ]
- cooked = json.dumps(raw)
- hosts = hf.filter_hosts(all_hosts, cooked, {})
-
- self.assertEquals(3, len(hosts))
- just_hosts = [host for host, caps in hosts]
- just_hosts.sort()
- for index, host in zip([1, 2, 4], just_hosts):
- self.assertEquals('host%d' % index, host)
-
- raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
- cooked = json.dumps(raw)
- hosts = hf.filter_hosts(all_hosts, cooked, {})
- self.assertEquals(2, len(hosts))
- just_hosts = [host for host, caps in hosts]
- just_hosts.sort()
- for index, host in zip([2, 4], just_hosts):
- self.assertEquals('host%d' % index, host)
-
- # Try some bogus input ...
- raw = ['unknown command', ]
- cooked = json.dumps(raw)
- try:
- hf.filter_hosts(all_hosts, cooked, {})
- self.fail("Should give KeyError")
- except KeyError, e:
- pass
-
- self.assertTrue(hf.filter_hosts(all_hosts, json.dumps([]), {}))
- self.assertTrue(hf.filter_hosts(all_hosts, json.dumps({}), {}))
- self.assertTrue(hf.filter_hosts(all_hosts, json.dumps(
- ['not', True, False, True, False],
- ), {}))
-
- try:
- hf.filter_hosts(all_hosts, json.dumps(
- 'not', True, False, True, False,), {})
- self.fail("Should give KeyError")
- except KeyError, e:
- pass
-
- self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(['=', '$foo', 100]), {}))
- self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(['=', '$.....', 100]), {}))
- self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(
- ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]),
- {}))
-
- self.assertFalse(hf.filter_hosts(all_hosts,
- json.dumps(['=', {}, ['>', '$missing....foo']]), {}))
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
new file mode 100644
index 000000000..40f869902
--- /dev/null
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -0,0 +1,333 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import json
+
+from nova.scheduler import filters
+from nova import test
+from nova.tests.scheduler import fakes
+
+
+class HostFiltersTestCase(test.TestCase):
+ """Test case for host filters."""
+
+ def setUp(self):
+ super(HostFiltersTestCase, self).setUp()
+ self.json_query = json.dumps(
+ ['and', ['>=', '$free_ram_mb', 1024],
+ ['>=', '$free_disk_mb', 200 * 1024]])
+
+ def test_all_host_filter(self):
+ filt_cls = filters.AllHostsFilter()
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(filt_cls.host_passes(host, {}))
+
+ def test_compute_filter_passes(self):
+ filt_cls = filters.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024, 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_fails_on_memory(self):
+ filt_cls = filters.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1023, 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_fails_on_disabled(self):
+ filt_cls = filters.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': False}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024, 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_passes_on_volume(self):
+ filt_cls = filters.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': False}
+ host = fakes.FakeHostState('host1', 'volume',
+ {'free_ram_mb': 1024, 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_passes_on_no_instance_type(self):
+ filt_cls = filters.ComputeFilter()
+ filter_properties = {}
+ capabilities = {'enabled': False}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024, 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_passes_extra_specs(self):
+ filt_cls = filters.ComputeFilter()
+ extra_specs = {'opt1': 1, 'opt2': 2}
+ capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024, 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_fails_extra_specs(self):
+ filt_cls = filters.ComputeFilter()
+ extra_specs = {'opt1': 1, 'opt2': 3}
+ capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024, 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_passes(self):
+ filt_cls = filters.JsonFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'query': self.json_query}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024,
+ 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_passes_with_no_query(self):
+ filt_cls = filters.JsonFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200}}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 0,
+ 'free_disk_mb': 0,
+ 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_memory(self):
+ filt_cls = filters.JsonFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'query': self.json_query}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1023,
+ 'free_disk_mb': 200 * 1024,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_disk(self):
+ filt_cls = filters.JsonFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'query': self.json_query}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': (200 * 1024) - 1,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_disk(self):
+ filt_cls = filters.JsonFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'query': self.json_query}
+ capabilities = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': (200 * 1024) - 1,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_disabled(self):
+ filt_cls = filters.JsonFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'query': self.json_query}
+ capabilities = {'enabled': False}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_happy_day(self):
+ """Test json filter more thoroughly"""
+ filt_cls = filters.JsonFilter()
+ raw = ['and',
+ ['=', '$capabilities.opt1', 'match'],
+ ['or',
+ ['and',
+ ['<', '$free_ram_mb', 30],
+ ['<', '$free_disk_mb', 300]],
+ ['and',
+ ['>', '$free_ram_mb', 30],
+ ['>', '$free_disk_mb', 300]]]]
+ filter_properties = {'query': json.dumps(raw)}
+
+ # Passes
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 10,
+ 'free_disk_mb': 200,
+ 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ # Passes
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to disabled
+ capabilities = {'enabled': False, 'opt1': 'match'}
+ host = fakes.FakeHostState('host1', 'instance_type',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to being exact memory/disk we don't want
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 30,
+ 'free_disk_mb': 300,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to memory lower but disk higher
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities 'opt1' not equal
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_basic_operators(self):
+ filt_cls = filters.JsonFilter()
+ host = fakes.FakeHostState('host1', 'compute',
+ {'capabilities': {'enabled': True}})
+ # (operator, arguments, expected_result)
+ ops_to_test = [
+ ['=', [1, 1], True],
+ ['=', [1, 2], False],
+ ['<', [1, 2], True],
+ ['<', [1, 1], False],
+ ['<', [2, 1], False],
+ ['>', [2, 1], True],
+ ['>', [2, 2], False],
+ ['>', [2, 3], False],
+ ['<=', [1, 2], True],
+ ['<=', [1, 1], True],
+ ['<=', [2, 1], False],
+ ['>=', [2, 1], True],
+ ['>=', [2, 2], True],
+ ['>=', [2, 3], False],
+ ['in', [1, 1], True],
+ ['in', [1, 1, 2, 3], True],
+ ['in', [4, 1, 2, 3], False],
+ ['not', [True], False],
+ ['not', [False], True],
+ ['or', [True, False], True],
+ ['or', [False, False], False],
+ ['and', [True, True], True],
+ ['and', [False, False], False],
+ ['and', [True, False], False],
+ # Nested ((True or False) and (2 > 1)) == Passes
+ ['and', [['or', True, False], ['>', 2, 1]], True]]
+
+ for (op, args, expected) in ops_to_test:
+ raw = [op] + args
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertEqual(expected,
+ filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, True, False, True] and if any are True
+ # then it passes...
+ raw = ['not', True, False, True, False]
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, False, False] and if any are True
+ # then it passes...which this doesn't
+ raw = ['not', True, True, True]
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_operator_raises(self):
+ filt_cls = filters.JsonFilter()
+ raw = ['!=', 1, 2]
+ filter_properties = {'query': json.dumps(raw)}
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'capabilities': {'enabled': True}})
+ self.assertRaises(KeyError,
+ filt_cls.host_passes, host, filter_properties)
+
+ def test_json_filter_empty_filters_pass(self):
+ filt_cls = filters.JsonFilter()
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'capabilities': {'enabled': True}})
+
+ raw = []
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ raw = {}
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_invalid_num_arguments_fails(self):
+ filt_cls = filters.JsonFilter()
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'capabilities': {'enabled': True}})
+
+ raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ raw = ['>', 1]
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_variable_ignored(self):
+ filt_cls = filters.JsonFilter()
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'capabilities': {'enabled': True}})
+
+ raw = ['=', '$........', 1, 1]
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ raw = ['=', '$foo', 2, 2]
+ filter_properties = {'query': json.dumps(raw)}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
new file mode 100644
index 000000000..ed0fb3d63
--- /dev/null
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -0,0 +1,360 @@
+# Copyright (c) 2011 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For HostManager
+"""
+
+import datetime
+
+import mox
+
+from nova import db
+from nova import exception
+from nova import log as logging
+from nova.scheduler import host_manager
+from nova import test
+from nova.tests.scheduler import fakes
+from nova import utils
+
+
+class ComputeFilterClass1(object):
+ def host_passes(self, *args, **kwargs):
+ pass
+
+
+class ComputeFilterClass2(object):
+ def host_passes(self, *args, **kwargs):
+ pass
+
+
+class HostManagerTestCase(test.TestCase):
+ """Test case for HostManager class"""
+
+ def setUp(self):
+ super(HostManagerTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(default_host_filters='ComputeFilterClass3')
+ self.host_manager.filter_classes = [ComputeFilterClass1,
+ ComputeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(default_host_filters=['ComputeFilterClass2'])
+ self.host_manager.filter_classes = [ComputeFilterClass1,
+ ComputeFilterClass2]
+
+ # Test 'compute' returns 1 correct function
+ filter_fns = self.host_manager._choose_host_filters(None)
+ self.assertEqual(len(filter_fns), 1)
+ self.assertEqual(filter_fns[0].__func__,
+ ComputeFilterClass2.host_passes.__func__)
+
+ def test_filter_hosts(self):
+ topic = 'fake_topic'
+
+ filters = ['fake-filter1', 'fake-filter2']
+ fake_host1 = host_manager.HostState('host1', topic)
+ fake_host2 = host_manager.HostState('host2', topic)
+ hosts = [fake_host1, fake_host2]
+ filter_properties = 'fake_properties'
+
+ self.mox.StubOutWithMock(self.host_manager,
+ '_choose_host_filters')
+ self.mox.StubOutWithMock(fake_host1, 'passes_filters')
+ self.mox.StubOutWithMock(fake_host2, 'passes_filters')
+
+ self.host_manager._choose_host_filters(None).AndReturn(filters)
+ fake_host1.passes_filters(filters, filter_properties).AndReturn(
+ False)
+ fake_host2.passes_filters(filters, filter_properties).AndReturn(
+ True)
+
+ self.mox.ReplayAll()
+ filtered_hosts = self.host_manager.filter_hosts(hosts,
+ filter_properties, filters=None)
+ self.mox.VerifyAll()
+ self.assertEqual(len(filtered_hosts), 1)
+ self.assertEqual(filtered_hosts[0], fake_host2)
+
+ def test_update_service_capabilities(self):
+ service_states = self.host_manager.service_states
+ self.assertDictMatch(service_states, {})
+ self.mox.StubOutWithMock(utils, 'utcnow')
+ utils.utcnow().AndReturn(31337)
+ utils.utcnow().AndReturn(31338)
+ utils.utcnow().AndReturn(31339)
+
+ host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
+ timestamp=1)
+ host1_volume_capabs = dict(free_disk=4321, timestamp=1)
+ host2_compute_capabs = dict(free_memory=8756, timestamp=1)
+
+ self.mox.ReplayAll()
+ self.host_manager.update_service_capabilities('compute', 'host1',
+ host1_compute_capabs)
+ self.host_manager.update_service_capabilities('volume', 'host1',
+ host1_volume_capabs)
+ self.host_manager.update_service_capabilities('compute', 'host2',
+ host2_compute_capabs)
+ self.mox.VerifyAll()
+
+ # Make sure dictionary isn't re-assigned
+ self.assertEqual(self.host_manager.service_states, service_states)
+ # Make sure original dictionary wasn't copied
+ self.assertEqual(host1_compute_capabs['timestamp'], 1)
+
+ host1_compute_capabs['timestamp'] = 31337
+ host1_volume_capabs['timestamp'] = 31338
+ host2_compute_capabs['timestamp'] = 31339
+
+ expected = {'host1': {'compute': host1_compute_capabs,
+ 'volume': host1_volume_capabs},
+ 'host2': {'compute': host2_compute_capabs}}
+ self.assertDictMatch(service_states, expected)
+
+ def test_host_service_caps_stale(self):
+ self.flags(periodic_interval=5)
+
+ host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
+ timestamp=datetime.datetime.fromtimestamp(3000))
+ host1_volume_capabs = dict(free_disk=4321,
+ timestamp=datetime.datetime.fromtimestamp(3005))
+ host2_compute_capabs = dict(free_memory=8756,
+ timestamp=datetime.datetime.fromtimestamp(3010))
+
+ service_states = {'host1': {'compute': host1_compute_capabs,
+ 'volume': host1_volume_capabs},
+ 'host2': {'compute': host2_compute_capabs}}
+
+ self.host_manager.service_states = service_states
+
+ self.mox.StubOutWithMock(utils, 'utcnow')
+ utils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
+ utils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
+ utils.utcnow().AndReturn(datetime.datetime.fromtimestamp(3020))
+
+ self.mox.ReplayAll()
+ res1 = self.host_manager.host_service_caps_stale('host1', 'compute')
+ res2 = self.host_manager.host_service_caps_stale('host1', 'volume')
+ res3 = self.host_manager.host_service_caps_stale('host2', 'compute')
+ self.mox.VerifyAll()
+
+ self.assertEqual(res1, True)
+ self.assertEqual(res2, False)
+ self.assertEqual(res3, False)
+
+ def test_delete_expired_host_services(self):
+ host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
+ timestamp=datetime.datetime.fromtimestamp(3000))
+ host1_volume_capabs = dict(free_disk=4321,
+ timestamp=datetime.datetime.fromtimestamp(3005))
+ host2_compute_capabs = dict(free_memory=8756,
+ timestamp=datetime.datetime.fromtimestamp(3010))
+
+ service_states = {'host1': {'compute': host1_compute_capabs,
+ 'volume': host1_volume_capabs},
+ 'host2': {'compute': host2_compute_capabs}}
+ self.host_manager.service_states = service_states
+
+ to_delete = {'host1': {'volume': host1_volume_capabs},
+ 'host2': {'compute': host2_compute_capabs}}
+
+ self.host_manager.delete_expired_host_services(to_delete)
+ # Make sure dictionary isn't re-assigned
+ self.assertEqual(self.host_manager.service_states, service_states)
+
+ expected = {'host1': {'compute': host1_compute_capabs}}
+ self.assertEqual(service_states, expected)
+
+ def test_get_service_capabilities(self):
+ host1_compute_capabs = dict(free_memory=1000, host_memory=5678,
+ timestamp=datetime.datetime.fromtimestamp(3000))
+ host1_volume_capabs = dict(free_disk=4321,
+ timestamp=datetime.datetime.fromtimestamp(3005))
+ host2_compute_capabs = dict(free_memory=8756,
+ timestamp=datetime.datetime.fromtimestamp(3010))
+ host2_volume_capabs = dict(free_disk=8756,
+ enabled=False,
+ timestamp=datetime.datetime.fromtimestamp(3010))
+ host3_compute_capabs = dict(free_memory=1234, host_memory=4000,
+ timestamp=datetime.datetime.fromtimestamp(3010))
+ host3_volume_capabs = dict(free_disk=2000,
+ timestamp=datetime.datetime.fromtimestamp(3010))
+
+ service_states = {'host1': {'compute': host1_compute_capabs,
+ 'volume': host1_volume_capabs},
+ 'host2': {'compute': host2_compute_capabs,
+ 'volume': host2_volume_capabs},
+ 'host3': {'compute': host3_compute_capabs,
+ 'volume': host3_volume_capabs}}
+ self.host_manager.service_states = service_states
+
+ info = {'called': 0}
+
+ # This tests with 1 volume disabled (host2), and 1 volume node
+ # as stale (host1)
+ def _fake_host_service_caps_stale(host, service):
+ info['called'] += 1
+ if host == 'host1':
+ if service == 'compute':
+ return False
+ elif service == 'volume':
+ return True
+ elif host == 'host2':
+ # Shouldn't get here for 'volume' because the service
+ # is disabled
+ self.assertEqual(service, 'compute')
+ return False
+ self.assertEqual(host, 'host3')
+ return False
+
+ self.stubs.Set(self.host_manager, 'host_service_caps_stale',
+ _fake_host_service_caps_stale)
+
+ self.mox.StubOutWithMock(self.host_manager,
+ 'delete_expired_host_services')
+ self.host_manager.delete_expired_host_services({'host1': ['volume']})
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_service_capabilities()
+ self.mox.VerifyAll()
+
+ self.assertEqual(info['called'], 5)
+
+ # only 1 volume node active == 'host3', so min/max is 2000
+ expected = {'volume_free_disk': (2000, 2000),
+ 'compute_host_memory': (4000, 5678),
+ 'compute_free_memory': (1000, 8756)}
+
+ self.assertDictMatch(result, expected)
+
+ def test_get_all_host_states(self):
+ self.flags(reserved_host_memory_mb=512,
+ reserved_host_disk_mb=1024)
+
+ context = 'fake_context'
+ topic = 'compute'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ self.mox.StubOutWithMock(logging, 'warn')
+ self.mox.StubOutWithMock(db, 'instance_get_all')
+
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # Invalid service
+ logging.warn("No service for compute ID 5")
+ db.instance_get_all(context).AndReturn(fakes.INSTANCES)
+
+ self.mox.ReplayAll()
+ host_states = self.host_manager.get_all_host_states(context, topic)
+ self.mox.VerifyAll()
+
+ self.assertEqual(len(host_states), 4)
+ self.assertEqual(host_states['host1'].free_ram_mb, 0)
+ # 511GB
+ self.assertEqual(host_states['host1'].free_disk_mb, 523264)
+ self.assertEqual(host_states['host2'].free_ram_mb, 512)
+ # 1023GB
+ self.assertEqual(host_states['host2'].free_disk_mb, 1047552)
+ self.assertEqual(host_states['host3'].free_ram_mb, 2560)
+ # 3071GB
+ self.assertEqual(host_states['host3'].free_disk_mb, 3144704)
+ self.assertEqual(host_states['host4'].free_ram_mb, 7680)
+ # 8191GB
+ self.assertEqual(host_states['host4'].free_disk_mb, 8387584)
+
+
+class HostStateTestCase(test.TestCase):
+ """Test case for HostState class"""
+
+ def setUp(self):
+ super(HostStateTestCase, self).setUp()
+
+ # update_from_compute_node() and consume_from_instance() are tested
+ # in HostManagerTestCase.test_get_all_host_states()
+
+ def test_host_state_passes_filters_passes(self):
+ fake_host = host_manager.HostState('host1', 'compute')
+ filter_properties = {}
+
+ cls1 = ComputeFilterClass1()
+ cls2 = ComputeFilterClass2()
+ self.mox.StubOutWithMock(cls1, 'host_passes')
+ self.mox.StubOutWithMock(cls2, 'host_passes')
+ filter_fns = [cls1.host_passes, cls2.host_passes]
+
+ cls1.host_passes(fake_host, filter_properties).AndReturn(True)
+ cls2.host_passes(fake_host, filter_properties).AndReturn(True)
+
+ self.mox.ReplayAll()
+ result = fake_host.passes_filters(filter_fns, filter_properties)
+ self.mox.VerifyAll()
+ self.assertTrue(result)
+
+ def test_host_state_passes_filters_passes_with_ignore(self):
+ fake_host = host_manager.HostState('host1', 'compute')
+ filter_properties = {'ignore_hosts': ['host2']}
+
+ cls1 = ComputeFilterClass1()
+ cls2 = ComputeFilterClass2()
+ self.mox.StubOutWithMock(cls1, 'host_passes')
+ self.mox.StubOutWithMock(cls2, 'host_passes')
+ filter_fns = [cls1.host_passes, cls2.host_passes]
+
+ cls1.host_passes(fake_host, filter_properties).AndReturn(True)
+ cls2.host_passes(fake_host, filter_properties).AndReturn(True)
+
+ self.mox.ReplayAll()
+ result = fake_host.passes_filters(filter_fns, filter_properties)
+ self.mox.VerifyAll()
+ self.assertTrue(result)
+
+ def test_host_state_passes_filters_fails(self):
+ fake_host = host_manager.HostState('host1', 'compute')
+ filter_properties = {}
+
+ cls1 = ComputeFilterClass1()
+ cls2 = ComputeFilterClass2()
+ self.mox.StubOutWithMock(cls1, 'host_passes')
+ self.mox.StubOutWithMock(cls2, 'host_passes')
+ filter_fns = [cls1.host_passes, cls2.host_passes]
+
+ cls1.host_passes(fake_host, filter_properties).AndReturn(False)
+ # cls2.host_passes() not called because of short circuit
+
+ self.mox.ReplayAll()
+ result = fake_host.passes_filters(filter_fns, filter_properties)
+ self.mox.VerifyAll()
+ self.assertFalse(result)
+
+ def test_host_state_passes_filters_fails_from_ignore(self):
+ fake_host = host_manager.HostState('host1', 'compute')
+ filter_properties = {'ignore_hosts': ['host1']}
+
+ cls1 = ComputeFilterClass1()
+ cls2 = ComputeFilterClass2()
+ self.mox.StubOutWithMock(cls1, 'host_passes')
+ self.mox.StubOutWithMock(cls2, 'host_passes')
+ filter_fns = [cls1.host_passes, cls2.host_passes]
+
+ # cls[12].host_passes() not called because of short circuit
+ # with matching host to ignore
+
+ self.mox.ReplayAll()
+ result = fake_host.passes_filters(filter_fns, filter_properties)
+ self.mox.VerifyAll()
+ self.assertFalse(result)
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index 65a4268d3..6b72b026d 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -15,9 +15,10 @@
"""
Tests For Least Cost functions.
"""
+from nova import context
from nova.scheduler import least_cost
from nova import test
-from nova.tests.scheduler import fake_zone_manager
+from nova.tests.scheduler import fakes
def offset(hostinfo, options):
@@ -32,38 +33,47 @@ class LeastCostTestCase(test.TestCase):
def setUp(self):
super(LeastCostTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
-
- self.zone_manager = fake_zone_manager.FakeZoneManager()
+ self.host_manager = fakes.FakeHostManager()
def tearDown(self):
super(LeastCostTestCase, self).tearDown()
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ fakes.mox_host_manager_db_calls(self.mox, ctxt)
+ self.mox.ReplayAll()
+ host_states = self.host_manager.get_all_host_states(ctxt,
+ 'compute').values()
+ self.mox.VerifyAll()
+ self.mox.ResetAll()
+ return host_states
+
def test_weighted_sum_happy_day(self):
fn_tuples = [(1.0, offset), (1.0, scale)]
- hostinfo_list = self.zone_manager.get_all_host_data(None).items()
+ hostinfo_list = self._get_all_hosts()
- # host1: free_ram_mb=0
- # host2: free_ram_mb=1536
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# [offset, scale]=
- # [10000, 11536, 13072, 18192]
- # [0, 768, 1536, 4096]
+ # [10512, 11024, 13072, 18192]
+ # [1024, 2048, 6144, 16384]
# adjusted [ 1.0 * x + 1.0 * y] =
- # [10000, 12304, 14608, 22288]
+ # [11536, 13072, 19216, 34576]
# so, host1 should win:
options = {}
weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
- options)
- self.assertEqual(weighted_host.weight, 10000)
- self.assertEqual(weighted_host.host, 'host1')
+ options)
+ self.assertEqual(weighted_host.weight, 11536)
+ self.assertEqual(weighted_host.host_state.host, 'host1')
def test_weighted_sum_single_function(self):
fn_tuples = [(1.0, offset), ]
- hostinfo_list = self.zone_manager.get_all_host_data(None).items()
+ hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=0
# host2: free_ram_mb=1536
@@ -71,11 +81,11 @@ class LeastCostTestCase(test.TestCase):
# host4: free_ram_mb=8192
# [offset, ]=
- # [10000, 11536, 13072, 18192]
+ # [10512, 11024, 13072, 18192]
# so, host1 should win:
options = {}
weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
options)
- self.assertEqual(weighted_host.weight, 10000)
- self.assertEqual(weighted_host.host, 'host1')
+ self.assertEqual(weighted_host.weight, 10512)
+ self.assertEqual(weighted_host.host_state.host, 'host1')
diff --git a/nova/tests/scheduler/test_zone_manager.py b/nova/tests/scheduler/test_zone_manager.py
new file mode 100644
index 000000000..364384c1c
--- /dev/null
+++ b/nova/tests/scheduler/test_zone_manager.py
@@ -0,0 +1,189 @@
+# Copyright 2010 United States Government as represented by the
+# All Rights Reserved.
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For ZoneManager
+"""
+
+import mox
+
+from nova import db
+from nova import flags
+from nova.scheduler import zone_manager
+from nova import test
+
+FLAGS = flags.FLAGS
+
+
+def _create_zone(zone_id=1, name=None, api_url=None, username=None):
+ if api_url is None:
+ api_url = "http://foo.com"
+ if username is None:
+ username = "user1"
+ if name is None:
+ name = "child1"
+ return dict(id=zone_id, name=name, api_url=api_url,
+ username=username, password="pass1", weight_offset=0.0,
+ weight_scale=1.0)
+
+
+def exploding_novaclient(zone):
+ """Used when we want to simulate a novaclient call failing."""
+ raise Exception("kaboom")
+
+
+class ZoneManagerTestCase(test.TestCase):
+ """Test case for zone manager"""
+
+ zone_manager_cls = zone_manager.ZoneManager
+ zone_state_cls = zone_manager.ZoneState
+
+ def setUp(self):
+ super(ZoneManagerTestCase, self).setUp()
+ self.zone_manager = self.zone_manager_cls()
+
+ def _create_zone_state(self, zone_id=1, name=None, api_url=None,
+ username=None):
+ zone = self.zone_state_cls()
+ zone.zone_info = _create_zone(zone_id, name, api_url, username)
+ return zone
+
+ def test_update(self):
+ zm = self.zone_manager
+ self.mox.StubOutWithMock(zm, '_refresh_from_db')
+ self.mox.StubOutWithMock(zm, '_poll_zones')
+ zm._refresh_from_db(mox.IgnoreArg())
+ zm._poll_zones()
+
+ self.mox.ReplayAll()
+ zm.update(None)
+ self.mox.VerifyAll()
+
+ def test_refresh_from_db_new(self):
+ zone = _create_zone(zone_id=1, username='user1')
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([zone])
+
+ zm = self.zone_manager
+ self.assertEquals(len(zm.zone_states), 0)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertIn(1, zm.zone_states)
+ self.assertEquals(zm.zone_states[1].zone_info['username'], 'user1')
+
+ def test_refresh_from_db_replace_existing(self):
+ zone_state = self._create_zone_state(zone_id=1, username='user1')
+ zm = self.zone_manager
+ zm.zone_states[1] = zone_state
+
+ zone = _create_zone(zone_id=1, username='user2')
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([zone])
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[1].zone_info['username'], 'user2')
+
+ def test_refresh_from_db_missing(self):
+ zone_state = self._create_zone_state(zone_id=1, username='user1')
+ zm = self.zone_manager
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([])
+
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 0)
+
+ def test_refresh_from_db_add(self):
+ zone_state = self._create_zone_state(zone_id=1, username='user1')
+ zm = self.zone_manager
+ zm.zone_states[1] = zone_state
+
+ zone1 = _create_zone(zone_id=1, username='user1')
+ zone2 = _create_zone(zone_id=2, username='user2')
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([zone1, zone2])
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 2)
+ self.assertIn(1, zm.zone_states)
+ self.assertIn(2, zm.zone_states)
+ self.assertEquals(zm.zone_states[1].zone_info['username'], 'user1')
+ self.assertEquals(zm.zone_states[2].zone_info['username'], 'user2')
+
+ def test_refresh_from_db_add_and_delete(self):
+ zone_state = self._create_zone_state(zone_id=1, username='user1')
+ zm = self.zone_manager
+ zm.zone_states[1] = zone_state
+
+ zone2 = _create_zone(zone_id=2, username='user2')
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([zone2])
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertIn(2, zm.zone_states)
+ self.assertEquals(zm.zone_states[2].zone_info['username'], 'user2')
+
+ def test_poll_zone(self):
+ zone_state = self._create_zone_state(zone_id=1, name='child1')
+ zone_state.attempt = 1
+
+ self.mox.StubOutWithMock(zone_state, 'call_novaclient')
+ zone_state.call_novaclient().AndReturn(
+ dict(name=zone_state.zone_info['name'],
+ hairdresser='dietz'))
+ self.assertDictMatch(zone_state.capabilities, {})
+
+ self.mox.ReplayAll()
+ zone_state.poll()
+ self.mox.VerifyAll()
+ self.assertEquals(zone_state.attempt, 0)
+ self.assertDictMatch(zone_state.capabilities,
+ dict(hairdresser='dietz'))
+ self.assertTrue(zone_state.is_active)
+
+ def test_poll_zones_with_failure(self):
+ zone_state = self._create_zone_state(zone_id=1)
+ zone_state.attempt = FLAGS.zone_failures_to_offline - 1
+
+ self.mox.StubOutWithMock(zone_state, 'call_novaclient')
+ zone_state.call_novaclient().AndRaise(Exception('foo'))
+
+ self.mox.ReplayAll()
+ zone_state.poll()
+ self.mox.VerifyAll()
+ self.assertEquals(zone_state.attempt, 3)
+ self.assertFalse(zone_state.is_active)
diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py
deleted file mode 100644
index 703ff0bf9..000000000
--- a/nova/tests/test_zones.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For ZoneManager
-"""
-
-import datetime
-import mox
-
-from nova import db
-from nova import flags
-from nova import test
-from nova import utils
-from nova.scheduler import zone_manager
-
-FLAGS = flags.FLAGS
-
-
-class FakeZone:
- """Represents a fake zone from the db"""
- def __init__(self, *args, **kwargs):
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
-
-
-def exploding_novaclient(zone):
- """Used when we want to simulate a novaclient call failing."""
- raise Exception("kaboom")
-
-
-class ZoneManagerTestCase(test.TestCase):
- """Test case for zone manager"""
- def test_ping(self):
- zm = zone_manager.ZoneManager()
- self.mox.StubOutWithMock(zm, '_refresh_from_db')
- self.mox.StubOutWithMock(zm, '_poll_zones')
- zm._refresh_from_db(mox.IgnoreArg())
- zm._poll_zones(mox.IgnoreArg())
-
- self.mox.ReplayAll()
- zm.ping(None)
- self.mox.VerifyAll()
-
- def test_refresh_from_db_new(self):
- zm = zone_manager.ZoneManager()
-
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([
- FakeZone(id=1, api_url='http://foo.com', username='user1',
- password='pass1', name='child', weight_offset=0.0,
- weight_scale=1.0),
- ])
-
- self.assertEquals(len(zm.zone_states), 0)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 1)
- self.assertEquals(zm.zone_states[1].username, 'user1')
-
- def test_service_capabilities(self):
- zm = zone_manager.ZoneManager()
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, {})
-
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
-
- zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
-
- zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
-
- zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
- svc10_a=(99, 99), svc10_b=(99, 99)))
-
- zm.update_service_capabilities("svc1", "host3", dict(c=5))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
- svc1_c=(5, 5), svc10_a=(99, 99),
- svc10_b=(99, 99)))
-
- def test_refresh_from_db_replace_existing(self):
- zm = zone_manager.ZoneManager()
- zone_state = zone_manager.ZoneState()
- zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
- username='user1', password='pass1', name='child',
- weight_offset=0.0, weight_scale=1.0))
- zm.zone_states[1] = zone_state
-
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([
- FakeZone(id=1, api_url='http://foo.com', username='user2',
- password='pass2', name='child',
- weight_offset=0.0, weight_scale=1.0),
- ])
-
- self.assertEquals(len(zm.zone_states), 1)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 1)
- self.assertEquals(zm.zone_states[1].username, 'user2')
-
- def test_refresh_from_db_missing(self):
- zm = zone_manager.ZoneManager()
- zone_state = zone_manager.ZoneState()
- zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
- username='user1', password='pass1', name='child',
- weight_offset=0.0, weight_scale=1.0))
- zm.zone_states[1] = zone_state
-
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([])
-
- self.assertEquals(len(zm.zone_states), 1)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 0)
-
- def test_refresh_from_db_add_and_delete(self):
- zm = zone_manager.ZoneManager()
- zone_state = zone_manager.ZoneState()
- zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
- username='user1', password='pass1', name='child',
- weight_offset=2.0, weight_scale=3.0))
- zm.zone_states[1] = zone_state
-
- self.mox.StubOutWithMock(db, 'zone_get_all')
-
- db.zone_get_all(mox.IgnoreArg()).AndReturn([
- FakeZone(id=2, api_url='http://foo.com', username='user2',
- password='pass2', name='child', weight_offset=2.0,
- weight_scale=3.0),
- ])
- self.assertEquals(len(zm.zone_states), 1)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 1)
- self.assertEquals(zm.zone_states[2].username, 'user2')
-
- def test_poll_zone(self):
- self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
- zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
- dict(name='child', capabilities='hairdresser'))
-
- zone_state = zone_manager.ZoneState()
- zone_state.update_credentials(FakeZone(id=2,
- api_url='http://foo.com', username='user2',
- password='pass2', name='child',
- weight_offset=0.0, weight_scale=1.0))
- zone_state.attempt = 1
-
- self.mox.ReplayAll()
- zone_manager._poll_zone(zone_state)
- self.mox.VerifyAll()
- self.assertEquals(zone_state.attempt, 0)
- self.assertEquals(zone_state.name, 'child')
-
- def test_poll_zone_fails(self):
- self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
-
- zone_state = zone_manager.ZoneState()
- zone_state.update_credentials(FakeZone(id=2,
- api_url='http://foo.com', username='user2',
- password='pass2', name='child',
- weight_offset=0.0, weight_scale=1.0))
- zone_state.attempt = FLAGS.zone_failures_to_offline - 1
-
- self.mox.ReplayAll()
- zone_manager._poll_zone(zone_state)
- self.mox.VerifyAll()
- self.assertEquals(zone_state.attempt, 3)
- self.assertFalse(zone_state.is_active)
-
- def test_host_service_caps_stale_no_stale_service(self):
- zm = zone_manager.ZoneManager()
-
- # services just updated capabilities
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
- self.assertFalse(zm.host_service_caps_stale("host1", "svc1"))
- self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
-
- def test_host_service_caps_stale_all_stale_services(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # Both services became stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
- time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
- utils.set_time_override(time_future)
- self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
- self.assertTrue(zm.host_service_caps_stale("host1", "svc2"))
- utils.clear_time_override()
-
- def test_host_service_caps_stale_one_stale_service(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # One service became stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
- caps = zm.service_states["host1"]["svc1"]
- caps["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
- self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
-
- def test_delete_expired_host_services_del_one_service(self):
- zm = zone_manager.ZoneManager()
-
- # Delete one service in a host
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
- stale_host_services = {"host1": ["svc1"]}
- zm.delete_expired_host_services(stale_host_services)
- self.assertFalse("svc1" in zm.service_states["host1"])
- self.assertTrue("svc2" in zm.service_states["host1"])
-
- def test_delete_expired_host_services_del_all_hosts(self):
- zm = zone_manager.ZoneManager()
-
- # Delete all services in a host
- zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- stale_host_services = {"host1": ["svc1", "svc2"]}
- zm.delete_expired_host_services(stale_host_services)
- self.assertFalse("host1" in zm.service_states)
-
- def test_delete_expired_host_services_del_one_service_per_host(self):
- zm = zone_manager.ZoneManager()
-
- # Delete one service per host
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- stale_host_services = {"host1": ["svc1"], "host2": ["svc1"]}
- zm.delete_expired_host_services(stale_host_services)
- self.assertFalse("host1" in zm.service_states)
- self.assertFalse("host2" in zm.service_states)
-
- def test_get_zone_capabilities_one_host(self):
- zm = zone_manager.ZoneManager()
-
- # Service capabilities recent
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
-
- def test_get_zone_capabilities_expired_host(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # Service capabilities stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
- utils.set_time_override(time_future)
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, {})
- utils.clear_time_override()
-
- def test_get_zone_capabilities_multiple_hosts(self):
- zm = zone_manager.ZoneManager()
-
- # Both host service capabilities recent
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4)))
-
- def test_get_zone_capabilities_one_stale_host(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # One host service capabilities become stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- serv_caps = zm.service_states["host1"]["svc1"]
- serv_caps["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(3, 3), svc1_b=(4, 4)))
-
- def test_get_zone_capabilities_multiple_service_per_host(self):
- zm = zone_manager.ZoneManager()
-
- # Multiple services per host
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
- zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4),
- svc2_a=(5, 7), svc2_b=(6, 8)))
-
- def test_get_zone_capabilities_one_stale_service_per_host(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # Two host services among four become stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
- zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
- serv_caps_1 = zm.service_states["host1"]["svc2"]
- serv_caps_1["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- serv_caps_2 = zm.service_states["host2"]["svc1"]
- serv_caps_2["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2),
- svc2_a=(7, 7), svc2_b=(8, 8)))
-
- def test_get_zone_capabilities_three_stale_host_services(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # Three host services among four become stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
- zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
- serv_caps_1 = zm.service_states["host1"]["svc2"]
- serv_caps_1["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- serv_caps_2 = zm.service_states["host2"]["svc1"]
- serv_caps_2["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- serv_caps_3 = zm.service_states["host2"]["svc2"]
- serv_caps_3["timestamp"] = utils.utcnow() - \
- datetime.timedelta(seconds=expiry_time)
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
-
- def test_get_zone_capabilities_all_stale_host_services(self):
- zm = zone_manager.ZoneManager()
- expiry_time = (FLAGS.periodic_interval * 3) + 1
-
- # All the host services become stale
- zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
- zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
- zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
- time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
- utils.set_time_override(time_future)
- caps = zm.get_zone_capabilities(None)
- self.assertEquals(caps, {})