summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorMikyung Kang <mkkang@isi.edu>2012-11-10 10:20:45 +0900
committerArata Notsu <notsu@virtualtech.jp>2012-11-10 10:20:45 +0900
commit885cf0949ab2e116da39143c2f6469362367ec7d (patch)
tree3eb769baee1d77955b6ae357466e66bccf993690 /nova/tests
parent910a5487352dc0d36a4520f5b2787ec2290d965f (diff)
Updated scheduler and compute for multiple capabilities.
Part 1 of 6: blueprint general-bare-metal-provisioning-framework. This patch includes updates to the scheduler and compute code for multiple capabilities. This feature is needed in bare-metal provisioning, which is implemented in later patches --- a bare-metal nova-compute manages multiple bare-metal nodes where instances are provisioned. A compute_nodes entry in the Nova DB needs to be created for each bare-metal node, and a scheduler can choose an appropriate bare-metal node on which to provision an instance. With this patch, one service entry with multiple compute_node entries can be registered by nova-compute. A distinct 'node name' is given to each node and is stored at compute_node['hypervisor_hostname']. We also added a new column "node" to the "instances" table in the Nova DB to associate instances with compute_node. FilterScheduler puts <nodename> into that column when it provisions the instance. And nova-computes respect <nodename> when running/stopping instances and when calculating resources. Also, 'capability' is extended from a dictionary to a list of dictionaries to describe the multiple capabilities of the multiple nodes. Change-Id: I527febe4dbd887b2e6596ce7226c1ae3386e2ae6 Co-authored-by: Mikyung Kang <mkkang@isi.edu> Co-authored-by: David Kang <dkang@isi.edu> Co-authored-by: Ken Igarashi <igarashik@nttdocomo.co.jp> Co-authored-by: Arata Notsu <notsu@virtualtech.jp>
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/compute/test_compute.py24
-rw-r--r--nova/tests/compute/test_multiple_nodes.py99
-rw-r--r--nova/tests/compute/test_resource_tracker.py14
-rw-r--r--nova/tests/scheduler/fakes.py26
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py8
-rw-r--r--nova/tests/scheduler/test_host_filters.py164
-rw-r--r--nova/tests/scheduler/test_host_manager.py78
-rw-r--r--nova/tests/scheduler/test_least_cost.py2
-rw-r--r--nova/tests/test_hypervapi.py2
9 files changed, 289 insertions, 128 deletions
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index a7b1b3061..d47a861c3 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -61,6 +61,7 @@ from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova import utils
+from nova.virt import fake
from nova.volume import cinder
@@ -73,6 +74,8 @@ CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
FAKE_IMAGE_REF = 'fake-image-ref'
+NODENAME = 'fakenode1'
+
def nop_report_driver_status(self):
pass
@@ -101,12 +104,13 @@ class BaseTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
+ fake.set_nodes([NODENAME])
self.compute = importutils.import_object(FLAGS.compute_manager)
# override tracker with a version that doesn't need the database:
- self.compute.resource_tracker = \
- fake_resource_tracker.FakeResourceTracker(self.compute.host,
- self.compute.driver)
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, NODENAME)
+ self.compute._resource_tracker_dict[NODENAME] = fake_rt
self.compute.update_available_resource(
context.get_admin_context())
@@ -137,6 +141,7 @@ class BaseTestCase(test.TestCase):
notifier_api._reset_drivers()
for instance in instances:
db.instance_destroy(self.context.elevated(), instance['uuid'])
+ fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
@@ -152,6 +157,7 @@ class BaseTestCase(test.TestCase):
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
@@ -210,7 +216,7 @@ class ComputeTestCase(BaseTestCase):
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
- self.rt = self.compute.resource_tracker
+ self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -358,7 +364,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -385,7 +391,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -412,7 +418,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties = {'limits': limits}
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying an amount of memory that exceeds
@@ -459,7 +465,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
@@ -485,7 +491,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
new file mode 100644
index 000000000..830897954
--- /dev/null
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -0,0 +1,99 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service with multiple compute nodes"""
+
+import mox
+
+from nova import config
+from nova import context
+from nova import exception
+from nova.openstack.common import importutils
+from nova import test
+from nova.virt import fake
+
+
+CONF = config.CONF
+
+
+class BaseTestCase(test.TestCase):
+ def tearDown(self):
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+
+class FakeDriverSingleNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverSingleNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['xyz'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, dict))
+ self.assertEqual(stats['hypervisor_hostname'], 'xyz')
+
+ def test_get_available_resource(self):
+ res = self.driver.get_available_resource('xyz')
+ self.assertEqual(res['hypervisor_hostname'], 'xyz')
+
+
+class FakeDriverMultiNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverMultiNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['aaa', 'bbb'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, list))
+ self.assertEqual(len(stats), 2)
+ self.assertEqual(stats[0]['hypervisor_hostname'], 'aaa')
+ self.assertEqual(stats[1]['hypervisor_hostname'], 'bbb')
+
+ def test_get_available_resource(self):
+ res_a = self.driver.get_available_resource('aaa')
+ self.assertEqual(res_a['hypervisor_hostname'], 'aaa')
+
+ res_b = self.driver.get_available_resource('bbb')
+ self.assertEqual(res_b['hypervisor_hostname'], 'bbb')
+
+ self.assertRaises(exception.NovaException,
+ self.driver.get_available_resource, 'xxx')
+
+
+class MultiNodeComputeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MultiNodeComputeTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+ self.compute = importutils.import_object(CONF.compute_manager)
+
+ def test_update_available_resource_add_remove_node(self):
+ ctx = context.get_admin_context()
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
+
+ fake.set_nodes(['A', 'B'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B'])
+
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 64cdb8d53..3997de133 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -43,7 +43,7 @@ class UnsupportedVirtDriver(driver.ComputeDriver):
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
@@ -59,7 +59,7 @@ class FakeVirtDriver(driver.ComputeDriver):
self.memory_mb_used = 0
self.local_gb_used = 0
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
@@ -86,8 +86,8 @@ class BaseTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake')
self._instances = {}
- self.stubs.Set(db, 'instance_get_all_by_host',
- lambda c, h: self._instances.values())
+ self.stubs.Set(db, 'instance_get_all_by_host_and_node',
+ lambda c, h, n: self._instances.values())
self.stubs.Set(db, 'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
@@ -106,7 +106,8 @@ class BaseTestCase(test.TestCase):
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
- "stats": [{"key": "num_instances", "value": "1"}]
+ "stats": [{"key": "num_instances", "value": "1"}],
+ "hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
@@ -155,13 +156,14 @@ class BaseTestCase(test.TestCase):
def _tracker(self, unsupported=False):
host = "fakehost"
+ node = "fakenode"
if unsupported:
driver = UnsupportedVirtDriver()
else:
driver = FakeVirtDriver()
- tracker = resource_tracker.ResourceTracker(host, driver)
+ tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 58aa2ffc7..3c7b462d0 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -29,38 +29,42 @@ COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
free_disk_mb=512, local_gb_used=0, updated_at=None,
- service=dict(host='host1', disabled=False)),
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1'),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_mb=1024, local_gb_used=0, updated_at=None,
- service=dict(host='host2', disabled=True)),
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2'),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
free_disk_mb=3072, local_gb_used=0, updated_at=None,
- service=dict(host='host3', disabled=False)),
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3'),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_mb=8192, local_gb_used=0, updated_at=None,
- service=dict(host='host4', disabled=False)),
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4'),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host1'),
+ host='host1', node='node1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host3'),
+ host='host3', node='node3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host5'),
+ host='host5', node='node5'),
]
@@ -96,8 +100,8 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
- def __init__(self, host, attribute_dict):
- super(FakeHostState, self).__init__(host)
+ def __init__(self, host, node, attribute_dict):
+ super(FakeHostState, self).__init__(host, node)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 4238ad916..6add77cbe 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -183,7 +183,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched = fakes.FakeFilterScheduler()
def _return_hosts(*args, **kwargs):
- host_state = host_manager.HostState('host2')
+ host_state = host_manager.HostState('host2', 'node2')
return [least_cost.WeightedHost(1.0, host_state=host_state)]
self.stubs.Set(sched, '_schedule', _return_hosts)
@@ -209,7 +209,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEquals(len(fns), 1)
weight, fn = fns[0]
self.assertEquals(weight, -1.0)
- hostinfo = host_manager.HostState('host')
+ hostinfo = host_manager.HostState('host', 'node')
hostinfo.update_from_compute_node(dict(memory_mb=1000,
local_gb=0, vcpus=1, disk_available_least=1000,
free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
@@ -307,7 +307,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
- host_state = host_manager.HostState('host')
+ host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
sched._post_select_populate_filter_properties(filter_properties,
host_state)
@@ -331,7 +331,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
reservations = None
- host = fakes.FakeHostState('host', {})
+ host = fakes.FakeHostState('host', 'node', {})
weighted_host = least_cost.WeightedHost(1, host)
hosts = [weighted_host]
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index d2facbfc5..6dffe061e 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -291,7 +291,7 @@ class HostFiltersTestCase(test.TestCase):
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
@@ -301,7 +301,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -314,7 +314,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -327,7 +327,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -340,7 +340,11 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host2'})
+ instance_uuid = instance.uuid
+
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
@@ -348,7 +352,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -361,7 +365,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -374,7 +378,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -387,7 +391,11 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host2'})
+ instance_uuid = instance.uuid
+
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
@@ -395,7 +403,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -409,7 +417,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -423,7 +431,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
affinity_ip.append('100')
@@ -440,7 +448,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -456,7 +464,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since empty
@@ -482,7 +490,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_type': {'name': 'fake2'}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since no aggregates
@@ -501,7 +509,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -513,7 +521,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -525,7 +533,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -539,7 +547,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -552,7 +560,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -566,7 +574,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -581,7 +589,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -592,7 +600,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -603,7 +611,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -614,7 +622,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -625,7 +633,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -640,7 +648,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -654,7 +662,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -667,7 +675,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -680,7 +688,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'xen', 'xen')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -691,7 +699,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -703,7 +711,7 @@ class HostFiltersTestCase(test.TestCase):
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -715,7 +723,7 @@ class HostFiltersTestCase(test.TestCase):
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
assertion = self.assertTrue if passes else self.assertFalse
@@ -740,7 +748,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -760,7 +768,8 @@ class HostFiltersTestCase(test.TestCase):
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
- host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
@@ -774,7 +783,8 @@ class HostFiltersTestCase(test.TestCase):
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
db.aggregate_host_delete(self.context.elevated(), agg2.id, 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -800,7 +810,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
@@ -811,7 +821,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('isolated', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
@@ -822,7 +832,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('isolated', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
@@ -833,7 +843,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
@@ -843,7 +853,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -855,7 +865,7 @@ class HostFiltersTestCase(test.TestCase):
'root_gb': 200,
'ephemeral_gb': 0}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0,
'capabilities': capabilities})
@@ -868,7 +878,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -881,7 +891,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1,
'capabilities': capabilities})
@@ -898,7 +908,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -915,7 +925,7 @@ class HostFiltersTestCase(test.TestCase):
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -943,7 +953,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
@@ -953,7 +963,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -963,7 +973,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -973,7 +983,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
@@ -983,7 +993,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -993,7 +1003,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1002,7 +1012,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
# (operator, arguments, expected_result)
ops_to_test = [
@@ -1071,14 +1081,14 @@ class HostFiltersTestCase(test.TestCase):
'query': jsonutils.dumps(raw),
},
}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = []
@@ -1098,7 +1108,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
@@ -1119,7 +1129,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['=', '$........', 1, 1]
@@ -1142,7 +1152,7 @@ class HostFiltersTestCase(test.TestCase):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
@@ -1153,7 +1163,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
@@ -1164,7 +1174,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
@@ -1175,7 +1185,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
@@ -1186,28 +1196,28 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1227,27 +1237,29 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
- host = fakes.FakeHostState('host1', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
- host = fakes.FakeHostState('host1', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
"""Test case where retry/re-scheduling is disabled"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
"""Host not previously tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1, hosts=['host2', 'host3'])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1255,7 +1267,7 @@ class HostFiltersTestCase(test.TestCase):
def test_retry_filter_fail(self):
"""Host was already tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1, hosts=['host3', 'host1'])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1263,25 +1275,29 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', {'num_io_ops': 7})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
-    host = fakes.FakeHostState('host1', {'num_io_ops': 8})
+    host = fakes.FakeHostState('host1', 'node1',
+                               {'num_io_ops': 8})
     filter_properties = {}
     self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', {'num_instances': 4})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', {'num_instances': 5})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index b95803d8e..d7d732d34 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -44,6 +44,10 @@ class HostManagerTestCase(test.TestCase):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
+ def tearDown(self):
+ timeutils.clear_time_override()
+ super(HostManagerTestCase, self).tearDown()
+
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='ComputeFilterClass3')
self.host_manager.filter_classes = [ComputeFilterClass1,
@@ -64,8 +68,8 @@ class HostManagerTestCase(test.TestCase):
def test_filter_hosts(self):
filters = ['fake-filter1', 'fake-filter2']
- fake_host1 = host_manager.HostState('host1')
- fake_host2 = host_manager.HostState('host2')
+ fake_host1 = host_manager.HostState('host1', 'node1')
+ fake_host2 = host_manager.HostState('host2', 'node2')
hosts = [fake_host1, fake_host2]
filter_properties = {'fake_prop': 'fake_val'}
@@ -94,8 +98,9 @@ class HostManagerTestCase(test.TestCase):
timeutils.utcnow().AndReturn(31339)
host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
- timestamp=1)
- host2_compute_capabs = dict(free_memory=8756, timestamp=1)
+ timestamp=1, hypervisor_hostname='node1')
+ host2_compute_capabs = dict(free_memory=8756, timestamp=1,
+ hypervisor_hostname='node2')
self.mox.ReplayAll()
self.host_manager.update_service_capabilities('compute', 'host1',
@@ -109,8 +114,27 @@ class HostManagerTestCase(test.TestCase):
host1_compute_capabs['timestamp'] = 31337
host2_compute_capabs['timestamp'] = 31339
- expected = {'host1': host1_compute_capabs,
- 'host2': host2_compute_capabs}
+ expected = {('host1', 'node1'): host1_compute_capabs,
+ ('host2', 'node2'): host2_compute_capabs}
+ self.assertDictMatch(service_states, expected)
+
+ def test_update_service_capabilities_node_key(self):
+ service_states = self.host_manager.service_states
+ self.assertDictMatch(service_states, {})
+
+ host1_cap = {'hypervisor_hostname': 'host1-hvhn'}
+ host2_cap = {}
+
+ timeutils.set_time_override(31337)
+ self.host_manager.update_service_capabilities('compute', 'host1',
+ host1_cap)
+ timeutils.set_time_override(31338)
+ self.host_manager.update_service_capabilities('compute', 'host2',
+ host2_cap)
+ host1_cap['timestamp'] = 31337
+ host2_cap['timestamp'] = 31338
+ expected = {('host1', 'host1-hvhn'): host1_cap,
+ ('host2', None): host2_cap}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states(self):
@@ -133,20 +157,30 @@ class HostManagerTestCase(test.TestCase):
for i in xrange(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
- self.assertEqual(host_states_map[host].service,
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
compute_node['service'])
- self.assertEqual(host_states_map['host1'].free_ram_mb, 512)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
# 511GB
- self.assertEqual(host_states_map['host1'].free_disk_mb, 524288)
- self.assertEqual(host_states_map['host2'].free_ram_mb, 1024)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
# 1023GB
- self.assertEqual(host_states_map['host2'].free_disk_mb, 1048576)
- self.assertEqual(host_states_map['host3'].free_ram_mb, 3072)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
# 3071GB
- self.assertEqual(host_states_map['host3'].free_disk_mb, 3145728)
- self.assertEqual(host_states_map['host4'].free_ram_mb, 8192)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
# 8191GB
- self.assertEqual(host_states_map['host4'].free_disk_mb, 8388608)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
class HostStateTestCase(test.TestCase):
@@ -156,7 +190,7 @@ class HostStateTestCase(test.TestCase):
# in HostManagerTestCase.test_get_all_host_states()
def test_host_state_passes_filters_passes(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {}
cls1 = ComputeFilterClass1()
@@ -173,7 +207,7 @@ class HostStateTestCase(test.TestCase):
self.assertTrue(result)
def test_host_state_passes_filters_passes_with_ignore(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {'ignore_hosts': ['host2']}
cls1 = ComputeFilterClass1()
@@ -190,7 +224,7 @@ class HostStateTestCase(test.TestCase):
self.assertTrue(result)
def test_host_state_passes_filters_fails(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {}
cls1 = ComputeFilterClass1()
@@ -207,7 +241,7 @@ class HostStateTestCase(test.TestCase):
self.assertFalse(result)
def test_host_state_passes_filters_fails_from_ignore(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {'ignore_hosts': ['host1']}
cls1 = ComputeFilterClass1()
@@ -224,7 +258,7 @@ class HostStateTestCase(test.TestCase):
self.assertFalse(result)
def test_host_state_passes_filters_skipped_from_force(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {'force_hosts': ['host1']}
cls1 = ComputeFilterClass1()
@@ -257,7 +291,7 @@ class HostStateTestCase(test.TestCase):
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
updated_at=None)
- host = host_manager.HostState("fakehost")
+ host = host_manager.HostState("fakehost", "fakenode")
host.update_from_compute_node(compute)
self.assertEqual(5, host.num_instances)
@@ -272,7 +306,7 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(42, host.num_io_ops)
def test_stat_consumption_from_instance(self):
- host = host_manager.HostState("fakehost")
+ host = host_manager.HostState("fakehost", "fakenode")
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index e64cc9e81..3689a30bd 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -95,7 +95,7 @@ class TestWeightedHost(test.TestCase):
self.assertDictMatch(host.to_dict(), expected)
def test_dict_conversion_with_host_state(self):
- host_state = host_manager.HostState('somehost')
+ host_state = host_manager.HostState('somehost', None)
host = least_cost.WeightedHost('someweight', host_state)
expected = {'weight': 'someweight',
'host': 'somehost'}
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 6d2396350..b995be3bd 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -155,7 +155,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
super(HyperVAPITestCase, self).tearDown()
def test_get_available_resource(self):
- dic = self._conn.get_available_resource()
+ dic = self._conn.get_available_resource(None)
self.assertEquals(dic['hypervisor_hostname'], platform.node())