author     Mikyung Kang <mkkang@isi.edu>     2012-11-10 10:20:45 +0900
committer  Arata Notsu <notsu@virtualtech.jp>  2012-11-10 10:20:45 +0900
commit     885cf0949ab2e116da39143c2f6469362367ec7d (patch)
tree       3eb769baee1d77955b6ae357466e66bccf993690 /nova
parent     910a5487352dc0d36a4520f5b2787ec2290d965f (diff)
Updated scheduler and compute for multiple capabilities.
Part 1 of 6: blueprint general-bare-metal-provisioning-framework.

This patch updates the scheduler and compute code for multiple capabilities. The feature is needed for bare-metal provisioning, which is implemented in later patches: a bare-metal nova-compute manages multiple bare-metal nodes on which instances are provisioned. A compute_nodes entry must be created in the Nova DB for each bare-metal node, so that the scheduler can choose an appropriate bare-metal node on which to provision an instance.

With this patch, nova-compute can register one service entry with multiple compute_node entries. Each node is given a distinct node name, stored in compute_node['hypervisor_hostname']. A new column, "node", is added to the "instances" table in the Nova DB to associate instances with compute_nodes: FilterScheduler writes the chosen <nodename> into that column when it provisions an instance, and nova-compute respects <nodename> when running/stopping instances and when calculating resources. Finally, 'capability' is extended from a dictionary to a list of dictionaries to describe the multiple capabilities of the multiple nodes.

Change-Id: I527febe4dbd887b2e6596ce7226c1ae3386e2ae6
Co-authored-by: Mikyung Kang <mkkang@isi.edu>
Co-authored-by: David Kang <dkang@isi.edu>
Co-authored-by: Ken Igarashi <igarashik@nttdocomo.co.jp>
Co-authored-by: Arata Notsu <notsu@virtualtech.jp>
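To make the capability change concrete, here is a minimal sketch (not code from this patch; names and values are invented, and MY_IP stands in for FLAGS.my_ip) of the new contract: a multi-node driver returns one capability dict per node, and the compute manager normalizes a single dict or a list the same way before stamping host_ip on each entry:

    # Illustrative sketch only; mirrors the dict-or-list handling this
    # patch adds to ComputeManager's periodic capability report.
    MY_IP = '10.0.0.5'  # stand-in for FLAGS.my_ip

    def fake_get_host_stats(nodes):
        # A multi-node driver reports one capability dict per node.
        return [{'hypervisor_hostname': n, 'memory_mb': 2048, 'vcpus': 1}
                for n in nodes]

    capabilities = fake_get_host_stats(['bm-node-1', 'bm-node-2'])
    for capability in (capabilities if isinstance(capabilities, list)
                       else [capabilities]):
        capability['host_ip'] = MY_IP
    assert all(c['host_ip'] == MY_IP for c in capabilities)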
Diffstat (limited to 'nova')
-rw-r--r--nova/compute/manager.py31
-rw-r--r--nova/compute/resource_tracker.py19
-rw-r--r--nova/db/api.py5
-rw-r--r--nova/db/sqlalchemy/api.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py55
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py41
-rw-r--r--nova/db/sqlalchemy/models.py4
-rw-r--r--nova/manager.py9
-rw-r--r--nova/scheduler/driver.py9
-rw-r--r--nova/scheduler/filter_scheduler.py6
-rw-r--r--nova/scheduler/host_manager.py25
-rw-r--r--nova/tests/compute/test_compute.py24
-rw-r--r--nova/tests/compute/test_multiple_nodes.py99
-rw-r--r--nova/tests/compute/test_resource_tracker.py14
-rw-r--r--nova/tests/scheduler/fakes.py26
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py8
-rw-r--r--nova/tests/scheduler/test_host_filters.py164
-rw-r--r--nova/tests/scheduler/test_host_manager.py78
-rw-r--r--nova/tests/scheduler/test_least_cost.py2
-rw-r--r--nova/tests/test_hypervapi.py2
-rw-r--r--nova/virt/baremetal/driver.py2
-rw-r--r--nova/virt/driver.py18
-rw-r--r--nova/virt/fake.py51
-rw-r--r--nova/virt/hyperv/driver.py2
-rw-r--r--nova/virt/libvirt/driver.py3
-rw-r--r--nova/virt/powervm/driver.py2
-rw-r--r--nova/virt/vmwareapi/driver.py2
-rw-r--r--nova/virt/xenapi/driver.py3
28 files changed, 544 insertions, 166 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6d69fbac4..e3d2c3a3b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -269,15 +269,24 @@ class ComputeManager(manager.SchedulerDependentManager):
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
- self.resource_tracker = resource_tracker.ResourceTracker(self.host,
- self.driver)
+ self._resource_tracker_dict = {}
+
+ def _get_resource_tracker(self, nodename):
+ rt = self._resource_tracker_dict.get(nodename)
+ if not rt:
+ rt = resource_tracker.ResourceTracker(self.host,
+ self.driver,
+ nodename)
+ self._resource_tracker_dict[nodename] = rt
+ return rt
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
- self.resource_tracker.update_usage(context, instance_ref)
+ rt = self._get_resource_tracker(instance_ref.get('node'))
+ rt.update_usage(context, instance_ref)
notifications.send_update(context, old_ref, instance_ref)
return instance_ref
@@ -520,10 +529,10 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "create.start",
extra_usage_info=extra_usage_info)
network_info = None
+ rt = self._get_resource_tracker(instance.get('node'))
try:
limits = filter_properties.get('limits', {})
- with self.resource_tracker.instance_claim(context, instance,
- limits):
+ with rt.instance_claim(context, instance, limits):
network_info = self._allocate_network(context, instance,
requested_networks)
@@ -2855,7 +2864,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# This will grab info about the host and queue it
# to be sent to the Schedulers.
capabilities = self.driver.get_host_stats(refresh=True)
- capabilities['host_ip'] = FLAGS.my_ip
+ for capability in (capabilities if isinstance(capabilities, list)
+ else [capabilities]):
+ capability['host_ip'] = FLAGS.my_ip
self.update_service_capabilities(capabilities)
@manager.periodic_task(ticks_between_runs=10)
@@ -3024,7 +3035,13 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: security context
"""
- self.resource_tracker.update_available_resource(context)
+ new_resource_tracker_dict = {}
+ nodenames = self.driver.get_available_nodes()
+ for nodename in nodenames:
+ rt = self._get_resource_tracker(nodename)
+ rt.update_available_resource(context)
+ new_resource_tracker_dict[nodename] = rt
+ self._resource_tracker_dict = new_resource_tracker_dict
@manager.periodic_task(
ticks_between_runs=FLAGS.running_deleted_instance_poll_interval)
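The manager.py hunks above replace the single tracker with a lazily populated per-node map that update_available_resource() rebuilds every period, dropping trackers for nodes the driver no longer reports. A self-contained sketch of that lifecycle, with FakeTracker as a hypothetical stand-in for ResourceTracker:

    # Standalone sketch (not patch code) of the per-node tracker cache.
    class FakeTracker(object):  # hypothetical stand-in for ResourceTracker
        def __init__(self, nodename):
            self.nodename = nodename

    _trackers = {}

    def get_tracker(nodename):
        # Create a tracker on first use, like _get_resource_tracker().
        rt = _trackers.get(nodename)
        if not rt:
            rt = _trackers[nodename] = FakeTracker(nodename)
        return rt

    def refresh(nodenames):
        # Rebuild the map so trackers for removed nodes are dropped.
        global _trackers
        _trackers = dict((n, get_tracker(n)) for n in nodenames)

    refresh(['A', 'B', 'C'])
    refresh(['A', 'B'])
    assert sorted(_trackers) == ['A', 'B']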
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index e4a65c081..c2ebf3357 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -54,9 +54,10 @@ class ResourceTracker(object):
are built and destroyed.
"""
- def __init__(self, host, driver):
+ def __init__(self, host, driver, nodename):
self.host = host
self.driver = driver
+ self.nodename = nodename
self.compute_node = None
self.stats = importutils.import_object(FLAGS.compute_stats_class)
self.tracked_instances = {}
@@ -157,7 +158,7 @@ class ResourceTracker(object):
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
"""
- resources = self.driver.get_available_resource()
+ resources = self.driver.get_available_resource(self.nodename)
if not resources:
# The virt driver does not support this function
LOG.audit(_("Virt driver does not support "
@@ -169,8 +170,9 @@ class ResourceTracker(object):
self._report_hypervisor_resource_view(resources)
- # Grab all instances assigned to this host:
- instances = db.instance_get_all_by_host(context, self.host)
+ # Grab all instances assigned to this node:
+ instances = db.instance_get_all_by_host_and_node(context, self.host,
+ self.nodename)
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(resources, instances)
@@ -187,9 +189,12 @@ class ResourceTracker(object):
# no service record, disable resource
return
- compute_node_ref = service['compute_node']
- if compute_node_ref:
- self.compute_node = compute_node_ref[0]
+ compute_node_refs = service['compute_node']
+ if compute_node_refs:
+ for cn in compute_node_refs:
+ if cn.get('hypervisor_hostname') == self.nodename:
+ self.compute_node = cn
+ break
if not self.compute_node:
# Need to create the ComputeNode record:
diff --git a/nova/db/api.py b/nova/db/api.py
index 2d432fc8f..8349c7c25 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -614,6 +614,11 @@ def instance_get_all_by_host(context, host):
return IMPL.instance_get_all_by_host(context, host)
+def instance_get_all_by_host_and_node(context, host, node):
+ """Get all instances belonging to a node."""
+ return IMPL.instance_get_all_by_host_and_node(context, host, node)
+
+
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index c840947e8..810d88eb0 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1647,6 +1647,12 @@ def instance_get_all_by_host(context, host):
@require_admin_context
+def instance_get_all_by_host_and_node(context, host, node):
+ return _instance_get_all_query(context).filter_by(host=host).\
+ filter_by(node=node).all()
+
+
+@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instance_get_all_query(context).filter_by(host=host).\
filter(models.Instance.instance_type_id != type_id).all()
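A hypothetical call site for the new query helper, scoping instance lookups to one node behind a multi-node service (the host and node values are invented):

    # Hypothetical usage of the helper added above.
    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    for inst in db.instance_get_all_by_host_and_node(
            ctxt, 'bm-compute-1', 'bm-node-7'):
        assert inst['host'] == 'bm-compute-1'
        assert inst['node'] == 'bm-node-7'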
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
new file mode 100644
index 000000000..a208aecf6
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
@@ -0,0 +1,55 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import and_, String, Column, MetaData, select, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ node = Column('node', String(length=255))
+
+ instances.create_column(node)
+
+ c_nodes = Table('compute_nodes', meta, autoload=True)
+ services = Table('services', meta, autoload=True)
+
+ # set instances.node = compute_nodes.hypervisor_hostname
+ q = select(
+ [instances.c.id, c_nodes.c.hypervisor_hostname],
+ whereclause=and_(
+ instances.c.deleted != True,
+ services.c.deleted != True,
+ services.c.binary == 'nova-compute',
+ c_nodes.c.deleted != True),
+ from_obj=instances.join(services,
+ instances.c.host == services.c.host)
+ .join(c_nodes,
+ services.c.id == c_nodes.c.service_id))
+ for (instance_id, hypervisor_hostname) in q.execute():
+ instances.update().where(instances.c.id == instance_id).\
+ values(node=hypervisor_hostname).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ node = Column('node', String(length=255))
+
+ instances.drop_column(node)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py
new file mode 100644
index 000000000..397f8a62b
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py
@@ -0,0 +1,41 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table, Index
+
+INDEX_NAME = 'instances_host_node_deleted_idx'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+
+ # Based on instance_get_all_by_host_and_node
+ # from: nova/db/sqlalchemy/api.py
+ index = Index(INDEX_NAME,
+ instances.c.host, instances.c.node, instances.c.deleted)
+ index.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+
+ index = Index(INDEX_NAME,
+ instances.c.host, instances.c.node, instances.c.deleted)
+ index.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 085976373..4f1dd2b25 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -250,7 +250,11 @@ class Instance(BASE, NovaBase):
ephemeral_gb = Column(Integer)
hostname = Column(String(255))
+ # To identify the "Service" which the instance resides in.
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ # To identify the "ComputeNode" which the instance resides in.
+ # This equals to ComputeNode.hypervisor_hostname.
+ node = Column(String(255))
# *not* flavor_id
instance_type_id = Column(Integer)
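The comments added above draw the key distinction of this series: Instance.host names the nova-compute service, while Instance.node names the specific hypervisor (ComputeNode.hypervisor_hostname) behind that service. On a single-node KVM host the two carry the same value; on bare metal they differ. Illustrative values only:

    # Illustrative values only: one service (host) fronting two nodes.
    inst_a = {'host': 'bm-compute-1', 'node': 'bm-node-0001'}
    inst_b = {'host': 'bm-compute-1', 'node': 'bm-node-0002'}
    assert inst_a['host'] == inst_b['host']   # same nova-compute service
    assert inst_a['node'] != inst_b['node']   # different hypervisors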
diff --git a/nova/manager.py b/nova/manager.py
index 22a42d2d3..2aff3d04e 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -240,6 +240,8 @@ class SchedulerDependentManager(Manager):
def update_service_capabilities(self, capabilities):
"""Remember these capabilities to send on next periodic update."""
+ if not isinstance(capabilities, list):
+ capabilities = [capabilities]
self.last_capabilities = capabilities
@periodic_task
@@ -251,5 +253,8 @@ class SchedulerDependentManager(Manager):
"""
if self.last_capabilities:
LOG.debug(_('Notifying Schedulers of capabilities ...'))
- self.scheduler_rpcapi.update_service_capabilities(context,
- self.service_name, self.host, self.last_capabilities)
+ for capability_item in self.last_capabilities:
+ self.scheduler_rpcapi.update_service_capabilities(context,
+ self.service_name, self.host, capability_item)
+ # TODO(NTTdocomo): Make update_service_capabilities() accept a list
+ # of capabilities
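The TODO above flags the per-item casts as a stopgap. A purely speculative sketch of what a list-accepting RPC might look like; no such signature exists in this patch:

    # Speculative only -- the patch still casts one capability at a time.
    def publish_service_capabilities(self, context):
        if self.last_capabilities:
            # Hypothetical future API: accept the whole list in one cast.
            self.scheduler_rpcapi.update_service_capabilities(
                context, self.service_name, self.host,
                self.last_capabilities)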
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f93268906..e9cbd509b 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -99,6 +99,15 @@ def instance_update_db(context, instance_uuid):
return db.instance_update(context, instance_uuid, values)
+def db_instance_node_set(context, instance_uuid, node):
+ '''Set the node field of an Instance.
+
+ :returns: An Instance with the updated fields set properly.
+ '''
+ values = {'node': node}
+ return db.instance_update(context, instance_uuid, values)
+
+
def cast_to_compute_host(context, host, method, **kwargs):
"""Cast request to a compute host queue"""
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 776797d68..751dfa136 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -134,7 +134,11 @@ class FilterScheduler(driver.Scheduler):
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
- updated_instance = driver.instance_update_db(context, instance_uuid)
+ # TODO(NTTdocomo): Combine the next two updates into one
+ driver.db_instance_node_set(context,
+ instance_uuid, weighted_host.host_state.nodename)
+ updated_instance = driver.instance_update_db(context,
+ instance_uuid)
self._post_select_populate_filter_properties(filter_properties,
weighted_host.host_state)
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index faaf3e258..2d6c25e9b 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -93,8 +93,9 @@ class HostState(object):
previously used and lock down access.
"""
- def __init__(self, host, capabilities=None, service=None):
+ def __init__(self, host, node, capabilities=None, service=None):
self.host = host
+ self.nodename = node
self.update_capabilities(capabilities, service)
# Mutable available resources.
@@ -265,8 +266,8 @@ class HostState(object):
return True
def __repr__(self):
- return ("%s ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
- (self.host, self.free_ram_mb, self.free_disk_mb,
+ return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
+ (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
self.num_io_ops, self.num_instances, self.allowed_vm_type))
@@ -277,7 +278,8 @@ class HostManager(object):
host_state_cls = HostState
def __init__(self):
- self.service_states = {} # { <host> : { <service> : { cap k : v }}}
+ # { (host, hypervisor_hostname) : { <service> : { cap k : v }}}
+ self.service_states = {}
self.host_state_map = {}
self.filter_classes = filters.get_filter_classes(
CONF.scheduler_available_filters)
@@ -330,12 +332,13 @@ class HostManager(object):
'from %(host)s'), locals())
return
+ state_key = (host, capabilities.get('hypervisor_hostname'))
LOG.debug(_("Received %(service_name)s service update from "
- "%(host)s."), locals())
+ "%(state_key)s.") % locals())
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
- self.service_states[host] = capab_copy
+ self.service_states[state_key] = capab_copy
def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
@@ -351,16 +354,18 @@ class HostManager(object):
LOG.warn(_("No service for compute ID %s") % compute['id'])
continue
host = service['host']
- capabilities = self.service_states.get(host, None)
- host_state = self.host_state_map.get(host)
+ node = compute.get('hypervisor_hostname')
+ state_key = (host, node)
+ capabilities = self.service_states.get(state_key, None)
+ host_state = self.host_state_map.get(state_key)
if host_state:
host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
- host_state = self.host_state_cls(host,
+ host_state = self.host_state_cls(host, node,
capabilities=capabilities,
service=dict(service.iteritems()))
- self.host_state_map[host] = host_state
+ self.host_state_map[state_key] = host_state
host_state.update_from_compute_node(compute)
return self.host_state_map.itervalues()
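After this change, both service_states and host_state_map are keyed by a (host, hypervisor_hostname) tuple, so two nodes behind the same nova-compute service get independent HostState entries. A minimal illustration with invented data:

    # Minimal illustration of (host, node) keying; values are invented.
    host_state_map = {}
    for node in ('bm-node-1', 'bm-node-2'):
        state_key = ('bm-compute-1', node)
        host_state_map[state_key] = {'nodename': node, 'free_ram_mb': 512}
    assert len(host_state_map) == 2  # one HostState per node, same host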
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index a7b1b3061..d47a861c3 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -61,6 +61,7 @@ from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova import utils
+from nova.virt import fake
from nova.volume import cinder
@@ -73,6 +74,8 @@ CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
FAKE_IMAGE_REF = 'fake-image-ref'
+NODENAME = 'fakenode1'
+
def nop_report_driver_status(self):
pass
@@ -101,12 +104,13 @@ class BaseTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
+ fake.set_nodes([NODENAME])
self.compute = importutils.import_object(FLAGS.compute_manager)
# override tracker with a version that doesn't need the database:
- self.compute.resource_tracker = \
- fake_resource_tracker.FakeResourceTracker(self.compute.host,
- self.compute.driver)
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, NODENAME)
+ self.compute._resource_tracker_dict[NODENAME] = fake_rt
self.compute.update_available_resource(
context.get_admin_context())
@@ -137,6 +141,7 @@ class BaseTestCase(test.TestCase):
notifier_api._reset_drivers()
for instance in instances:
db.instance_destroy(self.context.elevated(), instance['uuid'])
+ fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
@@ -152,6 +157,7 @@ class BaseTestCase(test.TestCase):
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
@@ -210,7 +216,7 @@ class ComputeTestCase(BaseTestCase):
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
- self.rt = self.compute.resource_tracker
+ self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -358,7 +364,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -385,7 +391,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -412,7 +418,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties = {'limits': limits}
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying an amount of memory that exceeds
@@ -459,7 +465,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
@@ -485,7 +491,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
new file mode 100644
index 000000000..830897954
--- /dev/null
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -0,0 +1,99 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service with multiple compute nodes"""
+
+import mox
+
+from nova import config
+from nova import context
+from nova import exception
+from nova.openstack.common import importutils
+from nova import test
+from nova.virt import fake
+
+
+CONF = config.CONF
+
+
+class BaseTestCase(test.TestCase):
+ def tearDown(self):
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+
+class FakeDriverSingleNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverSingleNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['xyz'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, dict))
+ self.assertEqual(stats['hypervisor_hostname'], 'xyz')
+
+ def test_get_available_resource(self):
+ res = self.driver.get_available_resource('xyz')
+ self.assertEqual(res['hypervisor_hostname'], 'xyz')
+
+
+class FakeDriverMultiNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverMultiNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['aaa', 'bbb'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, list))
+ self.assertEqual(len(stats), 2)
+ self.assertEqual(stats[0]['hypervisor_hostname'], 'aaa')
+ self.assertEqual(stats[1]['hypervisor_hostname'], 'bbb')
+
+ def test_get_available_resource(self):
+ res_a = self.driver.get_available_resource('aaa')
+ self.assertEqual(res_a['hypervisor_hostname'], 'aaa')
+
+ res_b = self.driver.get_available_resource('bbb')
+ self.assertEqual(res_b['hypervisor_hostname'], 'bbb')
+
+ self.assertRaises(exception.NovaException,
+ self.driver.get_available_resource, 'xxx')
+
+
+class MultiNodeComputeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MultiNodeComputeTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+ self.compute = importutils.import_object(CONF.compute_manager)
+
+ def test_update_available_resource_add_remove_node(self):
+ ctx = context.get_admin_context()
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
+
+ fake.set_nodes(['A', 'B'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B'])
+
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 64cdb8d53..3997de133 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -43,7 +43,7 @@ class UnsupportedVirtDriver(driver.ComputeDriver):
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
@@ -59,7 +59,7 @@ class FakeVirtDriver(driver.ComputeDriver):
self.memory_mb_used = 0
self.local_gb_used = 0
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
@@ -86,8 +86,8 @@ class BaseTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake')
self._instances = {}
- self.stubs.Set(db, 'instance_get_all_by_host',
- lambda c, h: self._instances.values())
+ self.stubs.Set(db, 'instance_get_all_by_host_and_node',
+ lambda c, h, n: self._instances.values())
self.stubs.Set(db, 'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
@@ -106,7 +106,8 @@ class BaseTestCase(test.TestCase):
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
- "stats": [{"key": "num_instances", "value": "1"}]
+ "stats": [{"key": "num_instances", "value": "1"}],
+ "hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
@@ -155,13 +156,14 @@ class BaseTestCase(test.TestCase):
def _tracker(self, unsupported=False):
host = "fakehost"
+ node = "fakenode"
if unsupported:
driver = UnsupportedVirtDriver()
else:
driver = FakeVirtDriver()
- tracker = resource_tracker.ResourceTracker(host, driver)
+ tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 58aa2ffc7..3c7b462d0 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -29,38 +29,42 @@ COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
free_disk_mb=512, local_gb_used=0, updated_at=None,
- service=dict(host='host1', disabled=False)),
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1'),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_mb=1024, local_gb_used=0, updated_at=None,
- service=dict(host='host2', disabled=True)),
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2'),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
free_disk_mb=3072, local_gb_used=0, updated_at=None,
- service=dict(host='host3', disabled=False)),
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3'),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_mb=8192, local_gb_used=0, updated_at=None,
- service=dict(host='host4', disabled=False)),
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4'),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host1'),
+ host='host1', node='node1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host3'),
+ host='host3', node='node3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host5'),
+ host='host5', node='node5'),
]
@@ -96,8 +100,8 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
- def __init__(self, host, attribute_dict):
- super(FakeHostState, self).__init__(host)
+ def __init__(self, host, node, attribute_dict):
+ super(FakeHostState, self).__init__(host, node)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 4238ad916..6add77cbe 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -183,7 +183,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched = fakes.FakeFilterScheduler()
def _return_hosts(*args, **kwargs):
- host_state = host_manager.HostState('host2')
+ host_state = host_manager.HostState('host2', 'node2')
return [least_cost.WeightedHost(1.0, host_state=host_state)]
self.stubs.Set(sched, '_schedule', _return_hosts)
@@ -209,7 +209,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEquals(len(fns), 1)
weight, fn = fns[0]
self.assertEquals(weight, -1.0)
- hostinfo = host_manager.HostState('host')
+ hostinfo = host_manager.HostState('host', 'node')
hostinfo.update_from_compute_node(dict(memory_mb=1000,
local_gb=0, vcpus=1, disk_available_least=1000,
free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
@@ -307,7 +307,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
- host_state = host_manager.HostState('host')
+ host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
sched._post_select_populate_filter_properties(filter_properties,
host_state)
@@ -331,7 +331,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
reservations = None
- host = fakes.FakeHostState('host', {})
+ host = fakes.FakeHostState('host', 'node', {})
weighted_host = least_cost.WeightedHost(1, host)
hosts = [weighted_host]
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index d2facbfc5..6dffe061e 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -291,7 +291,7 @@ class HostFiltersTestCase(test.TestCase):
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
@@ -301,7 +301,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -314,7 +314,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -327,7 +327,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -340,7 +340,11 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host2'})
+ instance_uuid = instance.uuid
+
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
@@ -348,7 +352,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -361,7 +365,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -374,7 +378,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -387,7 +391,11 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host2'})
+ instance_uuid = instance.uuid
+
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': None}
@@ -395,7 +403,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -409,7 +417,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -423,7 +431,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
affinity_ip.append('100')
@@ -440,7 +448,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -456,7 +464,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since empty
@@ -482,7 +490,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_type': {'name': 'fake2'}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since no aggregates
@@ -501,7 +509,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -513,7 +521,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -525,7 +533,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -539,7 +547,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -552,7 +560,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -566,7 +574,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -581,7 +589,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -592,7 +600,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -603,7 +611,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -614,7 +622,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -625,7 +633,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -640,7 +648,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -654,7 +662,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -667,7 +675,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -680,7 +688,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'xen', 'xen')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -691,7 +699,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -703,7 +711,7 @@ class HostFiltersTestCase(test.TestCase):
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -715,7 +723,7 @@ class HostFiltersTestCase(test.TestCase):
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
assertion = self.assertTrue if passes else self.assertFalse
@@ -740,7 +748,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -760,7 +768,8 @@ class HostFiltersTestCase(test.TestCase):
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
- host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
@@ -774,7 +783,8 @@ class HostFiltersTestCase(test.TestCase):
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
db.aggregate_host_delete(self.context.elevated(), agg2.id, 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -800,7 +810,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
@@ -811,7 +821,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('isolated', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
@@ -822,7 +832,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('isolated', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
@@ -833,7 +843,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
@@ -843,7 +853,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -855,7 +865,7 @@ class HostFiltersTestCase(test.TestCase):
'root_gb': 200,
'ephemeral_gb': 0}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0,
'capabilities': capabilities})
@@ -868,7 +878,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -881,7 +891,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1,
'capabilities': capabilities})
@@ -898,7 +908,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -915,7 +925,7 @@ class HostFiltersTestCase(test.TestCase):
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -943,7 +953,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
@@ -953,7 +963,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -963,7 +973,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -973,7 +983,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
@@ -983,7 +993,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -993,7 +1003,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1002,7 +1012,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
# (operator, arguments, expected_result)
ops_to_test = [
@@ -1071,14 +1081,14 @@ class HostFiltersTestCase(test.TestCase):
'query': jsonutils.dumps(raw),
},
}
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = []
@@ -1098,7 +1108,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
@@ -1119,7 +1129,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['=', '$........', 1, 1]
@@ -1142,7 +1152,7 @@ class HostFiltersTestCase(test.TestCase):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
@@ -1153,7 +1163,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
@@ -1164,7 +1174,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
@@ -1175,7 +1185,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
@@ -1186,28 +1196,28 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1227,27 +1237,29 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
- host = fakes.FakeHostState('host1', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
- host = fakes.FakeHostState('host1', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
"""Test case where retry/re-scheduling is disabled"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
"""Host not previously tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1, hosts=['host2', 'host3'])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1255,7 +1267,7 @@ class HostFiltersTestCase(test.TestCase):
def test_retry_filter_fail(self):
"""Host was already tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1, hosts=['host3', 'host1'])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1263,25 +1275,29 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', {'num_io_ops': 7})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', {'num_io_ops': 8})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 8})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', {'num_instances': 4})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', {'num_instances': 5})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
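For reference, a minimal sketch of what the updated fakes.FakeHostState used throughout these tests might look like; the actual helper lives in nova/tests/scheduler/fakes.py, so treat this as an illustration of the new (host, node) signature rather than the real implementation:

    # Illustrative sketch: FakeHostState threads a node name through to
    # host_manager.HostState, matching the (host, node) pairs above.
    from nova.scheduler import host_manager

    class FakeHostState(host_manager.HostState):
        def __init__(self, host, node, attribute_dict):
            super(FakeHostState, self).__init__(host, node)
            # Copy test-supplied attributes (vcpus_total, num_io_ops, ...)
            # straight onto the state object.
            for (key, val) in attribute_dict.iteritems():
                setattr(self, key, val)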
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index b95803d8e..d7d732d34 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -44,6 +44,10 @@ class HostManagerTestCase(test.TestCase):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
+ def tearDown(self):
+ timeutils.clear_time_override()
+ super(HostManagerTestCase, self).tearDown()
+
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='ComputeFilterClass3')
self.host_manager.filter_classes = [ComputeFilterClass1,
@@ -64,8 +68,8 @@ class HostManagerTestCase(test.TestCase):
def test_filter_hosts(self):
filters = ['fake-filter1', 'fake-filter2']
- fake_host1 = host_manager.HostState('host1')
- fake_host2 = host_manager.HostState('host2')
+ fake_host1 = host_manager.HostState('host1', 'node1')
+ fake_host2 = host_manager.HostState('host2', 'node2')
hosts = [fake_host1, fake_host2]
filter_properties = {'fake_prop': 'fake_val'}
@@ -94,8 +98,9 @@ class HostManagerTestCase(test.TestCase):
timeutils.utcnow().AndReturn(31339)
host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
- timestamp=1)
- host2_compute_capabs = dict(free_memory=8756, timestamp=1)
+ timestamp=1, hypervisor_hostname='node1')
+ host2_compute_capabs = dict(free_memory=8756, timestamp=1,
+ hypervisor_hostname='node2')
self.mox.ReplayAll()
self.host_manager.update_service_capabilities('compute', 'host1',
@@ -109,8 +114,27 @@ class HostManagerTestCase(test.TestCase):
host1_compute_capabs['timestamp'] = 31337
host2_compute_capabs['timestamp'] = 31339
- expected = {'host1': host1_compute_capabs,
- 'host2': host2_compute_capabs}
+ expected = {('host1', 'node1'): host1_compute_capabs,
+ ('host2', 'node2'): host2_compute_capabs}
+ self.assertDictMatch(service_states, expected)
+
+ def test_update_service_capabilities_node_key(self):
+ service_states = self.host_manager.service_states
+ self.assertDictMatch(service_states, {})
+
+ host1_cap = {'hypervisor_hostname': 'host1-hvhn'}
+ host2_cap = {}
+
+ timeutils.set_time_override(31337)
+ self.host_manager.update_service_capabilities('compute', 'host1',
+ host1_cap)
+ timeutils.set_time_override(31338)
+ self.host_manager.update_service_capabilities('compute', 'host2',
+ host2_cap)
+ host1_cap['timestamp'] = 31337
+ host2_cap['timestamp'] = 31338
+ expected = {('host1', 'host1-hvhn'): host1_cap,
+ ('host2', None): host2_cap}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states(self):
@@ -133,20 +157,30 @@ class HostManagerTestCase(test.TestCase):
for i in xrange(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
- self.assertEqual(host_states_map[host].service,
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
compute_node['service'])
- self.assertEqual(host_states_map['host1'].free_ram_mb, 512)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
# 511GB
- self.assertEqual(host_states_map['host1'].free_disk_mb, 524288)
- self.assertEqual(host_states_map['host2'].free_ram_mb, 1024)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
# 1023GB
- self.assertEqual(host_states_map['host2'].free_disk_mb, 1048576)
- self.assertEqual(host_states_map['host3'].free_ram_mb, 3072)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
# 3071GB
- self.assertEqual(host_states_map['host3'].free_disk_mb, 3145728)
- self.assertEqual(host_states_map['host4'].free_ram_mb, 8192)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
# 8191GB
- self.assertEqual(host_states_map['host4'].free_disk_mb, 8388608)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
class HostStateTestCase(test.TestCase):
@@ -156,7 +190,7 @@ class HostStateTestCase(test.TestCase):
# in HostManagerTestCase.test_get_all_host_states()
def test_host_state_passes_filters_passes(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {}
cls1 = ComputeFilterClass1()
@@ -173,7 +207,7 @@ class HostStateTestCase(test.TestCase):
self.assertTrue(result)
def test_host_state_passes_filters_passes_with_ignore(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {'ignore_hosts': ['host2']}
cls1 = ComputeFilterClass1()
@@ -190,7 +224,7 @@ class HostStateTestCase(test.TestCase):
self.assertTrue(result)
def test_host_state_passes_filters_fails(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {}
cls1 = ComputeFilterClass1()
@@ -207,7 +241,7 @@ class HostStateTestCase(test.TestCase):
self.assertFalse(result)
def test_host_state_passes_filters_fails_from_ignore(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {'ignore_hosts': ['host1']}
cls1 = ComputeFilterClass1()
@@ -224,7 +258,7 @@ class HostStateTestCase(test.TestCase):
self.assertFalse(result)
def test_host_state_passes_filters_skipped_from_force(self):
- fake_host = host_manager.HostState('host1')
+ fake_host = host_manager.HostState('host1', 'node1')
filter_properties = {'force_hosts': ['host1']}
cls1 = ComputeFilterClass1()
@@ -257,7 +291,7 @@ class HostStateTestCase(test.TestCase):
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
updated_at=None)
- host = host_manager.HostState("fakehost")
+ host = host_manager.HostState("fakehost", "fakenode")
host.update_from_compute_node(compute)
self.assertEqual(5, host.num_instances)
@@ -272,7 +306,7 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(42, host.num_io_ops)
def test_stat_consumption_from_instance(self):
- host = host_manager.HostState("fakehost")
+ host = host_manager.HostState("fakehost", "fakenode")
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
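The tests above rely on service_states now being keyed by (host, node) tuples, with the node half taken from the reported hypervisor_hostname, or None when a driver reports none. A minimal sketch of that keying logic, written to match what the tests exercise rather than copied from host_manager.py:

    # Sketch, assuming update_service_capabilities records reports this way.
    def update_service_capabilities(self, service_name, host, capabilities):
        capab = dict(capabilities)               # copy; don't mutate caller's dict
        capab['timestamp'] = timeutils.utcnow()  # tests override this clock
        node = capab.get('hypervisor_hostname')  # None for bare reports
        self.service_states[(host, node)] = capab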
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index e64cc9e81..3689a30bd 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -95,7 +95,7 @@ class TestWeightedHost(test.TestCase):
self.assertDictMatch(host.to_dict(), expected)
def test_dict_conversion_with_host_state(self):
- host_state = host_manager.HostState('somehost')
+ host_state = host_manager.HostState('somehost', None)
host = least_cost.WeightedHost('someweight', host_state)
expected = {'weight': 'someweight',
'host': 'somehost'}
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 6d2396350..b995be3bd 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -155,7 +155,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
super(HyperVAPITestCase, self).tearDown()
def test_get_available_resource(self):
- dic = self._conn.get_available_resource()
+ dic = self._conn.get_available_resource(None)
self.assertEquals(dic['hypervisor_hostname'], platform.node())
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index b0576aa38..4e63d35ed 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -661,7 +661,7 @@ class BareMetalDriver(driver.ComputeDriver):
# Bare metal doesn't currently support security groups
pass
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
This method is called when nova-compute launches, and
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index cb960466f..d5edcffed 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -347,12 +347,15 @@ class ComputeDriver(object):
"""Restore the specified instance"""
raise NotImplementedError()
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
+ :param nodename:
+ node from which the caller wants to get resources;
+ a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
@@ -735,3 +738,16 @@ class ComputeDriver(object):
}
"""
raise NotImplementedError()
+
+ def get_available_nodes(self):
+ """Returns nodenames of all nodes managed by the compute service.
+
+ This method is for multi-node support. If a driver manages
+ multiple compute nodes, it returns a list of the nodenames managed
+ by the service. Otherwise, it should return
+ [hypervisor_hostname].
+ """
+ stats = self.get_host_stats(refresh=True)
+ if not isinstance(stats, list):
+ stats = [stats]
+ return [s['hypervisor_hostname'] for s in stats]
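Taken together, get_available_nodes() and the new nodename parameter give callers a simple per-node iteration pattern. A hedged sketch of how a consumer might combine them; update_compute_node here is a hypothetical helper, and the real compute-manager wiring lands elsewhere in this patch series:

    # Illustrative only: single-node drivers return [hypervisor_hostname]
    # and may ignore nodename, so this loop degenerates to one iteration.
    for nodename in driver.get_available_nodes():
        resources = driver.get_available_resource(nodename)
        update_compute_node(context, nodename, resources)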
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 79d98c5cf..f2415e131 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -36,6 +36,32 @@ from nova.virt import virtapi
LOG = logging.getLogger(__name__)
+_FAKE_NODES = ['fake-mini']
+
+
+def set_nodes(nodes):
+ """Sets FakeDriver's node.list.
+
+ It has effect on the following methods:
+ get_available_nodes()
+ get_available_resource
+ get_host_stats()
+
+ To restore the change, call restore_nodes()
+ """
+ global _FAKE_NODES
+ _FAKE_NODES = nodes
+
+
+def restore_nodes():
+ """Resets FakeDriver's node list modified by set_nodes().
+
+ Usually called from tearDown().
+ """
+ global _FAKE_NODES
+ _FAKE_NODES = ['fake-mini']
+
+
class FakeInstance(object):
def __init__(self, name, state):
@@ -56,7 +82,7 @@ class FakeDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
- self.host_status = {
+ self.host_status_base = {
'host_name-description': 'Fake Host',
'host_hostname': 'fake-mini',
'host_memory_total': 8000000000,
@@ -252,12 +278,14 @@ class FakeDriver(driver.ComputeDriver):
def refresh_provider_fw_rules(self):
pass
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
+ if nodename not in _FAKE_NODES:
+ raise exception.NovaException("node %s is not found" % nodename)
dic = {'vcpus': 1,
'memory_mb': 8192,
@@ -267,7 +295,7 @@ class FakeDriver(driver.ComputeDriver):
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
- 'hypervisor_hostname': 'fake-mini',
+ 'hypervisor_hostname': nodename,
'cpu_info': '?'}
return dic
@@ -319,7 +347,19 @@ class FakeDriver(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
- return self.host_status
+ stats = []
+ for nodename in _FAKE_NODES:
+ host_status = self.host_status_base.copy()
+ host_status['hypervisor_hostname'] = nodename
+ host_status['host_hostname'] = nodename
+ host_status['host_name_label'] = nodename
+ stats.append(host_status)
+ if len(stats) == 0:
+ raise exception.NovaException("FakeDriver has no node")
+ elif len(stats) == 1:
+ return stats[0]
+ else:
+ return stats
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
@@ -341,6 +381,9 @@ class FakeDriver(driver.ComputeDriver):
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
+ def get_available_nodes(self):
+ return _FAKE_NODES
+
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
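set_nodes() and restore_nodes() are module-level so that tests can swap in an arbitrary node list and put it back afterwards. A hedged usage sketch; the test class and assertions are hypothetical:

    from nova import test
    from nova.virt import fake

    class FakeDriverNodesTestCase(test.TestCase):
        def setUp(self):
            super(FakeDriverNodesTestCase, self).setUp()
            fake.set_nodes(['nodeA', 'nodeB'])

        def tearDown(self):
            fake.restore_nodes()   # back to ['fake-mini']
            super(FakeDriverNodesTestCase, self).tearDown()

        def test_two_nodes(self):
            drv = fake.FakeDriver(fake.FakeVirtAPI())
            self.assertEqual(drv.get_available_nodes(), ['nodeA', 'nodeB'])
            # With more than one node, get_host_stats returns a list.
            self.assertEqual(len(drv.get_host_stats(refresh=True)), 2)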
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 6d9f66ff8..c9a7ed855 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -122,7 +122,7 @@ class HyperVDriver(driver.ComputeDriver):
def poll_rescued_instances(self, timeout):
pass
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_host_stats(self, refresh=False):
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 7f962b45d..b0806537b 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -2204,12 +2204,13 @@ class LibvirtDriver(driver.ComputeDriver):
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
+ :param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
dic = {'vcpus': self.get_vcpu_total(),
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 7a0da0b88..eea6e482d 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -142,7 +142,7 @@ class PowerVMDriver(driver.ComputeDriver):
"""Power on the specified instance"""
self._powervm.power_on(instance['name'])
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info."""
return self._powervm.get_available_resource()
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 25cd05778..9a8d350d9 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -197,7 +197,7 @@ class VMWareESXDriver(driver.ComputeDriver):
'username': CONF.vmwareapi_host_username,
'password': CONF.vmwareapi_host_password}
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""This method is supported only by libvirt."""
return
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 55b67a931..ce45ca9de 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -390,12 +390,13 @@ class XenAPIDriver(driver.ComputeDriver):
'username': CONF.xenapi_connection_username,
'password': CONF.xenapi_connection_password}
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
+ :param nodename: ignored in this driver
:returns: dictionary describing resources
"""