summary | refs | log | tree | commit | diff | stats
path: root/nova/scheduler
diff options
context:
space:
mode:
author: Chris Behrens <cbehrens@codestud.com> 2012-11-09 10:12:03 +0000
committer: Chris Behrens <cbehrens@codestud.com> 2012-11-09 10:12:03 +0000
commit: 7fc83893587d4e48cbfeabdfebe62aa71c66ab35 (patch)
tree: d87d99f760bcbbcd73146bb2e6d4e528dad1a317 /nova/scheduler
parent: ab9ee3d232e0000dd8b8f1d2623d8ac72a6c9247 (diff)
Remove generic topic support from filter scheduler
The only topic that we do scheduling on is 'compute', so clean up all of the cases where we were checking this.

Change-Id: If16eaf48a89342ca99a741c25679389e706e0e48
Diffstat (limited to 'nova/scheduler')
-rw-r--r--  nova/scheduler/filter_scheduler.py           32
-rw-r--r--  nova/scheduler/filters/compute_filter.py      3
-rw-r--r--  nova/scheduler/filters/core_filter.py         3
-rw-r--r--  nova/scheduler/host_manager.py               38
4 files changed, 29 insertions, 47 deletions
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index b2802931c..776797d68 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -39,7 +39,6 @@ class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
- self.cost_function_cache = {}
self.options = scheduler_options.SchedulerOptions()
def schedule_run_instance(self, context, request_spec,
@@ -61,9 +60,8 @@ class FilterScheduler(driver.Scheduler):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)
- weighted_hosts = self._schedule(context, CONF.compute_topic,
- request_spec, filter_properties,
- instance_uuids)
+ weighted_hosts = self._schedule(context, request_spec,
+ filter_properties, instance_uuids)
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
@@ -108,8 +106,8 @@ class FilterScheduler(driver.Scheduler):
the prep_resize operation to it.
"""
- hosts = self._schedule(context, CONF.compute_topic, request_spec,
- filter_properties, [instance['uuid']])
+ hosts = self._schedule(context, request_spec, filter_properties,
+ [instance['uuid']])
if not hosts:
raise exception.NoValidHost(reason="")
host = hosts.pop(0)
@@ -220,16 +218,12 @@ class FilterScheduler(driver.Scheduler):
"instance %(instance_uuid)s") % locals()
raise exception.NoValidHost(reason=msg)
- def _schedule(self, context, topic, request_spec, filter_properties,
+ def _schedule(self, context, request_spec, filter_properties,
instance_uuids=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
- if topic != CONF.compute_topic:
- msg = _("Scheduler only understands Compute nodes (for now)")
- raise NotImplementedError(msg)
-
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
@@ -260,8 +254,7 @@ class FilterScheduler(driver.Scheduler):
# Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
- hosts = self.host_manager.get_all_host_states(
- elevated, topic)
+ hosts = self.host_manager.get_all_host_states(elevated)
selected_hosts = []
if instance_uuids:
@@ -297,15 +290,12 @@ class FilterScheduler(driver.Scheduler):
selected_hosts.sort(key=operator.attrgetter('weight'))
return selected_hosts
- def get_cost_functions(self, topic=None):
+ def get_cost_functions(self):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
- if topic is None:
- # Schedulers only support compute right now.
- topic = CONF.compute_topic
- if topic in self.cost_function_cache:
- return self.cost_function_cache[topic]
+ if getattr(self, 'cost_function_cache', None) is not None:
+ return self.cost_function_cache
cost_fns = []
for cost_fn_str in CONF.least_cost_functions:
@@ -315,7 +305,7 @@ class FilterScheduler(driver.Scheduler):
short_name = cost_fn_str
cost_fn_str = "%s.%s.%s" % (
__name__, self.__class__.__name__, short_name)
- if not (short_name.startswith('%s_' % topic) or
+ if not (short_name.startswith('compute_') or
short_name.startswith('noop')):
continue
@@ -336,5 +326,5 @@ class FilterScheduler(driver.Scheduler):
flag_name=flag_name)
cost_fns.append((weight, cost_fn))
- self.cost_function_cache[topic] = cost_fns
+ self.cost_function_cache = cost_fns
return cost_fns
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 202f8232a..019068269 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import utils
@@ -30,7 +29,7 @@ class ComputeFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Returns True for only active compute nodes"""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
+ if not instance_type:
return True
capabilities = host_state.capabilities
service = host_state.service
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 9c93df930..aec05ecc0 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -16,7 +16,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -38,7 +37,7 @@ class CoreFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
+ if not instance_type:
return True
if not host_state.vcpus_total:
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 1f4b6d956..faaf3e258 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -24,7 +24,6 @@ from nova.compute import vm_states
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -94,10 +93,9 @@ class HostState(object):
previously used and lock down access.
"""
- def __init__(self, host, topic, capabilities=None, service=None):
+ def __init__(self, host, capabilities=None, service=None):
self.host = host
- self.topic = topic
- self.update_capabilities(topic, capabilities, service)
+ self.update_capabilities(capabilities, service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
@@ -126,12 +124,12 @@ class HostState(object):
self.updated = None
- def update_capabilities(self, topic, capabilities=None, service=None):
+ def update_capabilities(self, capabilities=None, service=None):
# Read-only capability dicts
if capabilities is None:
capabilities = {}
- self.capabilities = ReadOnlyDict(capabilities.get(topic, None))
+ self.capabilities = ReadOnlyDict(capabilities)
if service is None:
service = {}
self.service = ReadOnlyDict(service)
@@ -326,28 +324,24 @@ class HostManager(object):
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
+
+ if service_name != 'compute':
+ LOG.debug(_('Ignoring %(service_name)s service update '
+ 'from %(host)s'), locals())
+ return
+
LOG.debug(_("Received %(service_name)s service update from "
- "%(host)s.") % locals())
- service_caps = self.service_states.get(host, {})
+ "%(host)s."), locals())
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
- service_caps[service_name] = capab_copy
- self.service_states[host] = service_caps
+ self.service_states[host] = capab_copy
- def get_all_host_states(self, context, topic):
+ def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
the HostManager knows about. Also, each of the consumable resources
in HostState are pre-populated and adjusted based on data in the db.
-
- Note: this can be very slow with a lot of instances.
- InstanceType table isn't required since a copy is stored
- with the instance (in case the InstanceType changed since the
- instance was created)."""
-
- if topic != CONF.compute_topic:
- raise NotImplementedError(_(
- "host_manager only implemented for 'compute'"))
+ """
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
@@ -360,10 +354,10 @@ class HostManager(object):
capabilities = self.service_states.get(host, None)
host_state = self.host_state_map.get(host)
if host_state:
- host_state.update_capabilities(topic, capabilities,
+ host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
- host_state = self.host_state_cls(host, topic,
+ host_state = self.host_state_cls(host,
capabilities=capabilities,
service=dict(service.iteritems()))
self.host_state_map[host] = host_state