author     Jenkins <jenkins@review.openstack.org>      2012-07-26 04:13:33 +0000
committer  Gerrit Code Review <review@openstack.org>   2012-07-26 04:13:33 +0000
commit     8207cd84440a426cd4035c068aeb7de71eb4c70a (patch)
tree       7ab1cc2f455896cb8c136a4155f9cc0524d86464 /nova
parent     2999e1143dafe175a494ee648d44b3c5dd147959 (diff)
parent     32bb352f4cf413397e8616e898ee9a52bbbe0a55 (diff)
Merge "Shrink Simple Scheduler"
Diffstat (limited to 'nova')
-rw-r--r--  nova/scheduler/simple.py                      71
-rw-r--r--  nova/tests/scheduler/test_multi_scheduler.py  40
2 files changed, 43 insertions(+), 68 deletions(-)
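
The merge below strips SimpleScheduler down to a volume-only scheduler: the compute-scheduling options (max_cores, max_networks, skip_isolated_core_check) and the instance-scheduling methods are removed, leaving only max_gigabytes and schedule_create_volume. As a reading aid, here is a minimal standalone sketch (not the Nova source) of the "least loaded host" policy the surviving volume path describes; the tuple layout, helper name, and exception class here are illustrative assumptions.

# Illustrative sketch only -- not nova/scheduler/simple.py. It mirrors the
# policy described by the diff below: walk hosts ordered by current usage
# and pick the first one that is up, not disabled, and still under the
# max_gigabytes cap.

MAX_GIGABYTES = 10000  # same default as the max_gigabytes option kept below


class NoValidHost(Exception):
    """Raised when no host can accept the requested volume."""


def pick_volume_host(hosts_sorted_by_usage, volume_size_gb):
    """Return the least loaded host that can hold ``volume_size_gb``.

    ``hosts_sorted_by_usage`` is assumed to be a list of
    (host_name, used_gigabytes, is_up, is_disabled) tuples sorted by
    used_gigabytes ascending.
    """
    for host, used_gb, is_up, is_disabled in hosts_sorted_by_usage:
        if used_gb + volume_size_gb > MAX_GIGABYTES:
            # The list is sorted, so every later host is at least as full.
            raise NoValidHost("Not enough allocatable volume gigabytes remaining")
        if is_up and not is_disabled:
            return host
    raise NoValidHost("Is the appropriate service running?")


if __name__ == '__main__':
    hosts = [('host-a', 200, True, False), ('host-b', 9500, True, False)]
    assert pick_volume_host(hosts, 100) == 'host-a'
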
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 820eaaa35..45e6dc4cb 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -18,7 +18,9 @@
 # under the License.

 """
-Simple Scheduler
+Simple Scheduler - for Volumes
+
+Note: Deprecated in Folsom. Will be removed along with nova-volumes
 """

 from nova import db
@@ -31,18 +33,9 @@ from nova import utils


 simple_scheduler_opts = [
-    cfg.IntOpt("max_cores",
-               default=16,
-               help="maximum number of instance cores to allow per host"),
     cfg.IntOpt("max_gigabytes",
                default=10000,
                help="maximum number of volume gigabytes to allow per host"),
-    cfg.IntOpt("max_networks",
-               default=1000,
-               help="maximum number of networks to allow per host"),
-    cfg.BoolOpt('skip_isolated_core_check',
-                default=True,
-                help='Allow overcommitting vcpus on isolated hosts'),
     ]

 FLAGS = flags.FLAGS
@@ -52,64 +45,6 @@ FLAGS.register_opts(simple_scheduler_opts)
 class SimpleScheduler(chance.ChanceScheduler):
     """Implements Naive Scheduler that tries to find least loaded host."""

-    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
-        """Picks a host that is up and has the fewest running instances."""
-        elevated = context.elevated()
-
-        availability_zone = instance_opts.get('availability_zone')
-
-        zone, host = FLAGS.default_schedule_zone, None
-        if availability_zone:
-            zone, _x, host = availability_zone.partition(':')
-
-        if host and context.is_admin:
-            service = db.service_get_by_args(elevated, host, 'nova-compute')
-            if not utils.service_is_up(service):
-                raise exception.WillNotSchedule(host=host)
-            return host
-
-        results = db.service_get_all_compute_sorted(elevated)
-        in_isolation = instance_opts['image_ref'] in FLAGS.isolated_images
-        check_cores = not in_isolation or not FLAGS.skip_isolated_core_check
-        if zone:
-            results = [(service, cores) for (service, cores) in results
-                       if service['availability_zone'] == zone]
-        for result in results:
-            (service, instance_cores) = result
-            if in_isolation and service['host'] not in FLAGS.isolated_hosts:
-                # isolated images run on isolated hosts
-                continue
-            if service['host'] in FLAGS.isolated_hosts and not in_isolation:
-                # images that aren't isolated only run on general hosts
-                continue
-            if (check_cores and
-                instance_cores + instance_opts['vcpus'] > FLAGS.max_cores):
-                msg = _("Not enough allocatable CPU cores remaining")
-                raise exception.NoValidHost(reason=msg)
-            if utils.service_is_up(service) and not service['disabled']:
-                return service['host']
-        msg = _("Is the appropriate service running?")
-        raise exception.NoValidHost(reason=msg)
-
-    def schedule_run_instance(self, context, request_spec, reservations,
-                              *_args, **_kwargs):
-        num_instances = request_spec.get('num_instances', 1)
-        instances = []
-        for num in xrange(num_instances):
-            host = self._schedule_instance(context,
-                    request_spec['instance_properties'], *_args, **_kwargs)
-            request_spec['instance_properties']['launch_index'] = num
-            instance_ref = self.create_instance_db_entry(context,
-                    request_spec, reservations)
-            driver.cast_to_compute_host(context, host, 'run_instance',
-                    instance_uuid=instance_ref['uuid'], **_kwargs)
-            instances.append(driver.encode_instance(instance_ref))
-            # So if we loop around, create_instance_db_entry will actually
-            # create a new entry, instead of assuming it's been created
-            # already
-            del request_spec['instance_properties']['uuid']
-        return instances
-
     def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest volumes."""
         elevated = context.elevated()
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
index d6a59450e..fd79fffb3 100644
--- a/nova/tests/scheduler/test_multi_scheduler.py
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -143,3 +143,43 @@ class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
         mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
         self.assertTrue(mgr.drivers['compute'].is_update_caps_called)
         self.assertTrue(mgr.drivers['volume'].is_update_caps_called)
+
+
+class SimpleSchedulerTestCase(MultiDriverTestCase):
+    """Test case for simple driver."""
+
+    driver_cls = multi.MultiScheduler
+
+    def setUp(self):
+        super(SimpleSchedulerTestCase, self).setUp()
+        base_name = 'nova.tests.scheduler.test_multi_scheduler.%s'
+        compute_cls_name = base_name % 'FakeComputeScheduler'
+        volume_cls_name = 'nova.scheduler.simple.SimpleScheduler'
+        default_cls_name = base_name % 'FakeDefaultScheduler'
+        self.flags(compute_scheduler_driver=compute_cls_name,
+                   volume_scheduler_driver=volume_cls_name,
+                   default_scheduler_driver=default_cls_name)
+        self._manager = multi.MultiScheduler()
+
+    def test_update_service_capabilities(self):
+        def fake_update_service_capabilities(self, service, host, caps):
+            self.is_update_caps_called = True
+
+        mgr = self._manager
+        self.stubs.Set(driver.Scheduler,
+                       'update_service_capabilities',
+                       fake_update_service_capabilities)
+        self.assertFalse(mgr.drivers['compute'].is_update_caps_called)
+        mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
+        self.assertTrue(mgr.drivers['compute'].is_update_caps_called)
+        self.assertTrue(mgr.drivers['volume'].is_update_caps_called)
+
+    def test_drivers_inited(self):
+        mgr = self._manager
+        self.assertEqual(len(mgr.drivers), 3)
+        self.assertTrue(mgr.drivers['compute'].is_fake_compute)
+        self.assertTrue(mgr.drivers['volume'] is not None)
+        self.assertTrue(mgr.drivers['default'].is_fake_default)
+
+    def test_proxy_calls(self):
+        pass
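
For context on what the new SimpleSchedulerTestCase exercises, here is a minimal standalone sketch (assumptions only, not the Nova source) of the driver-dictionary shape it asserts against: a MultiScheduler-style wrapper holding 'compute', 'volume', and 'default' drivers and fanning update_service_capabilities out to all of them.

# Standalone sketch of the behaviour the tests above check; class and
# attribute names other than the dictionary keys are made up here.

class FakeDriver(object):
    def __init__(self):
        self.is_update_caps_called = False

    def update_service_capabilities(self, service, host, caps):
        self.is_update_caps_called = True


class MultiSchedulerSketch(object):
    def __init__(self):
        # The three entries test_drivers_inited counts.
        self.drivers = {'compute': FakeDriver(),
                        'volume': FakeDriver(),
                        'default': FakeDriver()}

    def update_service_capabilities(self, service, host, caps):
        # Every sub-driver sees the update, which is what
        # test_update_service_capabilities verifies.
        for drv in self.drivers.values():
            drv.update_service_capabilities(service, host, caps)


if __name__ == '__main__':
    mgr = MultiSchedulerSketch()
    mgr.update_service_capabilities('foo_svc', 'foo_host', 'foo_caps')
    assert mgr.drivers['compute'].is_update_caps_called
    assert mgr.drivers['volume'].is_update_caps_called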