author     Vishvananda Ishaya <vishvananda@yahoo.com>    2010-09-02 13:43:39 -0700
committer  Vishvananda Ishaya <vishvananda@yahoo.com>    2010-09-02 13:43:39 -0700
commit     57d02103c4d33c009fef0405e541eb30d273b21e (patch)
tree       586f2256f9981484bd63578aad28cc96766f5653
parent     e55234bd86b96aa3f05b06eb1efbb1baa61a93f1 (diff)
parent     f6be77447c625e16511611b74c77a4cb3baa9ee0 (diff)
fix test to specify host
 nova/compute/manager.py          |  2 +-
 nova/manager.py                  |  5 ++++-
 nova/network/manager.py          |  4 ++--
 nova/service.py                  |  6 ++++--
 nova/tests/scheduler_unittest.py | 30 ++++++++++++++++++++++++------
 nova/volume/manager.py           |  4 ++--
 6 files changed, 37 insertions(+), 14 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c15c9e1f5..1d6e0b556 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -74,7 +74,7 @@ class ComputeManager(manager.Manager):
         self.network_manager.setup_compute_network(context, project_id)
         db.instance_update(context,
                            instance_id,
-                           {'host': FLAGS.host})
+                           {'host': self.host})
 
         # TODO(vish) check to make sure the availability zone matches
         db.instance_state(context,
diff --git a/nova/manager.py b/nova/manager.py
index 4cc27f05b..e9aa50c56 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -30,7 +30,10 @@ flags.DEFINE_string('db_driver', 'nova.db.api',
 class Manager(object):
     """DB driver is injected in the init method"""
-    def __init__(self, db_driver=None):
+    def __init__(self, host=None, db_driver=None):
+        if not host:
+            host = FLAGS.host
+        self.host = host
         if not db_driver:
             db_driver = FLAGS.db_driver
         self.db = utils.import_object(db_driver)  # pylint: disable-msg=C0103
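
The substance of the change is this Manager base class: the host is now injected at construction time and only falls back to the process-wide flag when none is given. A minimal standalone sketch of the pattern, with a stub standing in for nova's real FLAGS object:

    # Minimal sketch of the host-injection pattern above; this FLAGS
    # class is a stand-in for nova.flags, not the real flags machinery.
    class FLAGS(object):
        host = 'default-host'

    class Manager(object):
        def __init__(self, host=None, db_driver=None):
            # Fall back to the global flag only when no host is given,
            # so tests can build several managers with distinct hosts.
            if not host:
                host = FLAGS.host
            self.host = host

    assert Manager(host='host1').host == 'host1'
    assert Manager().host == 'default-host'
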
diff --git a/nova/network/manager.py b/nova/network/manager.py
index dbb8e66da..785104b2f 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -93,7 +93,7 @@ class NetworkManager(manager.Manager):
network_id = network_ref['id']
host = self.db.network_set_host(context,
network_id,
- FLAGS.host)
+ self.host)
self._on_set_network_host(context, network_id)
return host
@@ -117,7 +117,7 @@ class NetworkManager(manager.Manager):
"""Gets an floating ip from the pool"""
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
- FLAGS.host,
+ self.host,
project_id)
def associate_floating_ip(self, context, floating_address, fixed_address):
diff --git a/nova/service.py b/nova/service.py
index e3104fbaa..738816631 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -44,11 +44,13 @@ flags.DEFINE_integer('report_interval', 10,
 class Service(object, service.Service):
     """Base class for workers that run on hosts."""
 
-    def __init__(self, host, binary, topic, manager, *args, **kwargs):
+    def __init__(self, host, binary, topic, manager,
+                 db_driver=None, *args, **kwargs):
         self.host = host
         self.binary = binary
         self.topic = topic
-        self.manager = utils.import_object(manager)
+        manager_class = utils.import_class(manager)
+        self.manager = manager_class(host, db_driver)
         self.model_disconnected = False
         super(Service, self).__init__(*args, **kwargs)
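
Service previously called utils.import_object, which builds an instance with no constructor arguments, so the manager could only discover its host via FLAGS.host. Importing the class instead lets the service pass its own host (and an optional db_driver) down to the manager it owns. A rough stand-in for the resolution step, assuming the nova helper behaves as just described:

    import importlib

    def import_class(import_str):
        # Hypothetical equivalent of nova's utils.import_class: resolve
        # a dotted path to the class object without instantiating it.
        module_name, _, class_name = import_str.rpartition('.')
        return getattr(importlib.import_module(module_name), class_name)

    # The service can then construct its manager with an explicit host:
    # manager_class = import_class(FLAGS.compute_manager)
    # self.manager = manager_class(self.host, db_driver)
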
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index d3616dd6f..4d4f649d0 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -29,9 +29,11 @@ from nova import test
 from nova import utils
 from nova.auth import manager as auth_manager
 from nova.scheduler import manager
+from nova.scheduler import driver
 
 FLAGS = flags.FLAGS
+flags.DECLARE('max_instances', 'nova.scheduler.simple')
 
 
 class SchedulerTestCase(test.TrialTestCase):
@@ -39,6 +41,7 @@ class SchedulerTestCase(test.TrialTestCase):
     def setUp(self):  # pylint: disable-msg=C0103
         super(SchedulerTestCase, self).setUp()
         self.flags(connection_type='fake',
+                   max_instances=4,
                    scheduler_driver='nova.scheduler.simple.SimpleScheduler')
         self.scheduler = manager.SchedulerManager()
         self.context = None
@@ -75,14 +78,14 @@ class SchedulerTestCase(test.TrialTestCase):
                                    'nova-compute',
                                    'compute',
                                    FLAGS.compute_manager)
+        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+        self.assertEqual(len(hosts), 0)
         service1.report_state()
         service2.report_state()
         hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
         self.assertEqual(len(hosts), 2)
 
     def test_least_busy_host_gets_instance(self):
-        # NOTE(vish): constructing service without create method
-        #             because we are going to use it without queue
         service1 = service.Service('host1',
                                    'nova-compute',
                                    'compute',
@@ -94,10 +97,25 @@ class SchedulerTestCase(test.TrialTestCase):
         service1.report_state()
         service2.report_state()
         instance_id = self._create_instance()
-        FLAGS.host = 'host1'
-        service1.run_instance(self.context,
-                              instance_id)
-        print type(self.scheduler.driver)
+        service1.run_instance(self.context, instance_id)
         host = self.scheduler.driver.pick_compute_host(self.context,
                                                        instance_id)
         self.assertEqual(host, 'host2')
+        service1.terminate_instance(self.context, instance_id)
+
+    def test_too_many_instances(self):
+        service1 = service.Service('host',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        instance_ids = []
+        for index in xrange(FLAGS.max_instances):
+            instance_ids.append(self._create_instance())
+            service1.run_instance(self.context, instance_ids[index])
+        instance_id = self._create_instance()
+        self.assertRaises(driver.NoValidHost,
+                          self.scheduler.driver.pick_compute_host,
+                          self.context,
+                          instance_id)
+        for instance_id in instance_ids:
+            service1.terminate_instance(self.context, instance_id)
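
With the host threaded through the constructors, the test above no longer has to mutate the global FLAGS.host between calls (the deleted FLAGS.host = 'host1' line): each Service builds a manager bound to its own name, so two compute services can coexist in one process. Roughly, reusing the names from the diff:

    # Each service's manager records the service's own host, so
    # run_instance on service1 tags instances with 'host1' without
    # touching FLAGS.host (names as in the test above).
    service1 = service.Service('host1', 'nova-compute', 'compute',
                               FLAGS.compute_manager)
    service2 = service.Service('host2', 'nova-compute', 'compute',
                               FLAGS.compute_manager)
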
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index e5f4805a1..11f1ab366 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -78,7 +78,7 @@ class AOEManager(manager.Manager):
         self.db.volume_update(context,
                               volume_id,
-                              {'host': FLAGS.host})
+                              {'host': self.host})
 
         size = volume_ref['size']
         logging.debug("volume %s: creating lv of size %sG", volume_id, size)
@@ -111,7 +111,7 @@ class AOEManager(manager.Manager):
         volume_ref = self.db.volume_get(context, volume_id)
         if volume_ref['attach_status'] == "attached":
             raise exception.Error("Volume is still attached")
-        if volume_ref['host'] != FLAGS.host:
+        if volume_ref['host'] != self.host:
             raise exception.Error("Volume is not local to this node")
         shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
                                                                 volume_id)
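
The volume manager gets the same treatment as compute and network: the host written onto a new volume and the delete-time locality check both use the manager's own host. A toy version of the guard, with exception.Error swapped for a builtin so the sketch stands alone:

    # Toy version of the delete-time guard above; ValueError replaces
    # nova's exception.Error purely to keep the sketch self-contained.
    def check_volume_deletable(volume_ref, manager_host):
        if volume_ref['attach_status'] == "attached":
            raise ValueError("Volume is still attached")
        if volume_ref['host'] != manager_host:
            raise ValueError("Volume is not local to this node")

    check_volume_deletable({'attach_status': 'detached', 'host': 'host1'},
                           'host1')  # ok: volume is local and detached
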