summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  nova/compute/api.py                       24
-rw-r--r--  nova/compute/manager.py                    4
-rw-r--r--  nova/compute/resource_tracker.py           3
-rw-r--r--  nova/conductor/api.py                     16
-rw-r--r--  nova/conductor/manager.py                  5
-rw-r--r--  nova/db/api.py                             9
-rw-r--r--  nova/db/sqlalchemy/api.py                  4
-rw-r--r--  nova/scheduler/driver.py                  11
-rw-r--r--  nova/scheduler/manager.py                  7
-rw-r--r--  nova/tests/compute/test_host_api.py       24
-rw-r--r--  nova/tests/compute/test_resource_tracker.py  16
-rw-r--r--  nova/tests/conductor/test_conductor.py    34
-rw-r--r--  nova/tests/integrated/test_api_samples.py  4
-rw-r--r--  nova/tests/scheduler/test_scheduler.py   113
14 files changed, 140 insertions, 134 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index d0a039644..1ad9e2aee 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -954,19 +954,16 @@ class API(base.Base):
host=src_host, cast=False,
reservations=downsize_reservations)
- # NOTE(jogo): db allows for multiple compute services per host
+ is_up = False
try:
- services = self.db.service_get_all_compute_by_host(
+ service = self.db.service_get_by_compute_host(
context.elevated(), instance['host'])
- except exception.ComputeHostNotFound:
- services = []
-
- is_up = False
- for service in services:
if self.servicegroup_api.service_is_up(service):
is_up = True
cb(context, instance, bdms)
- break
+ except exception.ComputeHostNotFound:
+ pass
+
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
@@ -2238,9 +2235,8 @@ class HostAPI(base.Base):
"""
# Getting compute node info and related instances info
try:
- compute_ref = self.db.service_get_all_compute_by_host(context,
- host_name)
- compute_ref = compute_ref[0]
+ compute_ref = self.db.service_get_by_compute_host(context,
+ host_name)
except exception.ComputeHostNotFound:
raise exception.HostNotFound(host=host_name)
instance_refs = self.db.instance_get_all_by_host(context,
@@ -2360,8 +2356,7 @@ class AggregateAPI(base.Base):
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(
- context, host_name)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
@@ -2372,8 +2367,7 @@ class AggregateAPI(base.Base):
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(
- context, host_name)[0]
+ self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 85942541f..d8021ac7d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2592,10 +2592,10 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
def _get_compute_info(self, context, host):
- compute_node_ref = self.conductor_api.service_get_all_compute_by_host(
+ compute_node_ref = self.conductor_api.service_get_by_compute_host(
context, host)
try:
- return compute_node_ref[0]['compute_node'][0]
+ return compute_node_ref['compute_node'][0]
except IndexError:
raise exception.NotFound(_("Host %(host)s not found") % locals())
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 075d59ec8..d2afcaa27 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -308,8 +308,7 @@ class ResourceTracker(object):
def _get_service(self, context):
try:
- return db.service_get_all_compute_by_host(context,
- self.host)[0]
+ return db.service_get_by_compute_host(context, self.host)
except exception.NotFound:
LOG.warn(_("No service record for host %s"), self.host)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4cc10604b..63b64f830 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -249,8 +249,11 @@ class LocalAPI(object):
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host)
- def service_get_all_compute_by_host(self, context, host):
- return self._manager.service_get_all_by(context, 'compute', host)
+ def service_get_by_compute_host(self, context, host):
+ result = self._manager.service_get_all_by(context, 'compute', host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
def service_get_by_args(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
@@ -493,9 +496,12 @@ class API(object):
def service_get_by_host_and_topic(self, context, host, topic):
return self.conductor_rpcapi.service_get_all_by(context, topic, host)
- def service_get_all_compute_by_host(self, context, host):
- return self.conductor_rpcapi.service_get_all_by(context, 'compute',
- host)
+ def service_get_by_compute_host(self, context, host):
+ result = self.conductor_rpcapi.service_get_all_by(context, 'compute',
+ host)
+ # FIXME(comstud): A major revision bump to 2.0 should return a
+ # single entry, so we should just return 'result' at that point.
+ return result[0]
def service_get_by_args(self, context, host, binary):
return self.conductor_rpcapi.service_get_all_by(context, host=host,
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 8c6f39f02..b0d4011ad 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -261,8 +261,9 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.service_get_all(context)
elif all((topic, host)):
if topic == 'compute':
- result = self.db.service_get_all_compute_by_host(context,
- host)
+ result = self.db.service_get_by_compute_host(context, host)
+ # FIXME(comstud) Potentially remove this on bump to v2.0
+ result = [result]
else:
result = self.db.service_get_by_host_and_topic(context,
host, topic)
diff --git a/nova/db/api.py b/nova/db/api.py
index b1552b480..d7d9bd0d2 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -151,9 +151,12 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
-def service_get_all_compute_by_host(context, host):
- """Get all compute services for a given host."""
- return IMPL.service_get_all_compute_by_host(context, host)
+def service_get_by_compute_host(context, host):
+ """Get the service entry for a given compute host.
+
+ Returns the service entry joined with the compute_node entry.
+ """
+ return IMPL.service_get_by_compute_host(context, host)
def service_get_all_compute_sorted(context):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 8930f6ccc..e51d7b685 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -370,12 +370,12 @@ def service_get_all_by_host(context, host):
@require_admin_context
-def service_get_all_compute_by_host(context, host):
+def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic=CONF.compute_topic).\
- all()
+ first()
if not result:
raise exception.ComputeHostNotFound(host=host)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index dc494af8f..d1ae1cd6e 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -192,12 +192,12 @@ class Scheduler(object):
# Checking src host exists and compute node
src = instance_ref['host']
try:
- services = db.service_get_all_compute_by_host(context, src)
+ service = db.service_get_by_compute_host(context, src)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=src)
# Checking src host is alive.
- if not self.servicegroup_api.service_is_up(services[0]):
+ if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
@@ -209,8 +209,7 @@ class Scheduler(object):
"""
# Checking dest exists and compute node.
- dservice_refs = db.service_get_all_compute_by_host(context, dest)
- dservice_ref = dservice_refs[0]
+ dservice_ref = db.service_get_by_compute_host(context, dest)
# Checking dest host is alive.
if not self.servicegroup_api.service_is_up(dservice_ref):
@@ -290,5 +289,5 @@ class Scheduler(object):
:return: value specified by key
"""
- compute_node_ref = db.service_get_all_compute_by_host(context, host)
- return compute_node_ref[0]['compute_node'][0]
+ service_ref = db.service_get_by_compute_host(context, host)
+ return service_ref['compute_node'][0]
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 033ee9cc8..84bdcddb5 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -220,13 +220,12 @@ class SchedulerManager(manager.Manager):
"""
# Getting compute node info and related instances info
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
+ service_ref = db.service_get_by_compute_host(context, host)
instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
+ service_ref['host'])
# Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
+ compute_ref = service_ref['compute_node'][0]
resource = {'vcpus': compute_ref['vcpus'],
'memory_mb': compute_ref['memory_mb'],
'local_gb': compute_ref['local_gb'],
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
index f00245d1e..0af1d6766 100644
--- a/nova/tests/compute/test_host_api.py
+++ b/nova/tests/compute/test_host_api.py
@@ -57,19 +57,19 @@ class HostApiTestCase(test.TestCase):
given our fake input.
"""
ctx = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
host_name = 'host_c1'
- db.service_get_all_compute_by_host(ctx, host_name).AndReturn(
- [{'host': 'fake_host',
- 'compute_node': [
- {'vcpus': 4,
- 'vcpus_used': 1,
- 'memory_mb': 8192,
- 'memory_mb_used': 2048,
- 'local_gb': 1024,
- 'local_gb_used': 648}
- ]
- }])
+ db.service_get_by_compute_host(ctx, host_name).AndReturn(
+ {'host': 'fake_host',
+ 'compute_node': [
+ {'vcpus': 4,
+ 'vcpus_used': 1,
+ 'memory_mb': 8192,
+ 'memory_mb_used': 2048,
+ 'local_gb': 1024,
+ 'local_gb_used': 648}
+ ]
+ })
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
[{'project_id': 42,
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 3bfd51461..53d92a13f 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -297,8 +297,8 @@ class MissingComputeNodeTestCase(BaseTestCase):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
@@ -306,10 +306,10 @@ class MissingComputeNodeTestCase(BaseTestCase):
self.created = True
return self._create_compute_node()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
- return [service]
+ return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
@@ -330,8 +330,8 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker = self._tracker()
self._migrations = {}
- self.stubs.Set(db, 'service_get_all_compute_by_host',
- self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'migration_update',
@@ -342,10 +342,10 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
- def _fake_service_get_all_compute_by_host(self, ctx, host):
+ def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
- return [self.service]
+ return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 46fadf4f0..cc3dbfcc0 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -451,12 +451,16 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
- def _test_stubbed(self, name, dbargs, condargs):
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_get_all_by(self.context, **condargs)
- self.assertEqual(result, 'fake-result')
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -476,10 +480,11 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
('host',),
dict(host='host'))
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host',
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
('host',),
- dict(topic='compute', host='host'))
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
@@ -547,12 +552,16 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
- def _test_stubbed(self, name, dbargs, condargs):
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False):
self.mox.StubOutWithMock(db, name)
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_get_all_by(self.context, **condargs)
- self.assertEqual(result, 'fake-result')
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
@@ -572,10 +581,11 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
('host',),
dict(host='host'))
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host',
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
('host',),
- dict(topic='compute', host='host'))
+ dict(topic='compute', host='host'),
+ db_result_listified=True)
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
@@ -681,8 +691,8 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
- def test_service_get_all_compute_by_host(self):
- self._test_stubbed('service_get_all_compute_by_host', 'host')
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_create(self):
self._test_stubbed('service_create', {})
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 5bdaa977a..0fb0e9107 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -2091,8 +2091,8 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
hypervisor_type='bar',
hypervisor_version='1',
disabled=False)
- return [{'compute_node': [service]}]
- self.stubs.Set(db, "service_get_all_compute_by_host", fake_get_compute)
+ return {'compute_node': [service]}
+ self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-live-migrate',
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index ceea74e70..dd5b0ae32 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -111,13 +111,13 @@ class SchedulerManagerTestCase(test.TestCase):
def test_show_host_resources(self):
host = 'fake_host'
- computes = [{'host': host,
- 'compute_node': [{'vcpus': 4,
- 'vcpus_used': 2,
- 'memory_mb': 1024,
- 'memory_mb_used': 512,
- 'local_gb': 1024,
- 'local_gb_used': 512}]}]
+ compute_node = {'host': host,
+ 'compute_node': [{'vcpus': 4,
+ 'vcpus_used': 2,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 512,
+ 'local_gb': 1024,
+ 'local_gb_used': 512}]}
instances = [{'project_id': 'project1',
'vcpus': 1,
'memory_mb': 128,
@@ -134,11 +134,11 @@ class SchedulerManagerTestCase(test.TestCase):
'root_gb': 256,
'ephemeral_gb': 0}]
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.service_get_all_compute_by_host(self.context, host).AndReturn(
- computes)
+ db.service_get_by_compute_host(self.context, host).AndReturn(
+ compute_node)
db.instance_get_all_by_host(self.context, host).AndReturn(instances)
self.mox.ReplayAll()
@@ -338,8 +338,6 @@ class SchedulerTestCase(test.TestCase):
block_migration = False
disk_over_commit = False
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
@@ -362,7 +360,7 @@ class SchedulerTestCase(test.TestCase):
# Test live migration when all checks pass.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
@@ -373,34 +371,32 @@ class SchedulerTestCase(test.TestCase):
block_migration = True
disk_over_commit = True
instance = jsonutils.to_primitive(self._live_migration_instance())
- instance_id = instance['id']
- instance_uuid = instance['uuid']
# Source checks
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(True)
# Destination checks (compute is up, enough memory, disk)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
# assert_compute_node_has_enough_memory()
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'memory_mb': 2048,
- 'hypervisor_version': 1}]}])
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'memory_mb': 2048,
+ 'hypervisor_version': 1}]})
db.instance_get_all_by_host(self.context, dest).AndReturn(
[dict(memory_mb=256), dict(memory_mb=512)])
# Common checks (same hypervisor, etc)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1,
- 'cpu_info': 'fake_cpu_info'}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'cpu_info': 'fake_cpu_info'}]})
rpc.call(self.context, "compute.fake_host2",
{"method": 'check_can_live_migrate_destination',
@@ -440,7 +436,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when src compute node is does not exist.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -448,9 +444,9 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context,
instance['host']).AndRaise(
- exception.NotFound())
+ exception.ComputeHostNotFound(host='fake'))
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
@@ -463,7 +459,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when src compute node is not alive.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -471,8 +467,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
# Compute down
- db.service_get_all_compute_by_host(self.context,
- instance['host']).AndReturn(['fake_service2'])
+ db.service_get_by_compute_host(self.context,
+ instance['host']).AndReturn('fake_service2')
self.servicegroup_api.service_is_up('fake_service2').AndReturn(False)
self.mox.ReplayAll()
@@ -486,7 +482,7 @@ class SchedulerTestCase(test.TestCase):
# Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
dest = 'fake_host2'
@@ -495,8 +491,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
# Compute is down
self.servicegroup_api.service_is_up('fake_service3').AndReturn(False)
@@ -511,17 +507,16 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
block_migration = False
- disk_over_commit = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.mox.ReplayAll()
@@ -535,7 +530,7 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises when dest doesn't have enough memory.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
@@ -546,8 +541,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_all_compute_by_host(self.context,
- dest).AndReturn(['fake_service3'])
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
@@ -569,7 +564,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -579,13 +574,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'not-xen',
- 'hypervisor_version': 1}]}])
+ {'compute_node': [{'hypervisor_type': 'not-xen',
+ 'hypervisor_version': 1}]})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
@@ -601,7 +596,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc, 'queue_get_for')
self.mox.StubOutWithMock(rpc, 'call')
self.mox.StubOutWithMock(rpc, 'cast')
- self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
dest = 'fake_host2'
block_migration = False
@@ -611,13 +606,13 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_src_check(self.context, instance)
self.driver._live_migration_dest_check(self.context, instance, dest)
- db.service_get_all_compute_by_host(self.context, dest).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 1}]}])
- db.service_get_all_compute_by_host(self.context,
+ db.service_get_by_compute_host(self.context, dest).AndReturn(
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 1}]})
+ db.service_get_by_compute_host(self.context,
instance['host']).AndReturn(
- [{'compute_node': [{'hypervisor_type': 'xen',
- 'hypervisor_version': 2}]}])
+ {'compute_node': [{'hypervisor_type': 'xen',
+ 'hypervisor_version': 2}]})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.driver.schedule_live_migration, self.context,