summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHans Lindgren <hanlind@kth.se>2013-04-17 15:52:56 +0200
committerHans Lindgren <hanlind@kth.se>2013-05-08 15:10:17 +0200
commit0f56d8ddb02f54ae389380dcd0790e55f2dcb479 (patch)
tree8cdcb69a616ea10b75390c2c8fb30ba5905a83c0
parent36b10384724fec9657784980cd2bd38e72b445bc (diff)
Optimize instance queries in compute manager
Some instance queries against the db through the conductor can be optimized to return a reduced set of instances as needed for the job at hand. Most of these are part of periodic tasks, so the win is kind of big. Compute methods where queries can be made more efficient: _get_instances_on_driver _poll_rebooting_instances _poll_rescued_instances _reclaim_queued_deletes _run_image_cache_manager_pass Resolves bug 1169970. Change-Id: I7c2fab48944e34765b3fff8ce10bc64a5cd826c8
-rwxr-xr-xnova/compute/manager.py93
-rw-r--r--nova/conductor/api.py14
-rw-r--r--nova/conductor/manager.py2
-rw-r--r--nova/conductor/rpcapi.py9
-rw-r--r--nova/db/api.py1
-rw-r--r--nova/db/sqlalchemy/api.py52
-rw-r--r--nova/tests/compute/test_compute.py61
-rw-r--r--nova/tests/conductor/test_conductor.py18
-rw-r--r--nova/tests/test_imagecache.py5
9 files changed, 126 insertions, 129 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dc0725933..d59e4a1f0 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -393,15 +393,17 @@ class ComputeManager(manager.SchedulerDependentManager):
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
- def _get_instances_on_driver(self, context):
+ def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records that match the instances found
on the hypervisor.
"""
+ if not filters:
+ filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
+ filters['uuid'] = driver_uuids
local_instances = self.conductor_api.instance_get_all_by_filters(
- context, {'uuid': driver_uuids},
- columns_to_join=[])
+ context, filters, columns_to_join=[])
local_instance_uuids = [inst['uuid'] for inst in local_instances]
for uuid in set(driver_uuids) - set(local_instance_uuids):
LOG.error(_('Instance %(uuid)s found in the hypervisor, but '
@@ -413,8 +415,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
- instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
+ instances = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
name_map = dict((instance['name'], instance) for instance in instances)
local_instances = []
for driver_instance in driver_instances:
@@ -436,11 +438,11 @@ class ComputeManager(manager.SchedulerDependentManager):
not, destroy them.
"""
our_host = self.host
- local_instances = self._get_instances_on_driver(context)
+ filters = {'deleted': False}
+ local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
instance_host = instance['host']
- instance_name = instance['name']
- if instance['host'] != our_host:
+ if instance_host != our_host:
LOG.info(_('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
@@ -3464,21 +3466,26 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
- instances = self.conductor_api.instance_get_all_hung_in_rebooting(
- context, CONF.reboot_timeout)
- self.driver.poll_rebooting_instances(CONF.reboot_timeout,
- instances)
+ filters = {'task_state': task_states.REBOOTING,
+ 'host': self.host}
+ rebooting = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
+
+ to_poll = []
+ for instance in rebooting:
+ if timeutils.is_older_than(instance['updated_at'],
+ CONF.reboot_timeout):
+ to_poll.append(instance)
+
+ self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@manager.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
- instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
-
- rescued_instances = []
- for instance in instances:
- if instance['vm_state'] == vm_states.RESCUED:
- rescued_instances.append(instance)
+ filters = {'vm_state': vm_states.RESCUED,
+ 'host': self.host}
+ rescued_instances = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
to_unrescue = []
for instance in rescued_instances:
@@ -3915,23 +3922,15 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
return
- instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
+ filters = {'vm_state': vm_states.SOFT_DELETED,
+ 'host': self.host}
+ instances = self.conductor_api.instance_get_all_by_filters(context,
+ filters)
for instance in instances:
- old_enough = (not instance['deleted_at'] or
- timeutils.is_older_than(instance['deleted_at'],
- interval))
- soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
-
- if soft_deleted and old_enough:
+ if self._deleted_old_enough(instance, interval):
capi = self.conductor_api
bdms = capi.block_device_mapping_get_all_by_instance(
context, instance)
- # NOTE(danms): We fetched instances above without the
- # system_metadata for efficiency. If we get here, we need
-             # to re-fetch with it so that _delete_instance() can extract
- # instance_type information.
- instance = capi.instance_get_by_uuid(context, instance['uuid'])
LOG.info(_('Reclaiming deleted instance'), instance=instance)
# NOTE(comstud): Quotas were already accounted for when
# the instance was soft deleted, so there's no need to
@@ -4033,18 +4032,15 @@ class ComputeManager(manager.SchedulerDependentManager):
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
+ filters = {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.host}
+ instances = self._get_instances_on_driver(context, filters)
+ return [i for i in instances if self._deleted_old_enough(i, timeout)]
- def deleted_instance(instance):
- erroneously_running = instance['deleted']
- old_enough = (not instance['deleted_at'] or
- timeutils.is_older_than(instance['deleted_at'],
- timeout))
- if erroneously_running and old_enough:
- return True
- return False
-
- instances = self._get_instances_on_driver(context)
- return [i for i in instances if deleted_instance(i)]
+ def _deleted_old_enough(self, instance, timeout):
+ return (not instance['deleted_at'] or
+ timeutils.is_older_than(instance['deleted_at'], timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance_uuid,
@@ -4104,8 +4100,6 @@ class ComputeManager(manager.SchedulerDependentManager):
if CONF.image_cache_manager_interval == 0:
return
- all_instances = self.conductor_api.instance_get_all(context)
-
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
@@ -4115,9 +4109,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
- filtered_instances = []
- for instance in all_instances:
- if instance['host'] in nodes:
- filtered_instances.append(instance)
+ filters = {'deleted': False,
+ 'soft_deleted': True,
+ 'host': nodes}
+ filtered_instances = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
self.driver.manage_image_cache(context, filtered_instances)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index c7e824df4..8d3961157 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -73,9 +73,6 @@ class LocalAPI(object):
def instance_destroy(self, context, instance):
return self._manager.instance_destroy(context, instance)
- def instance_get_all(self, context):
- return self._manager.instance_get_all(context)
-
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self._manager.instance_get_all_by_host(
context, host, columns_to_join=columns_to_join)
@@ -93,10 +90,6 @@ class LocalAPI(object):
sort_dir,
columns_to_join)
- def instance_get_all_hung_in_rebooting(self, context, timeout):
- return self._manager.instance_get_all_hung_in_rebooting(context,
- timeout)
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self._manager.instance_get_active_by_window_joined(
@@ -401,9 +394,6 @@ class API(object):
return self.conductor_rpcapi.instance_get_by_uuid(context,
instance_uuid)
- def instance_get_all(self, context):
- return self.conductor_rpcapi.instance_get_all(context)
-
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self.conductor_rpcapi.instance_get_all_by_host(
context, host, columns_to_join=columns_to_join)
@@ -419,10 +409,6 @@ class API(object):
return self.conductor_rpcapi.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, columns_to_join)
- def instance_get_all_hung_in_rebooting(self, context, timeout):
- return self.conductor_rpcapi.instance_get_all_hung_in_rebooting(
- context, timeout)
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self.conductor_rpcapi.instance_get_active_by_window_joined(
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 604d2ab4b..70581b7c5 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -106,6 +106,7 @@ class ConductorManager(manager.Manager):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid))
+ # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all(self, context):
return jsonutils.to_primitive(self.db.instance_get_all(context))
@@ -262,6 +263,7 @@ class ConductorManager(manager.Manager):
columns_to_join=columns_to_join)
return jsonutils.to_primitive(result)
+ # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all_hung_in_rebooting(self, context, timeout):
result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
return jsonutils.to_primitive(result)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index adfe752ca..50d7eb093 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -257,11 +257,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
sort_dir=sort_dir, columns_to_join=columns_to_join)
return self.call(context, msg, version='1.47')
- def instance_get_all_hung_in_rebooting(self, context, timeout):
- msg = self.make_msg('instance_get_all_hung_in_rebooting',
- timeout=timeout)
- return self.call(context, msg, version='1.15')
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
msg = self.make_msg('instance_get_active_by_window_joined',
@@ -305,10 +300,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
binary=binary)
return self.call(context, msg, version='1.28')
- def instance_get_all(self, context):
- msg = self.make_msg('instance_get_all')
- return self.call(context, msg, version='1.23')
-
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
msg = self.make_msg('instance_get_all_by_host', host=host, node=node,
diff --git a/nova/db/api.py b/nova/db/api.py
index c1d28e33a..b8a85a5cf 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -649,6 +649,7 @@ def instance_floating_address_get_all(context, instance_uuid):
return IMPL.instance_floating_address_get_all(context, instance_uuid)
+# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 4224d9c06..316a1d14b 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1624,7 +1624,37 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
session=None):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
- otherwise"""
+ otherwise.
+
+ Depending on the name of a filter, matching for that filter is
+ performed using either exact matching or as regular expression
+ matching. Exact matching is applied for the following filters:
+
+ ['project_id', 'user_id', 'image_ref',
+ 'vm_state', 'instance_type_id', 'uuid',
+ 'metadata', 'host']
+
+
+ A third type of filter (also using exact matching), filters
+ based on instance metadata tags when supplied under a special
+ key named 'filter'.
+
+ filters = {
+ 'filter': [
+ {'name': 'tag-key', 'value': '<metakey>'},
+ {'name': 'tag-value', 'value': '<metaval>'},
+ {'name': 'tag:<metakey>', 'value': '<metaval>'}
+ ]
+ }
+
+    Special keys are used to tweak the query further:
+
+ 'changes-since' - only return instances updated after
+ 'deleted' - only return (or exclude) deleted instances
+ 'soft-deleted' - modify behavior of 'deleted' to either
+ include or exclude instances whose
+ vm_state is SOFT_DELETED.
+ """
sort_fn = {'desc': desc, 'asc': asc}
@@ -1657,12 +1687,21 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
- deleted = or_(models.Instance.deleted == models.Instance.id,
- models.Instance.vm_state == vm_states.SOFT_DELETED)
- query_prefix = query_prefix.filter(deleted)
+ if filters.pop('soft_deleted', False):
+ query_prefix = query_prefix.\
+ filter(models.Instance.deleted == models.Instance.id)
+ else:
+ deleted = or_(
+ models.Instance.deleted == models.Instance.id,
+ models.Instance.vm_state == vm_states.SOFT_DELETED
+ )
+ query_prefix = query_prefix.\
+ filter(deleted)
else:
query_prefix = query_prefix.\
- filter_by(deleted=0).\
+ filter_by(deleted=0)
+ if not filters.pop('soft_deleted', False):
+ query_prefix = query_prefix.\
filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
if not context.is_admin:
@@ -1676,7 +1715,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
- 'metadata']
+ 'metadata', 'host']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
@@ -1898,6 +1937,7 @@ def instance_floating_address_get_all(context, instance_uuid):
return floating_ips
+# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 88434078d..810d8e91e 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -3594,13 +3594,15 @@ class ComputeTestCase(BaseTestCase):
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance = self._create_fake_instance({"deleted_at": deleted_at,
- "deleted": True})
+ "deleted": True})
self.compute.host = instance['host']
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.compute._get_instances_on_driver(admin_context).AndReturn(
- [instance])
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance])
self.flags(running_deleted_instance_timeout=3600,
running_deleted_instance_action='reap')
@@ -3628,13 +3630,11 @@ class ComputeTestCase(BaseTestCase):
instance1['deleted'] = True
instance1['deleted_at'] = "sometimeago"
- instance2 = {}
- instance2['deleted'] = False
- instance2['deleted_at'] = None
-
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.compute._get_instances_on_driver(admin_context).AndReturn(
- [instance1, instance2])
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance1])
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
@@ -3752,25 +3752,23 @@ class ComputeTestCase(BaseTestCase):
instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESCUED,
'launched_at': timed_out_time},
- {'uuid': 'fake_uuid2', 'vm_state': vm_states.ACTIVE,
- 'launched_at': timed_out_time},
- {'uuid': 'fake_uuid3', 'vm_state': vm_states.ACTIVE,
- 'launched_at': not_timed_out_time},
- {'uuid': 'fake_uuid4', 'vm_state': vm_states.RESCUED,
+ {'uuid': 'fake_uuid2', 'vm_state': vm_states.RESCUED,
'launched_at': timed_out_time},
- {'uuid': 'fake_uuid5', 'vm_state': vm_states.RESCUED,
+ {'uuid': 'fake_uuid3', 'vm_state': vm_states.RESCUED,
'launched_at': not_timed_out_time}]
- unrescued_instances = {'fake_uuid1': False, 'fake_uuid4': False}
+ unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
- def fake_instance_get_all_by_host(context, host, columns_to_join):
+ def fake_instance_get_all_by_filters(context, filters,
+ columns_to_join):
self.assertEqual(columns_to_join, [])
return instances
def fake_unrescue(context, instance):
unrescued_instances[instance['uuid']] = True
- self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
- fake_instance_get_all_by_host)
+ self.stubs.Set(self.compute.conductor_api,
+ 'instance_get_all_by_filters',
+ fake_instance_get_all_by_filters)
self.stubs.Set(self.compute.conductor_api, 'compute_unrescue',
fake_unrescue)
@@ -4020,8 +4018,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.compute._get_instances_on_driver(fake_context).AndReturn(
- instances)
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
@@ -4073,8 +4071,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.compute._get_instances_on_driver(fake_context).AndReturn(
- instances)
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
@@ -4131,8 +4129,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.compute._get_instances_on_driver(fake_context).AndReturn(
- instances)
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
@@ -4238,8 +4236,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.init_virt_events()
# simulate failed instance
- self.compute._get_instances_on_driver(fake_context).AndReturn([
- deleted_instance])
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn([deleted_instance])
self.compute._get_instance_nw_info(fake_context, deleted_instance
).AndRaise(exception.InstanceNotFound(
instance_id=deleted_instance['uuid']))
@@ -4398,6 +4396,7 @@ class ComputeTestCase(BaseTestCase):
# Test getting instances when driver doesn't support
# 'list_instance_uuids'
self.compute.host = 'host'
+ filters = {'host': self.compute.host}
fake_context = context.get_admin_context()
all_instances = []
@@ -4413,19 +4412,19 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver,
'list_instances')
self.mox.StubOutWithMock(self.compute.conductor_api,
- 'instance_get_all_by_host')
+ 'instance_get_all_by_filters')
self.compute.driver.list_instance_uuids().AndRaise(
NotImplementedError())
self.compute.driver.list_instances().AndReturn(
[inst['name'] for inst in driver_instances])
- self.compute.conductor_api.instance_get_all_by_host(
- fake_context, self.compute.host,
+ self.compute.conductor_api.instance_get_all_by_filters(
+ fake_context, filters,
columns_to_join=[]).AndReturn(all_instances)
self.mox.ReplayAll()
- result = self.compute._get_instances_on_driver(fake_context)
+ result = self.compute._get_instances_on_driver(fake_context, filters)
self.assertEqual(driver_instances, result)
def test_instance_usage_audit(self):
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index fee919a33..9fc184bea 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -341,12 +341,6 @@ class _BaseTestCase(object):
self.context, fake_inst)
self.assertEqual(result, 'fake-result')
- def test_instance_get_all_hung_in_rebooting(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
- db.instance_get_all_hung_in_rebooting(self.context, 123)
- self.mox.ReplayAll()
- self.conductor.instance_get_all_hung_in_rebooting(self.context, 123)
-
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
@@ -978,18 +972,6 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.block_device_mapping_destroy_by_instance_and_volume(
self.context, fake_inst, 'fake-volume')
- def test_instance_get_all(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all(self.context)
- db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
- 'updated_at', 'asc',
- columns_to_join=None)
- self.mox.ReplayAll()
- self.conductor.instance_get_all(self.context)
- self.conductor.instance_get_all_by_filters(self.context,
- {'name': 'fake-inst'},
- 'updated_at', 'asc')
-
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 02cc7337a..d771eff83 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -945,7 +945,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_compute_manager(self):
was = {'called': False}
- def fake_get_all(context, *args, **kwargs):
+ def fake_get_all_by_filters(context, *args, **kwargs):
was['called'] = True
return [{'image_ref': '1',
'host': CONF.host,
@@ -963,7 +963,8 @@ class ImageCacheManagerTestCase(test.TestCase):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
- self.stubs.Set(db, 'instance_get_all', fake_get_all)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all_by_filters)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()