summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2013-05-19 12:29:40 +0000
committerGerrit Code Review <review@openstack.org>2013-05-19 12:29:40 +0000
commit6426dc56475c62d3893d5720eae095b473da76ae (patch)
tree52e441ec59d2d9ede4c75cca521907d6362bfc58 /nova
parent2f0a0bf5c6130889c9554058c787507237f00d24 (diff)
parent0f56d8ddb02f54ae389380dcd0790e55f2dcb479 (diff)
Merge "Optimize instance queries in compute manager"
Diffstat (limited to 'nova')
-rwxr-xr-xnova/compute/manager.py93
-rw-r--r--nova/conductor/api.py14
-rw-r--r--nova/conductor/manager.py2
-rw-r--r--nova/conductor/rpcapi.py9
-rw-r--r--nova/db/api.py1
-rw-r--r--nova/db/sqlalchemy/api.py52
-rw-r--r--nova/tests/compute/test_compute.py61
-rw-r--r--nova/tests/conductor/test_conductor.py18
-rw-r--r--nova/tests/test_imagecache.py5
9 files changed, 126 insertions, 129 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index a12cd085e..b63ca10a1 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -393,15 +393,17 @@ class ComputeManager(manager.SchedulerDependentManager):
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
- def _get_instances_on_driver(self, context):
+ def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records that match the instances found
on the hypervisor.
"""
+ if not filters:
+ filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
+ filters['uuid'] = driver_uuids
local_instances = self.conductor_api.instance_get_all_by_filters(
- context, {'uuid': driver_uuids},
- columns_to_join=[])
+ context, filters, columns_to_join=[])
local_instance_uuids = [inst['uuid'] for inst in local_instances]
for uuid in set(driver_uuids) - set(local_instance_uuids):
LOG.error(_('Instance %(uuid)s found in the hypervisor, but '
@@ -413,8 +415,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
- instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
+ instances = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
name_map = dict((instance['name'], instance) for instance in instances)
local_instances = []
for driver_instance in driver_instances:
@@ -436,11 +438,11 @@ class ComputeManager(manager.SchedulerDependentManager):
not, destroy them.
"""
our_host = self.host
- local_instances = self._get_instances_on_driver(context)
+ filters = {'deleted': False}
+ local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
instance_host = instance['host']
- instance_name = instance['name']
- if instance['host'] != our_host:
+ if instance_host != our_host:
LOG.info(_('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
@@ -3518,21 +3520,26 @@ class ComputeManager(manager.SchedulerDependentManager):
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
- instances = self.conductor_api.instance_get_all_hung_in_rebooting(
- context, CONF.reboot_timeout)
- self.driver.poll_rebooting_instances(CONF.reboot_timeout,
- instances)
+ filters = {'task_state': task_states.REBOOTING,
+ 'host': self.host}
+ rebooting = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
+
+ to_poll = []
+ for instance in rebooting:
+ if timeutils.is_older_than(instance['updated_at'],
+ CONF.reboot_timeout):
+ to_poll.append(instance)
+
+ self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
- instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
-
- rescued_instances = []
- for instance in instances:
- if instance['vm_state'] == vm_states.RESCUED:
- rescued_instances.append(instance)
+ filters = {'vm_state': vm_states.RESCUED,
+ 'host': self.host}
+ rescued_instances = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
to_unrescue = []
for instance in rescued_instances:
@@ -3968,23 +3975,15 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
return
- instances = self.conductor_api.instance_get_all_by_host(
- context, self.host, columns_to_join=[])
+ filters = {'vm_state': vm_states.SOFT_DELETED,
+ 'host': self.host}
+ instances = self.conductor_api.instance_get_all_by_filters(context,
+ filters)
for instance in instances:
- old_enough = (not instance['deleted_at'] or
- timeutils.is_older_than(instance['deleted_at'],
- interval))
- soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
-
- if soft_deleted and old_enough:
+ if self._deleted_old_enough(instance, interval):
capi = self.conductor_api
bdms = capi.block_device_mapping_get_all_by_instance(
context, instance)
- # NOTE(danms): We fetched instances above without the
- # system_metadata for efficiency. If we get here, we need
- # to re-fetch with it so that _delete_instace() can extract
- # instance_type information.
- instance = capi.instance_get_by_uuid(context, instance['uuid'])
LOG.info(_('Reclaiming deleted instance'), instance=instance)
# NOTE(comstud): Quotas were already accounted for when
# the instance was soft deleted, so there's no need to
@@ -4087,18 +4086,15 @@ class ComputeManager(manager.SchedulerDependentManager):
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
+ filters = {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.host}
+ instances = self._get_instances_on_driver(context, filters)
+ return [i for i in instances if self._deleted_old_enough(i, timeout)]
- def deleted_instance(instance):
- erroneously_running = instance['deleted']
- old_enough = (not instance['deleted_at'] or
- timeutils.is_older_than(instance['deleted_at'],
- timeout))
- if erroneously_running and old_enough:
- return True
- return False
-
- instances = self._get_instances_on_driver(context)
- return [i for i in instances if deleted_instance(i)]
+ def _deleted_old_enough(self, instance, timeout):
+ return (not instance['deleted_at'] or
+ timeutils.is_older_than(instance['deleted_at'], timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance_uuid,
@@ -4165,8 +4161,6 @@ class ComputeManager(manager.SchedulerDependentManager):
if CONF.image_cache_manager_interval == 0:
return
- all_instances = self.conductor_api.instance_get_all(context)
-
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
@@ -4176,9 +4170,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
- filtered_instances = []
- for instance in all_instances:
- if instance['host'] in nodes:
- filtered_instances.append(instance)
+ filters = {'deleted': False,
+ 'soft_deleted': True,
+ 'host': nodes}
+ filtered_instances = self.conductor_api.instance_get_all_by_filters(
+ context, filters, columns_to_join=[])
self.driver.manage_image_cache(context, filtered_instances)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index e8fcc2c2c..6706440f5 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -72,9 +72,6 @@ class LocalAPI(object):
def instance_destroy(self, context, instance):
return self._manager.instance_destroy(context, instance)
- def instance_get_all(self, context):
- return self._manager.instance_get_all(context)
-
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self._manager.instance_get_all_by_host(
context, host, columns_to_join=columns_to_join)
@@ -92,10 +89,6 @@ class LocalAPI(object):
sort_dir,
columns_to_join)
- def instance_get_all_hung_in_rebooting(self, context, timeout):
- return self._manager.instance_get_all_hung_in_rebooting(context,
- timeout)
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self._manager.instance_get_active_by_window_joined(
@@ -398,9 +391,6 @@ class API(object):
instance_uuid,
columns_to_join)
- def instance_get_all(self, context):
- return self.conductor_rpcapi.instance_get_all(context)
-
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self.conductor_rpcapi.instance_get_all_by_host(
context, host, columns_to_join=columns_to_join)
@@ -416,10 +406,6 @@ class API(object):
return self.conductor_rpcapi.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, columns_to_join)
- def instance_get_all_hung_in_rebooting(self, context, timeout):
- return self.conductor_rpcapi.instance_get_all_hung_in_rebooting(
- context, timeout)
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self.conductor_rpcapi.instance_get_active_by_window_joined(
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index fad849043..a45c83b8d 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -111,6 +111,7 @@ class ConductorManager(manager.Manager):
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
+ # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all(self, context):
return jsonutils.to_primitive(self.db.instance_get_all(context))
@@ -269,6 +270,7 @@ class ConductorManager(manager.Manager):
columns_to_join=columns_to_join)
return jsonutils.to_primitive(result)
+ # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all_hung_in_rebooting(self, context, timeout):
result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
return jsonutils.to_primitive(result)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index e86a0acaa..c9be5dd79 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -251,11 +251,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
sort_dir=sort_dir, columns_to_join=columns_to_join)
return self.call(context, msg, version='1.47')
- def instance_get_all_hung_in_rebooting(self, context, timeout):
- msg = self.make_msg('instance_get_all_hung_in_rebooting',
- timeout=timeout)
- return self.call(context, msg, version='1.15')
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
msg = self.make_msg('instance_get_active_by_window_joined',
@@ -299,10 +294,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
binary=binary)
return self.call(context, msg, version='1.28')
- def instance_get_all(self, context):
- msg = self.make_msg('instance_get_all')
- return self.call(context, msg, version='1.23')
-
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
msg = self.make_msg('instance_get_all_by_host', host=host, node=node,
diff --git a/nova/db/api.py b/nova/db/api.py
index d294ee1d7..3130f7029 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -654,6 +654,7 @@ def instance_floating_address_get_all(context, instance_uuid):
return IMPL.instance_floating_address_get_all(context, instance_uuid)
+# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a35e8154d..a0f679d73 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1635,7 +1635,37 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
session=None):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
- otherwise"""
+ otherwise.
+
+ Depending on the name of a filter, matching for that filter is
+ performed using either exact matching or as regular expression
+ matching. Exact matching is applied for the following filters:
+
+ ['project_id', 'user_id', 'image_ref',
+ 'vm_state', 'instance_type_id', 'uuid',
+ 'metadata', 'host']
+
+
+ A third type of filter (also using exact matching), filters
+ based on instance metadata tags when supplied under a special
+ key named 'filter'.
+
+ filters = {
+ 'filter': [
+ {'name': 'tag-key', 'value': '<metakey>'},
+ {'name': 'tag-value', 'value': '<metaval>'},
+ {'name': 'tag:<metakey>', 'value': '<metaval>'}
+ ]
+ }
+
+ Special keys are used to tweak the query further:
+
+ 'changes-since' - only return instances updated after
+ 'deleted' - only return (or exclude) deleted instances
+ 'soft_deleted' - modify behavior of 'deleted' to either
+ include or exclude instances whose
+ vm_state is SOFT_DELETED.
+ """
sort_fn = {'desc': desc, 'asc': asc}
@@ -1668,12 +1698,21 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
- deleted = or_(models.Instance.deleted == models.Instance.id,
- models.Instance.vm_state == vm_states.SOFT_DELETED)
- query_prefix = query_prefix.filter(deleted)
+ if filters.pop('soft_deleted', False):
+ query_prefix = query_prefix.\
+ filter(models.Instance.deleted == models.Instance.id)
+ else:
+ deleted = or_(
+ models.Instance.deleted == models.Instance.id,
+ models.Instance.vm_state == vm_states.SOFT_DELETED
+ )
+ query_prefix = query_prefix.\
+ filter(deleted)
else:
query_prefix = query_prefix.\
- filter_by(deleted=0).\
+ filter_by(deleted=0)
+ if not filters.pop('soft_deleted', False):
+ query_prefix = query_prefix.\
filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
if not context.is_admin:
@@ -1687,7 +1726,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
- 'metadata']
+ 'metadata', 'host']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
@@ -1933,6 +1972,7 @@ def instance_floating_address_get_all(context, instance_uuid):
return [floating_ip.address for floating_ip in floating_ips]
+# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 22dd7bd2f..3101e3aa2 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -3888,13 +3888,15 @@ class ComputeTestCase(BaseTestCase):
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance = self._create_fake_instance({"deleted_at": deleted_at,
- "deleted": True})
+ "deleted": True})
self.compute.host = instance['host']
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.compute._get_instances_on_driver(admin_context).AndReturn(
- [instance])
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance])
self.flags(running_deleted_instance_timeout=3600,
running_deleted_instance_action='reap')
@@ -3922,13 +3924,11 @@ class ComputeTestCase(BaseTestCase):
instance1['deleted'] = True
instance1['deleted_at'] = "sometimeago"
- instance2 = {}
- instance2['deleted'] = False
- instance2['deleted_at'] = None
-
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.compute._get_instances_on_driver(admin_context).AndReturn(
- [instance1, instance2])
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance1])
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
@@ -4046,25 +4046,23 @@ class ComputeTestCase(BaseTestCase):
instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESCUED,
'launched_at': timed_out_time},
- {'uuid': 'fake_uuid2', 'vm_state': vm_states.ACTIVE,
- 'launched_at': timed_out_time},
- {'uuid': 'fake_uuid3', 'vm_state': vm_states.ACTIVE,
- 'launched_at': not_timed_out_time},
- {'uuid': 'fake_uuid4', 'vm_state': vm_states.RESCUED,
+ {'uuid': 'fake_uuid2', 'vm_state': vm_states.RESCUED,
'launched_at': timed_out_time},
- {'uuid': 'fake_uuid5', 'vm_state': vm_states.RESCUED,
+ {'uuid': 'fake_uuid3', 'vm_state': vm_states.RESCUED,
'launched_at': not_timed_out_time}]
- unrescued_instances = {'fake_uuid1': False, 'fake_uuid4': False}
+ unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
- def fake_instance_get_all_by_host(context, host, columns_to_join):
+ def fake_instance_get_all_by_filters(context, filters,
+ columns_to_join):
self.assertEqual(columns_to_join, [])
return instances
def fake_unrescue(context, instance):
unrescued_instances[instance['uuid']] = True
- self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
- fake_instance_get_all_by_host)
+ self.stubs.Set(self.compute.conductor_api,
+ 'instance_get_all_by_filters',
+ fake_instance_get_all_by_filters)
self.stubs.Set(self.compute.conductor_api, 'compute_unrescue',
fake_unrescue)
@@ -4315,8 +4313,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.compute._get_instances_on_driver(fake_context).AndReturn(
- instances)
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
@@ -4368,8 +4366,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.compute._get_instances_on_driver(fake_context).AndReturn(
- instances)
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
@@ -4426,8 +4424,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_legacy_nw_info')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.compute._get_instances_on_driver(fake_context).AndReturn(
- instances)
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
@@ -4533,8 +4531,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.init_virt_events()
# simulate failed instance
- self.compute._get_instances_on_driver(fake_context).AndReturn([
- deleted_instance])
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn([deleted_instance])
self.compute._get_instance_nw_info(fake_context, deleted_instance
).AndRaise(exception.InstanceNotFound(
instance_id=deleted_instance['uuid']))
@@ -4693,6 +4691,7 @@ class ComputeTestCase(BaseTestCase):
# Test getting instances when driver doesn't support
# 'list_instance_uuids'
self.compute.host = 'host'
+ filters = {'host': self.compute.host}
fake_context = context.get_admin_context()
all_instances = []
@@ -4708,19 +4707,19 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver,
'list_instances')
self.mox.StubOutWithMock(self.compute.conductor_api,
- 'instance_get_all_by_host')
+ 'instance_get_all_by_filters')
self.compute.driver.list_instance_uuids().AndRaise(
NotImplementedError())
self.compute.driver.list_instances().AndReturn(
[inst['name'] for inst in driver_instances])
- self.compute.conductor_api.instance_get_all_by_host(
- fake_context, self.compute.host,
+ self.compute.conductor_api.instance_get_all_by_filters(
+ fake_context, filters,
columns_to_join=[]).AndReturn(all_instances)
self.mox.ReplayAll()
- result = self.compute._get_instances_on_driver(fake_context)
+ result = self.compute._get_instances_on_driver(fake_context, filters)
self.assertEqual(driver_instances, result)
def test_instance_usage_audit(self):
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 3780fc7cf..3aebf76af 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -318,12 +318,6 @@ class _BaseTestCase(object):
self.context, fake_inst)
self.assertEqual(result, 'fake-result')
- def test_instance_get_all_hung_in_rebooting(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
- db.instance_get_all_hung_in_rebooting(self.context, 123)
- self.mox.ReplayAll()
- self.conductor.instance_get_all_hung_in_rebooting(self.context, 123)
-
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
@@ -952,18 +946,6 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.block_device_mapping_destroy_by_instance_and_volume(
self.context, fake_inst, 'fake-volume')
- def test_instance_get_all(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all(self.context)
- db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
- 'updated_at', 'asc',
- columns_to_join=None)
- self.mox.ReplayAll()
- self.conductor.instance_get_all(self.context)
- self.conductor.instance_get_all_by_filters(self.context,
- {'name': 'fake-inst'},
- 'updated_at', 'asc')
-
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 256732ace..bdc895474 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -945,7 +945,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_compute_manager(self):
was = {'called': False}
- def fake_get_all(context, *args, **kwargs):
+ def fake_get_all_by_filters(context, *args, **kwargs):
was['called'] = True
return [{'image_ref': '1',
'host': CONF.host,
@@ -963,7 +963,8 @@ class ImageCacheManagerTestCase(test.TestCase):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
- self.stubs.Set(db, 'instance_get_all', fake_get_all)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all_by_filters)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()