summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Behrens <cbehrens@codestud.com>2013-03-11 00:20:23 -0700
committerChris Behrens <cbehrens@codestud.com>2013-03-11 19:26:49 -0700
commit652a487ed9daba9ae97f7df77ae35720322d1af3 (patch)
treed21de2ac493af0334aa7f4942e2893e141861006
parentf543f347c84e7f5de2c584ca55363e4dee5b0a3d (diff)
Fix quota issues with instance deletes.
In order to keep quotas in sync as much as possible, only commit quota changes for delete when: 1) An instance's vm_state is updated to be SOFT_DELETED. 2) The DB record is marked as deleted (and the instance's vm_state is not SOFT_DELETED). If a host is down and we delete the instance in the API, this means quotas are committed within the API. Otherwise, quotas are committed on the manager side. Fixes bug 1098380 Also needed for proper testing: Fixed compute cells tests so that pseudo child cells use NoopQuotaDriver. This uncovered inconsistencies in the NoopQuotaDriver with respect to the DBQuotaDriver. Those issues were fixed as well. Change-Id: Ib72de1a457f0c5056d55a5c7dd4d8d7c69708996
-rw-r--r--nova/compute/api.py37
-rwxr-xr-xnova/compute/manager.py145
-rw-r--r--nova/compute/rpcapi.py16
-rw-r--r--nova/conductor/api.py42
-rw-r--r--nova/quota.py21
-rw-r--r--nova/tests/compute/test_compute.py89
-rw-r--r--nova/tests/compute/test_compute_cells.py25
-rw-r--r--nova/tests/compute/test_rpcapi.py7
-rw-r--r--nova/tests/conductor/test_conductor.py34
9 files changed, 290 insertions, 126 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index dc90748a4..5ee6d1c52 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1031,15 +1031,13 @@ class API(base.Base):
instance,
**attrs)
- # Avoid double-counting the quota usage reduction
- # where delete is already in progress
- if (old['vm_state'] != vm_states.SOFT_DELETED and
- old['task_state'] not in (task_states.DELETING,
- task_states.SOFT_DELETING)):
- reservations = self._create_reservations(context,
- old,
- updated,
- project_id)
+ # NOTE(comstud): If we delete the instance locally, we'll
+ # commit the reservations here. Otherwise, the manager side
+ # will commit or rollback the reservations based on success.
+ reservations = self._create_reservations(context,
+ old,
+ updated,
+ project_id)
if not host:
# Just update database, nothing else we can do
@@ -1099,17 +1097,18 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.DELETE)
- cb(context, instance, bdms)
+ cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
- if reservations:
- QUOTAS.commit(context,
- reservations,
- project_id=project_id)
+ if reservations:
+ QUOTAS.commit(context,
+ reservations,
+ project_id=project_id)
+ reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
@@ -1210,16 +1209,18 @@ class API(base.Base):
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
- def soft_delete(context, instance, bdms):
- self.compute_rpcapi.soft_delete_instance(context, instance)
+ def soft_delete(context, instance, bdms, reservations=None):
+ self.compute_rpcapi.soft_delete_instance(context, instance,
+ reservations=reservations)
self._delete(context, instance, soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
- def terminate(context, instance, bdms):
- self.compute_rpcapi.terminate_instance(context, instance, bdms)
+ def terminate(context, instance, bdms, reservations=None):
+ self.compute_rpcapi.terminate_instance(context, instance, bdms,
+ reservations=reservations)
self._delete(context, instance, terminate,
task_state=task_states.DELETING)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 338708f4e..b1c5a9a7e 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -67,7 +67,6 @@ from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import paths
-from nova import quota
from nova import safe_utils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
@@ -178,8 +177,6 @@ CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
-QUOTAS = quota.QUOTAS
-
LOG = logging.getLogger(__name__)
@@ -325,7 +322,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.26'
+ RPC_API_VERSION = '2.27'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1225,35 +1222,63 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(vish): bdms will be deleted on instance destroy
@hooks.add_hook("delete_instance")
- def _delete_instance(self, context, instance, bdms):
- """Delete an instance on this host."""
+ def _delete_instance(self, context, instance, bdms,
+ reservations=None):
+ """Delete an instance on this host. Commit or rollback quotas
+ as necessary.
+ """
instance_uuid = instance['uuid']
- self.conductor_api.instance_info_cache_delete(context, instance)
- self._notify_about_instance_usage(context, instance, "delete.start")
- self._shutdown_instance(context, instance, bdms)
- # NOTE(vish): We have already deleted the instance, so we have
- # to ignore problems cleaning up the volumes. It would
- # be nice to let the user know somehow that the volume
- # deletion failed, but it is not acceptable to have an
- # instance that can not be deleted. Perhaps this could
- # be reworked in the future to set an instance fault
- # the first time and to only ignore the failure if the
- # instance is already in ERROR.
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
+ was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
+ if was_soft_deleted:
+ # Instances in SOFT_DELETED vm_state have already had quotas
+ # decremented.
+ try:
+ self._quota_rollback(context, reservations,
+ project_id=project_id)
+ except Exception:
+ pass
+ reservations = None
+
try:
- self._cleanup_volumes(context, instance_uuid, bdms)
- except Exception as exc:
- LOG.warn(_("Ignoring volume cleanup failure due to %s") % exc,
- instance_uuid=instance_uuid)
- # if a delete task succeed, always update vm state and task state
- # without expecting task state to be DELETING
- instance = self._instance_update(context,
- instance_uuid,
- vm_state=vm_states.DELETED,
- task_state=None,
- terminated_at=timeutils.utcnow())
- system_meta = utils.metadata_to_dict(instance['system_metadata'])
- self.conductor_api.instance_destroy(context, instance)
+ self.conductor_api.instance_info_cache_delete(context, instance)
+ self._notify_about_instance_usage(context, instance,
+ "delete.start")
+ self._shutdown_instance(context, instance, bdms)
+ # NOTE(vish): We have already deleted the instance, so we have
+ # to ignore problems cleaning up the volumes. It
+ # would be nice to let the user know somehow that
+ # the volume deletion failed, but it is not
+ # acceptable to have an instance that can not be
+ # deleted. Perhaps this could be reworked in the
+ # future to set an instance fault the first time
+ # and to only ignore the failure if the instance
+ # is already in ERROR.
+ try:
+ self._cleanup_volumes(context, instance_uuid, bdms)
+ except Exception as exc:
+ err_str = _("Ignoring volume cleanup failure due to %s")
+ LOG.warn(err_str % exc, instance=instance)
+ # if a delete task succeed, always update vm state and task
+ # state without expecting task state to be DELETING
+ instance = self._instance_update(context,
+ instance_uuid,
+ vm_state=vm_states.DELETED,
+ task_state=None,
+ terminated_at=timeutils.utcnow())
+ system_meta = utils.metadata_to_dict(instance['system_metadata'])
+ self.conductor_api.instance_destroy(context, instance)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._quota_rollback(context, reservations,
+ project_id=project_id)
+ self._quota_commit(context, reservations, project_id=project_id)
# ensure block device mappings are not leaked
self.conductor_api.block_device_mapping_destroy(context, bdms)
@@ -1267,7 +1292,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_event
@wrap_instance_fault
- def terminate_instance(self, context, instance, bdms=None):
+ def terminate_instance(self, context, instance, bdms=None,
+ reservations=None):
"""Terminate an instance on this host."""
# Note(eglynn): we do not decorate this action with reverts_task_state
# because a failure during termination should leave the task state as
@@ -1275,7 +1301,6 @@ class ComputeManager(manager.SchedulerDependentManager):
# attempt should not result in a further decrement of the quota_usages
# in_use count (see bug 1046236).
- elevated = context.elevated()
# NOTE(danms): remove this compatibility in the future
if not bdms:
bdms = self._get_instance_volume_bdms(context, instance)
@@ -1283,7 +1308,8 @@ class ComputeManager(manager.SchedulerDependentManager):
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_terminate_instance(instance, bdms):
try:
- self._delete_instance(context, instance, bdms)
+ self._delete_instance(context, instance, bdms,
+ reservations=reservations)
except exception.InstanceTerminationFailure as error:
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance=instance)
@@ -1337,22 +1363,34 @@ class ComputeManager(manager.SchedulerDependentManager):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
- def soft_delete_instance(self, context, instance):
+ def soft_delete_instance(self, context, instance, reservations=None):
"""Soft delete an instance on this host."""
- self._notify_about_instance_usage(context, instance,
- "soft_delete.start")
+
+ if context.is_admin and context.project_id != instance['project_id']:
+ project_id = instance['project_id']
+ else:
+ project_id = context.project_id
+
try:
- self.driver.soft_delete(instance)
- except NotImplementedError:
- # Fallback to just powering off the instance if the hypervisor
- # doesn't implement the soft_delete method
- self.driver.power_off(instance)
- current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.SOFT_DELETED,
- expected_task_state=task_states.SOFT_DELETING,
- task_state=None)
+ self._notify_about_instance_usage(context, instance,
+ "soft_delete.start")
+ try:
+ self.driver.soft_delete(instance)
+ except NotImplementedError:
+ # Fallback to just powering off the instance if the
+ # hypervisor doesn't implement the soft_delete method
+ self.driver.power_off(instance)
+ current_power_state = self._get_power_state(context, instance)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.SOFT_DELETED,
+ expected_task_state=task_states.SOFT_DELETING,
+ task_state=None)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._quota_rollback(context, reservations,
+ project_id=project_id)
+ self._quota_commit(context, reservations, project_id=project_id)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -2080,13 +2118,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self._quota_commit(context, reservations)
- def _quota_commit(self, context, reservations):
+ def _quota_commit(self, context, reservations, project_id=None):
if reservations:
- self.conductor_api.quota_commit(context, reservations)
+ self.conductor_api.quota_commit(context, reservations,
+ project_id=project_id)
- def _quota_rollback(self, context, reservations):
+ def _quota_rollback(self, context, reservations, project_id=None):
if reservations:
- self.conductor_api.quota_rollback(context, reservations)
+ self.conductor_api.quota_rollback(context, reservations,
+ project_id=project_id)
def _prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
@@ -3709,6 +3749,9 @@ class ComputeManager(manager.SchedulerDependentManager):
bdms = capi.block_device_mapping_get_all_by_instance(
context, instance)
LOG.info(_('Reclaiming deleted instance'), instance=instance)
+ # NOTE(comstud): Quotas were already accounted for when
+ # the instance was soft deleted, so there's no need to
+ # pass reservations here.
self._delete_instance(context, instance, bdms)
@manager.periodic_task
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 914c45471..62c1ed9a0 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -163,6 +163,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.25 - Add attach_interface() and detach_interface()
2.26 - Add validate_console_token to ensure the service connects to
vnc on the correct port
+ 2.27 - Adds 'reservations' to terminate_instance() and
+ soft_delete_instance()
'''
#
@@ -588,13 +590,14 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def terminate_instance(self, ctxt, instance, bdms):
+ def terminate_instance(self, ctxt, instance, bdms, reservations=None):
instance_p = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('terminate_instance',
- instance=instance_p, bdms=bdms_p),
+ instance=instance_p, bdms=bdms_p,
+ reservations=reservations),
topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.4')
+ version='2.27')
def unpause_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
@@ -615,11 +618,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
- def soft_delete_instance(self, ctxt, instance):
+ def soft_delete_instance(self, ctxt, instance, reservations=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('soft_delete_instance',
- instance=instance_p),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p, reservations=reservations),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='2.27')
def restore_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 0b46c3d2f..a8a6e9f53 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -323,11 +323,23 @@ class LocalAPI(object):
instance,
migration)
- def quota_commit(self, context, reservations):
- return self._manager.quota_commit(context, reservations)
-
- def quota_rollback(self, context, reservations):
- return self._manager.quota_rollback(context, reservations)
+ def quota_commit(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota commit call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self._manager.quota_commit(context,
+ reservations=reservations)
+
+ def quota_rollback(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota rollback call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self._manager.quota_rollback(context,
+ reservations=reservations)
def get_ec2_ids(self, context, instance):
return self._manager.get_ec2_ids(context, instance)
@@ -656,11 +668,21 @@ class API(object):
instance,
migration)
- def quota_commit(self, context, reservations):
- return self.conductor_rpcapi.quota_commit(context, reservations)
-
- def quota_rollback(self, context, reservations):
- return self.conductor_rpcapi.quota_rollback(context, reservations)
+ def quota_commit(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota commit call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self.conductor_rpcapi.quota_commit(context, reservations)
+
+ def quota_rollback(self, context, reservations, project_id=None):
+ # FIXME(comstud): bug 1153795: Conductor manager should accept
+ # a project_id kwarg to be able to pass to the quota rollback call.
+ if project_id is None:
+ project_id = context.project_id
+ with utils.temporary_mutation(context, project_id=project_id):
+ return self.conductor_rpcapi.quota_rollback(context, reservations)
def get_ec2_ids(self, context, instance):
return self.conductor_rpcapi.get_ec2_ids(context, instance)
diff --git a/nova/quota.py b/nova/quota.py
index 3361154dd..2bd36b2a6 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -508,7 +508,7 @@ class NoopQuotaDriver(object):
quotas[resource.name] = -1
return quotas
- def limit_check(self, context, resources, values):
+ def limit_check(self, context, resources, values, project_id=None):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
@@ -528,10 +528,14 @@ class NoopQuotaDriver(object):
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
pass
- def reserve(self, context, resources, deltas, expire=None):
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
@@ -561,24 +565,33 @@ class NoopQuotaDriver(object):
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
return []
- def commit(self, context, reservations):
+ def commit(self, context, reservations, project_id=None):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
pass
- def rollback(self, context, reservations):
+ def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
+ :param project_id: Specify the project_id if current context
+ is admin and admin wants to impact on
+ common user's tenant.
"""
pass
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index e94d8b788..ec35f2c5a 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1828,7 +1828,8 @@ class ComputeTestCase(BaseTestCase):
"""
instance = self._create_fake_instance()
- def fake_delete_instance(context, instance, bdms):
+ def fake_delete_instance(context, instance, bdms,
+ reservations=None):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
@@ -1990,6 +1991,59 @@ class ComputeTestCase(BaseTestCase):
self.mox.ReplayAll()
return reservations
+ def test_quotas_succesful_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ resvs = self._ensure_quota_reservations_committed()
+ self.compute.terminate_instance(self.context, instance,
+ bdms=None, reservations=resvs)
+
+ def test_quotas_failed_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ def fake_shutdown_instance(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute, '_shutdown_instance',
+ fake_shutdown_instance)
+
+ resvs = self._ensure_quota_reservations_rolledback()
+ self.assertRaises(test.TestingException,
+ self.compute.terminate_instance,
+ self.context, instance,
+ bdms=None, reservations=resvs)
+
+ def test_quotas_succesful_soft_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ params=dict(task_state=task_states.SOFT_DELETING)))
+ resvs = self._ensure_quota_reservations_committed()
+ self.compute.soft_delete_instance(self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_failed_soft_delete(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ params=dict(task_state=task_states.SOFT_DELETING)))
+
+ def fake_soft_delete(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute.driver, 'soft_delete',
+ fake_soft_delete)
+
+ resvs = self._ensure_quota_reservations_rolledback()
+ self.assertRaises(test.TestingException,
+ self.compute.soft_delete_instance,
+ self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_destroy_of_soft_deleted_instance(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance(
+ params=dict(vm_state=vm_states.SOFT_DELETED)))
+ # Termination should be successful, but quota reservations
+ # rolled back because the instance was in SOFT_DELETED state.
+ resvs = self._ensure_quota_reservations_rolledback()
+ self.compute.terminate_instance(self.context, instance,
+ bdms=None, reservations=resvs)
+
def test_finish_resize(self):
# Contrived test to ensure finish_resize doesn't raise anything.
@@ -4302,33 +4356,6 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], None)
self.assertTrue(instance['deleted'])
- def test_repeated_delete_quota(self):
- in_use = {'instances': 1}
-
- def fake_reserve(context, expire=None, project_id=None, **deltas):
- return dict(deltas.iteritems())
-
- self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
-
- def fake_commit(context, deltas, project_id=None):
- for k, v in deltas.iteritems():
- in_use[k] = in_use.get(k, 0) + v
-
- self.stubs.Set(QUOTAS, 'commit', fake_commit)
-
- instance, instance_uuid = self._run_instance(params={
- 'host': CONF.host})
-
- self.compute_api.delete(self.context, instance)
- self.compute_api.delete(self.context, instance)
-
- instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.DELETING)
-
- self.assertEquals(in_use['instances'], 0)
-
- db.instance_destroy(self.context, instance['uuid'])
-
def test_delete_fast_if_host_not_set(self):
instance = self._create_fake_instance({'host': None})
self.compute_api.delete(self.context, instance)
@@ -4363,9 +4390,8 @@ class ComputeAPITestCase(BaseTestCase):
instance, instance_uuid = self._run_instance(params={
'host': CONF.host})
+ # Make sure this is not called on the API side.
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg(),
- project_id=mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.soft_delete(self.context, instance)
@@ -4521,9 +4547,6 @@ class ComputeAPITestCase(BaseTestCase):
# Ensure quotas are committed
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
- if self.__class__.__name__ == 'CellsComputeAPITestCase':
- # Called a 2nd time (for the child cell) when testing cells
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute_api.restore(self.context, instance)
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index 8ba35e033..190d75a9d 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -18,10 +18,12 @@ Tests For Compute w/ Cells
"""
import functools
+from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova import db
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova import quota
from nova.tests.compute import test_compute
@@ -40,7 +42,16 @@ def stub_call_to_cells(context, instance, method, *args, **kwargs):
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
- return fn(context, instance, *args, **kwargs)
+ # Use NoopQuotaDriver in child cells.
+ saved_quotas = quota.QUOTAS
+ quota.QUOTAS = quota.QuotaEngine(
+ quota_driver_class=quota.NoopQuotaDriver())
+ compute_api.QUOTAS = quota.QUOTAS
+ try:
+ return fn(context, instance, *args, **kwargs)
+ finally:
+ quota.QUOTAS = saved_quotas
+ compute_api.QUOTAS = saved_quotas
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
@@ -52,7 +63,17 @@ def stub_cast_to_cells(context, instance, method, *args, **kwargs):
db.instance_update(context, instance['uuid'],
dict(vm_state=instance['vm_state'],
task_state=instance['task_state']))
- fn(context, instance, *args, **kwargs)
+
+ # Use NoopQuotaDriver in child cells.
+ saved_quotas = quota.QUOTAS
+ quota.QUOTAS = quota.QuotaEngine(
+ quota_driver_class=quota.NoopQuotaDriver())
+ compute_api.QUOTAS = quota.QUOTAS
+ try:
+ fn(context, instance, *args, **kwargs)
+ finally:
+ quota.QUOTAS = saved_quotas
+ compute_api.QUOTAS = saved_quotas
def deploy_stubs(stubs, api, original_instance=None):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 6c40a95e2..a089e9dc6 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -221,7 +221,9 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_soft_delete_instance(self):
self._test_compute_api('soft_delete_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance,
+ reservations=['uuid1', 'uuid2'],
+ version='2.27')
def test_restore_instance(self):
self._test_compute_api('restore_instance', 'cast',
@@ -368,7 +370,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance, bdms=[],
- version='2.4')
+ reservations=['uuid1', 'uuid2'],
+ version='2.27')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 72c04e427..00f7faac5 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -990,6 +990,40 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
+ def test_quota_commit_with_project_id(self):
+ diff_proj_id = 'diff_fake_proj_id'
+ self.assertNotEqual(self.context.project_id, diff_proj_id)
+ call_info = {}
+
+ def mgr_quota_commit(ctxt, reservations):
+ call_info['resvs'] = reservations
+ call_info['project_id'] = ctxt.project_id
+
+ self.stubs.Set(self.conductor_manager, 'quota_commit',
+ mgr_quota_commit)
+
+ self.conductor.quota_commit(self.context, 'fake_resvs',
+ project_id=diff_proj_id)
+ self.assertEqual(diff_proj_id, call_info['project_id'])
+ self.assertEqual('fake_resvs', call_info['resvs'])
+
+ def test_quota_rollback_with_project_id(self):
+ diff_proj_id = 'diff_fake_proj_id'
+ self.assertNotEqual(self.context.project_id, diff_proj_id)
+ call_info = {}
+
+ def mgr_quota_rollback(ctxt, reservations):
+ call_info['resvs'] = reservations
+ call_info['project_id'] = ctxt.project_id
+
+ self.stubs.Set(self.conductor_manager, 'quota_rollback',
+ mgr_quota_rollback)
+
+ self.conductor.quota_rollback(self.context, 'fake_resvs',
+ project_id=diff_proj_id)
+ self.assertEqual(diff_proj_id, call_info['project_id'])
+ self.assertEqual('fake_resvs', call_info['resvs'])
+
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""