From 28e6abf200d50d2d1c4a043c37cd3b3318d8933d Mon Sep 17 00:00:00 2001
From: Zhiteng Huang
Date: Tue, 18 Sep 2012 08:50:17 +0800
Subject: Update quota when deleting volume that failed to be scheduled

If a volume fails to be scheduled, deleting it should also clean up the
quota reservation.

Also, once create_volume is ready to be sent to the scheduler, the
reservation should be committed regardless of whether the backend can
successfully create the volume, because the delete call makes a negative
reservation even for a volume in 'error' status.

This change updates the RPC API to version 2.2.

Fix bug 1052052

Change-Id: Ia632a0e49318d534f0acbd3df5c9f6bb86eefa2a
---
 nova/volume/api.py     | 24 ++++++++++++++++++++----
 nova/volume/manager.py |  6 ------
 2 files changed, 20 insertions(+), 10 deletions(-)

(limited to 'nova/volume')

diff --git a/nova/volume/api.py b/nova/volume/api.py
index 0342c0ac2..6beb771f3 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -109,6 +109,8 @@ class API(base.Base):
             msg = (_("Volume size '%s' must be an integer and greater than 0")
                    % size)
             raise exception.InvalidInput(reason=msg)
+
+        reservations = None
         try:
             reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
         except exception.OverQuota as e:
@@ -165,12 +167,16 @@ class API(base.Base):
                    'metadata': metadata,
                    }
         volume = self.db.volume_create(context, options)
+
+        if reservations:
+            QUOTAS.commit(context, reservations)
+
         self._cast_create_volume(context, volume['id'],
-                                 snapshot_id, image_id, reservations)
+                                 snapshot_id, image_id)
         return volume
 
     def _cast_create_volume(self, context, volume_id,
-                            snapshot_id, image_id, reservations):
+                            snapshot_id, image_id):
 
         # NOTE(Rongze Zhu): It is a simple solution for bug 1008866
         # If snapshot_id is set, make the call create volume directly to
@@ -189,19 +195,29 @@ class API(base.Base):
                      {"method": "create_volume",
                       "args": {"volume_id": volume_id,
                                "snapshot_id": snapshot_id,
-                               "reservations": reservations,
                                "image_id": image_id}})
         else:
             self.scheduler_rpcapi.create_volume(
-                context, volume_id, snapshot_id, image_id, reservations)
+                context, volume_id, snapshot_id, image_id)
 
     @wrap_check_policy
     def delete(self, context, volume, force=False):
         volume_id = volume['id']
         if not volume['host']:
             # NOTE(vish): scheduling failed, so delete it
+            # Note(zhiteng): update volume quota reservation
+            try:
+                reservations = QUOTAS.reserve(context, volumes=-1,
+                                              gigabytes=-volume['size'])
+            except Exception:
+                reservations = None
+                LOG.exception(_("Failed to update quota for deleting volume."))
+
             self.db.volume_destroy(context, volume_id)
+
+            if reservations:
+                QUOTAS.commit(context, reservations)
             return
         if not force and volume['status'] not in ["available", "error"]:
             msg = _("Volume status must be available or error")
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index e25f3daf1..fafdcd5be 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -160,14 +160,8 @@ class VolumeManager(manager.SchedulerDependentManager):
             model_update = self.driver.create_export(context, volume_ref)
             if model_update:
                 self.db.volume_update(context, volume_ref['id'], model_update)
-
-            # Commit the reservation
-            if reservations:
-                QUOTAS.commit(context, reservations)
         except Exception:
             with excutils.save_and_reraise_exception():
-                if reservations:
-                    QUOTAS.rollback(context, reservations)
                 self.db.volume_update(context,
                                       volume_ref['id'],
                                       {'status': 'error'})
-- 
cgit
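
For reference, a minimal sketch of the quota flow this patch relies on: reserve
at create time, commit as soon as the create request is handed off, and make a
matching negative reservation when deleting a volume that never reached a host.
FakeQuotas and the volume dict below are simplified stand-ins invented for
illustration, not the real nova.quota.QUOTAS API.

    class FakeQuotas(object):
        """Simplified stand-in for nova.quota.QUOTAS (illustration only)."""

        def __init__(self):
            self.usage = {'volumes': 0, 'gigabytes': 0}
            self._reservations = {}
            self._next_id = 0

        def reserve(self, context, **deltas):
            # Record the deltas but do not apply them until commit().
            self._next_id += 1
            self._reservations[self._next_id] = deltas
            return self._next_id

        def commit(self, context, reservation_id):
            # Apply the recorded deltas to the usage counters.
            for resource, delta in self._reservations.pop(reservation_id).items():
                self.usage[resource] += delta

        def rollback(self, context, reservation_id):
            # Discard the reservation without touching usage.
            self._reservations.pop(reservation_id, None)


    QUOTAS = FakeQuotas()

    # create(): reserve, then commit once the request is cast out, even though
    # the backend may still fail and leave the volume in 'error' status.
    reservations = QUOTAS.reserve(None, volumes=1, gigabytes=10)
    QUOTAS.commit(None, reservations)

    # delete() of a volume that was never scheduled (volume['host'] is empty):
    # a matching negative reservation brings usage back down.
    volume = {'size': 10, 'host': None}  # simplified volume record
    reservations = QUOTAS.reserve(None, volumes=-1, gigabytes=-volume['size'])
    QUOTAS.commit(None, reservations)

    assert QUOTAS.usage == {'volumes': 0, 'gigabytes': 0}

Committing at cast time keeps create and delete symmetric: delete can always
apply the negative reservation, whether the volume ended up 'available' or
'error'.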