summaryrefslogtreecommitdiffstats
path: root/nova/volume/api.py
diff options
context:
space:
mode:
authorBrian Waldon <bcwaldon@gmail.com>2012-02-03 13:29:57 -0800
committerBrian Waldon <bcwaldon@gmail.com>2012-02-14 12:20:20 -0800
commitafd5b22368076fc640563b7df6fb71dab57fe627 (patch)
tree9656e48da966bcb95106d99b3a1f43725870a627 /nova/volume/api.py
parentc9ca372b0b9fe887dd3ac6bdb02514b5495a1917 (diff)
downloadnova-afd5b22368076fc640563b7df6fb71dab57fe627.tar.gz
nova-afd5b22368076fc640563b7df6fb71dab57fe627.tar.xz
nova-afd5b22368076fc640563b7df6fb71dab57fe627.zip
Replace ApiError with new exceptions
* Convert ApiError to EC2APIError
* Add new exceptions to replace ApiError where it didn't belong
* Fixes bug 926250

Change-Id: Ia711440ee0313faf8ea8c87e2c0a2f5b39cc55a2
Diffstat (limited to 'nova/volume/api.py')
-rw-r--r--  nova/volume/api.py  |  25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index b5198137f..8646f65e9 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -71,8 +71,8 @@ class API(base.Base):
check_policy(context, 'create')
if snapshot is not None:
if snapshot['status'] != "available":
- raise exception.ApiError(
- _("Snapshot status must be available"))
+ msg = _("status must be available")
+ raise exception.InvalidSnapshot(reason=msg)
if not size:
size = snapshot['volume_size']
@@ -84,8 +84,7 @@ class API(base.Base):
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
" %(size)sG volume") % locals())
- raise exception.QuotaError(_("Volume quota exceeded. You cannot "
- "create a volume of size %sG") % size)
+ raise exception.QuotaError(code="VolumeSizeTooLarge")
if availability_zone is None:
availability_zone = FLAGS.storage_availability_zone
@@ -131,7 +130,8 @@ class API(base.Base):
def delete(self, context, volume):
volume_id = volume['id']
if volume['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
+ msg = _("Volume status must be available")
+ raise exception.InvalidVolume(reason=msg)
now = utils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
@@ -207,15 +207,18 @@ class API(base.Base):
def check_attach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
+ msg = _("status must be available")
+ raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
- raise exception.ApiError(_("Volume is already attached"))
+ msg = _("already attached")
+ raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
- raise exception.ApiError(_("Volume is already detached"))
+ msg = _("already detached")
+ raise exception.InvalidVolume(reason=msg)
def remove_from_compute(self, context, volume, instance_id, host):
"""Remove volume from specified compute host."""
@@ -266,7 +269,8 @@ class API(base.Base):
check_policy(context, 'create_snapshot', volume)
if ((not force) and (volume['status'] != "available")):
- raise exception.ApiError(_("Volume status must be available"))
+ msg = _("must be available")
+ raise exception.InvalidVolume(reason=msg)
options = {
'volume_id': volume['id'],
@@ -298,7 +302,8 @@ class API(base.Base):
@wrap_check_policy
def delete_snapshot(self, context, snapshot):
if snapshot['status'] != "available":
- raise exception.ApiError(_("Snapshot status must be available"))
+ msg = _("must be available")
+ raise exception.InvalidVolume(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
rpc.cast(context,