author     Eric Day <eday@oddments.org>   2010-12-29 16:29:15 -0800
committer  Eric Day <eday@oddments.org>   2010-12-29 16:29:15 -0800
commit     64078137ce12ee52fff710f5a262d57b4ace2809 (patch)
tree       58c5603f43521a447b7731f193ccc7a107c9742a
parent     6debe20395d6ab476bfd2a237df8c2b08050e0e6 (diff)
Moved ec2 volume operations into a volume API interface for other components to use. Added attach/detach as compute.api methods, since they operate in the context of instances (and to avoid a dependency loop).
-rw-r--r--  nova/api/ec2/cloud.py    141
-rw-r--r--  nova/compute/api.py       34
-rw-r--r--  nova/volume/__init__.py   91
3 files changed, 149 insertions(+), 117 deletions(-)
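For context, here is a minimal usage sketch of the volume interface this
commit introduces. It is illustrative only: "context" is assumed to be an
existing nova RequestContext, flags are assumed to be parsed, and the size,
name, and id values are placeholders.

    from nova import volume

    volume_api = volume.API()

    # create() enforces the volume quota, writes the initial db record
    # (status "creating", attach_status "detached"), and casts
    # "create_volume" to the scheduler; name and description map to
    # display_name/display_description.
    vol = volume_api.create(context, 10, 'scratch', 'temporary scratch space')

    # get() without a volume_id returns every volume for admins, otherwise
    # only the volumes belonging to the caller's project.
    volumes = volume_api.get(context)

    # delete() refuses to run unless the volume is back in the "available"
    # state; it marks the record "deleting" and casts to the volume host.
    volume_api.delete(context, vol['id'])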
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 8c687f173..74c73e0dd 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -38,12 +38,12 @@ from nova import flags
from nova import quota
from nova import rpc
from nova import utils
+from nova import volume
from nova.compute import api as compute_api
from nova.compute import instance_types
FLAGS = flags.FLAGS
-flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
InvalidInputException = exception.InvalidInputException
@@ -89,8 +89,10 @@ class CloudController(object):
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
self.image_service = utils.import_object(FLAGS.image_service)
+ self.volume_api = volume.API()
self.compute_api = compute_api.ComputeAPI(self.network_manager,
- self.image_service)
+ self.image_service,
+ self.volume_api)
self.setup()
def __str__(self):
@@ -451,15 +453,10 @@ class CloudController(object):
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
- if context.user.is_admin():
- volumes = db.volume_get_all(context)
- else:
- volumes = db.volume_get_all_by_project(context, context.project_id)
-
+ volumes = self.volume_api.get(context)
# NOTE(vish): volume_id is an optional list of volume ids to filter by.
volumes = [self._format_volume(context, v) for v in volumes
if volume_id is None or v['id'] in volume_id]
-
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
@@ -498,88 +495,47 @@ class CloudController(object):
return v
def create_volume(self, context, size, **kwargs):
- # check quota
- if quota.allowed_volumes(context, 1, size) < 1:
- logging.warn("Quota exceeeded for %s, tried to create %sG volume",
- context.project_id, size)
- raise quota.QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" % size)
- vol = {}
- vol['size'] = size
- vol['user_id'] = context.user.id
- vol['project_id'] = context.project_id
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating"
- vol['attach_status'] = "detached"
- vol['display_name'] = kwargs.get('display_name')
- vol['display_description'] = kwargs.get('display_description')
- volume_ref = db.volume_create(context, vol)
-
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "create_volume",
- "args": {"topic": FLAGS.volume_topic,
- "volume_id": volume_ref['id']}})
-
+ volume = self.volume_api.create(context, size,
+ kwargs.get('display_name'),
+ kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
- return {'volumeSet': [self._format_volume(context, dict(volume_ref))]}
+ return {'volumeSet': [self._format_volume(context, dict(volume))]}
+ def delete_volume(self, context, volume_id, **kwargs):
+ self.volume_api.delete(context, volume_id)
+ return True
+
+ def update_volume(self, context, volume_id, **kwargs):
+ updatable_fields = ['display_name', 'display_description']
+ changes = {}
+ for field in updatable_fields:
+ if field in kwargs:
+ changes[field] = kwargs[field]
+ if changes:
+ self.volume_api.update(context, volume_id, changes)
+ return True
+
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
- volume_ref = db.volume_get(context, volume_id)
- if not re.match("^/dev/[a-z]d[a-z]+$", device):
- raise exception.ApiError(_("Invalid device specified: %s. "
- "Example device: /dev/vdb") % device)
- # TODO(vish): abstract status checking?
- if volume_ref['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
- if volume_ref['attach_status'] == "attached":
- raise exception.ApiError(_("Volume is already attached"))
- instance_id = ec2_id_to_id(instance_id)
- instance_ref = self.compute_api.get_instance(context, instance_id)
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "attach_volume",
- "args": {"volume_id": volume_ref['id'],
- "instance_id": instance_ref['id'],
- "mountpoint": device}})
- return {'attachTime': volume_ref['attach_time'],
- 'device': volume_ref['mountpoint'],
- 'instanceId': instance_ref['id'],
+ self.compute_api.attach_volume(context, instance_id, volume_id, device)
+ volume = self.volume_api.get(context, volume_id)
+ return {'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': instance_id,
'requestId': context.request_id,
- 'status': volume_ref['attach_status'],
- 'volumeId': volume_ref['id']}
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id}
def detach_volume(self, context, volume_id, **kwargs):
- volume_ref = db.volume_get(context, volume_id)
- instance_ref = db.volume_get_instance(context.elevated(),
- volume_ref['id'])
- if not instance_ref:
- raise exception.ApiError(_("Volume isn't attached to anything!"))
- # TODO(vish): abstract status checking?
- if volume_ref['status'] == "available":
- raise exception.ApiError(_("Volume is already detached"))
- try:
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "detach_volume",
- "args": {"instance_id": instance_ref['id'],
- "volume_id": volume_ref['id']}})
- except exception.NotFound:
- # If the instance doesn't exist anymore,
- # then we need to call detach blind
- db.volume_detached(context)
- instance_id = instance_ref['id']
- ec2_id = id_to_ec2_id(instance_id)
- return {'attachTime': volume_ref['attach_time'],
- 'device': volume_ref['mountpoint'],
- 'instanceId': instance_id,
+ volume = self.volume_api.get(context, volume_id)
+ instance = self.compute_api.detach_volume(context, volume_id)
+ return {'attachTime': volume['attach_time'],
+ 'device': volume['mountpoint'],
+ 'instanceId': id_to_ec2_id(instance['id']),
'requestId': context.request_id,
- 'status': volume_ref['attach_status'],
- 'volumeId': volume_ref['id']}
+ 'status': volume['attach_status'],
+ 'volumeId': volume_id}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -588,16 +544,6 @@ class CloudController(object):
lst = [lst]
return [{label: x} for x in lst]
- def update_volume(self, context, volume_id, **kwargs):
- updatable_fields = ['display_name', 'display_description']
- changes = {}
- for field in updatable_fields:
- if field in kwargs:
- changes[field] = kwargs[field]
- if changes:
- db.volume_update(context, volume_id, kwargs)
- return True
-
def describe_instances(self, context, **kwargs):
return self._format_describe_instances(context)
@@ -805,21 +751,6 @@ class CloudController(object):
db.instance_update(context, inst['id'], kwargs)
return True
- def delete_volume(self, context, volume_id, **kwargs):
- # TODO: return error if not authorized
- volume_ref = db.volume_get(context, volume_id)
- if volume_ref['status'] != "available":
- raise exception.ApiError(_("Volume status must be available"))
- now = datetime.datetime.utcnow()
- db.volume_update(context, volume_ref['id'], {'status': 'deleting',
- 'terminated_at': now})
- host = volume_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": "delete_volume",
- "args": {"volume_id": volume_ref['id']}})
- return True
-
def describe_images(self, context, image_id=None, **kwargs):
# Note: image_id is a list!
images = self.image_service.index(context)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 28af434e3..870fcdbe4 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -30,6 +30,7 @@ from nova import flags
from nova import quota
from nova import rpc
from nova import utils
+from nova import volume
from nova.compute import instance_types
from nova.db import base
@@ -44,13 +45,17 @@ def generate_default_hostname(instance_id):
class ComputeAPI(base.Base):
"""API for interacting with the compute manager."""
- def __init__(self, network_manager=None, image_service=None, **kwargs):
+ def __init__(self, network_manager=None, image_service=None,
+ volume_api=None, **kwargs):
if not network_manager:
network_manager = utils.import_object(FLAGS.network_manager)
self.network_manager = network_manager
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
self.image_service = image_service
+ if not volume_api:
+ volume_api = volume.API()
+ self.volume_api = volume_api
super(ComputeAPI, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
@@ -298,3 +303,30 @@ class ComputeAPI(base.Base):
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})
+
+ def attach_volume(self, context, instance_id, volume_id, device):
+ if not re.match("^/dev/[a-z]d[a-z]+$", device):
+ raise exception.ApiError(_("Invalid device specified: %s. "
+ "Example device: /dev/vdb") % device)
+ self.volume_api.check_attach(context, volume_id)
+ instance = self.get_instance(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "attach_volume",
+ "args": {"volume_id": volume_id,
+ "instance_id": instance_id,
+ "mountpoint": device}})
+
+ def detach_volume(self, context, volume_id):
+ instance = self.db.volume_get_instance(context.elevated(), volume_id)
+ if not instance:
+ raise exception.ApiError(_("Volume isn't attached to anything!"))
+ self.volume_api.check_detach(context, volume_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "detach_volume",
+ "args": {"instance_id": instance['id'],
+ "volume_id": volume_id}})
+ return instance
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index d6e944fc0..48ecdbe68 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -17,15 +17,84 @@
# under the License.
"""
-:mod:`nova.volume` -- Nova Block Storage
-=====================================================
-
-.. automodule:: nova.volume
- :platform: Unix
-.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
-.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
-.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
-.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
-.. moduleauthor:: Manish Singh <yosh@gimp.org>
-.. moduleauthor:: Andy Smith <andy@anarkystic.com>
+Handles all requests relating to volumes.
"""
+
+import datetime
+import logging
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+FLAGS = flags.FLAGS
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
+
+
+class API(base.Base):
+ """API for interacting with the volume manager."""
+
+ def create(self, context, size, name, description):
+ if quota.allowed_volumes(context, 1, size) < 1:
+ logging.warn("Quota exceeeded for %s, tried to create %sG volume",
+ context.project_id, size)
+ raise quota.QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" % size)
+
+ options = {
+ 'size': size,
+ 'user_id': context.user.id,
+ 'project_id': context.project_id,
+ 'availability_zone': FLAGS.storage_availability_zone,
+ 'status': "creating",
+ 'attach_status': "detached",
+ 'display_name': name,
+ 'display_description': description}
+
+ volume = self.db.volume_create(context, options)
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_volume",
+ "args": {"topic": FLAGS.volume_topic,
+ "volume_id": volume['id']}})
+ return volume
+
+ def delete(self, context, volume_id):
+ volume = self.db.volume_get(context, volume_id)
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+ now = datetime.datetime.utcnow()
+ self.db.volume_update(context, volume_id, {'status': 'deleting',
+ 'terminated_at': now})
+ host = volume['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.volume_topic, host),
+ {"method": "delete_volume",
+ "args": {"volume_id": volume_id}})
+
+ def update(self, context, volume_id, fields):
+ self.db.volume_update(context, volume_id, fields)
+
+ def get(self, context, volume_id=None):
+ if volume_id is not None:
+ return self.db.volume_get(context, volume_id)
+ if context.user.is_admin():
+ return self.db.volume_get_all(context)
+ return self.db.volume_get_all_by_project(context, context.project_id)
+
+ def check_attach(self, context, volume_id):
+ volume = self.db.volume_get(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+ if volume['attach_status'] == "attached":
+ raise exception.ApiError(_("Volume is already attached"))
+
+ def check_detach(self, context, volume_id):
+ volume = self.db.volume_get(context, volume_id)
+ # TODO(vish): abstract status checking?
+ if volume['status'] == "available":
+ raise exception.ApiError(_("Volume is already detached"))