author	Vishvananda Ishaya <vishvananda@yahoo.com>	2010-08-25 13:14:02 -0700
committer	Vishvananda Ishaya <vishvananda@yahoo.com>	2010-08-25 13:14:02 -0700
commit	199b9b828d75ec6fa08481585aa5df462497c50f (patch)
tree	7bf9a626cde91411a3ade6fc6543adb1459258d9
parent	548ae499c29341d58ad18ed5262f965ad0b5b0a9 (diff)
parent	9fa4543e9f6c6c5bb0954954649b7c691e462e3c (diff)
merged jesse
-rw-r--r--	nova/db/api.py	11
-rw-r--r--	nova/db/sqlalchemy/api.py	12
-rw-r--r--	nova/endpoint/cloud.py	55
-rw-r--r--	nova/volume/service.py	54
4 files changed, 82 insertions, 50 deletions
diff --git a/nova/db/api.py b/nova/db/api.py
index e4d79d16f..edc3b7bdc 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -195,7 +195,6 @@ def instance_get_by_name(context, name):
return _impl.instance_get_by_project(context, name)
-
def instance_get_by_project(context, project_id):
"""Get all instance belonging to a project."""
return _impl.instance_get_by_project(context, project_id)
@@ -356,6 +355,16 @@ def volume_get(context, volume_id):
return _impl.volume_get(context, volume_id)
+def volume_get_all(context):
+ """Get all volumes."""
+ return _impl.volume_get_all(context)
+
+
+def volume_get_by_project(context, project_id):
+ """Get all volumes belonging to a project."""
+ return _impl.volume_get_by_project(context, project_id)
+
+
def volume_get_shelf_and_blade(context, volume_id):
"""Get the shelf and blade allocated to the volume."""
return _impl.volume_get_shelf_and_blade(context, volume_id)
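Both additions follow the file's existing convention: nova/db/api.py is a thin dispatch layer that forwards every call to a backend module held in _impl (nova/db/sqlalchemy/api.py in the default configuration). A minimal, self-contained sketch of that pattern, with a hypothetical in-memory FakeImpl standing in for the real backend:

class FakeImpl(object):
    # Hypothetical in-memory backend, only for illustration.
    def __init__(self):
        self._volumes = [{'id': 1, 'project_id': 'proj-a'},
                         {'id': 2, 'project_id': 'proj-b'}]

    def volume_get_all(self, context):
        return list(self._volumes)

    def volume_get_by_project(self, context, project_id):
        return [v for v in self._volumes
                if v['project_id'] == project_id]

_impl = FakeImpl()

def volume_get_all(context):
    """Get all volumes."""
    return _impl.volume_get_all(context)

def volume_get_by_project(context, project_id):
    """Get all volumes belonging to a project."""
    return _impl.volume_get_by_project(context, project_id)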
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 2c5434b8f..2ce54a1d7 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -441,6 +441,18 @@ def volume_get(context, volume_id):
return models.Volume.find(volume_id)
+def volume_get_all(context):
+ return models.Volume.all()
+
+
+def volume_get_by_project(context, project_id):
+ session = models.NovaBase.get_session()
+ query = session.query(models.Volume)
+ results = query.filter_by(project_id=project_id).all()
+ session.commit()
+ return results
+
+
def volume_get_shelf_and_blade(context, volume_id):
volume_ref = volume_get(context, volume_id)
export_device = volume_ref.export_device
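The new volume_get_by_project opens a session, filters by project_id, and then calls session.commit() even though the query is read-only; it also never closes the session. A standalone sketch of the same query in plain SQLAlchemy, with a stand-in Volume model and the commit dropped on the read path:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Volume(Base):
    # Stand-in for nova's models.Volume.
    __tablename__ = 'volumes'
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

def volume_get_by_project(project_id):
    session = Session()
    try:
        # Read-only query: no commit needed on this path.
        return session.query(Volume).filter_by(project_id=project_id).all()
    finally:
        session.close()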
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 97d978ccd..64a705e6d 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -256,27 +256,29 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def describe_volumes(self, context, **kwargs):
- volumes = []
- for volume in self.volumes:
- if context.user.is_admin() or volume['project_id'] == context.project.id:
- v = self.format_volume(context, volume)
- volumes.append(v)
- return defer.succeed({'volumeSet': volumes})
-
- def format_volume(self, context, volume):
+ if context.user.is_admin():
+ volumes = db.volume_get_all(context)
+ else:
+ volumes = db.volume_get_by_project(context, context.project.id)
+
+ volumes = [self._format_volume(context, v) for v in volumes]
+
+ return {'volumeSet': volumes}
+
+ def _format_volume(self, context, volume):
v = {}
- v['volumeId'] = volume['volume_id']
+ v['volumeId'] = volume['id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
- v['createTime'] = volume['create_time']
+ # v['createTime'] = volume['create_time']
if context.user.is_admin():
v['status'] = '%s (%s, %s, %s, %s)' % (
- volume.get('status', None),
- volume.get('user_id', None),
- volume.get('node_name', None),
- volume.get('instance_id', ''),
- volume.get('mountpoint', ''))
+ volume['status'],
+ volume['user_id'],
+ 'node_name',
+ volume['instance_id'],
+ volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': volume['delete_on_termination'],
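Note that the rewritten helper indexes the record directly instead of using .get() defaults, and that the admin status line interpolates the literal string 'node_name'; the node_name update in nova/volume/service.py below is commented out, so this may be a deliberate placeholder. A tiny runnable illustration with hypothetical values:

# Hypothetical record; with the patch as written an admin sees the
# literal text 'node_name' in the formatted status string.
volume = {'status': 'in-use', 'user_id': 'user-1',
          'instance_id': 'i-1', 'mountpoint': '/dev/vdb'}
status = '%s (%s, %s, %s, %s)' % (volume['status'], volume['user_id'],
                                  'node_name', volume['instance_id'],
                                  volume['mountpoint'])
print(status)  # in-use (in-use, user-1, node_name, i-1, /dev/vdb)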
@@ -291,14 +293,20 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def create_volume(self, context, size, **kwargs):
- # TODO(vish): refactor this to create the volume object here and tell service to create it
- volume_id = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
- "args": {"size": size,
- "user_id": context.user.id,
- "project_id": context.project.id}})
- # NOTE(vish): rpc returned value is in the result key in the dictionary
+ vol = {}
+ vol['size'] = size
+ vol['user_id'] = context.user.id
+ vol['project_id'] = context.project.id
+ vol['availability_zone'] = FLAGS.storage_availability_zone
+ vol['status'] = "creating"
+ vol['attach_status'] = "detached"
+ volume_id = db.volume_create(context, vol)
+
+ yield rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
+ "args": {"volume_id": volume_id}})
+
volume = db.volume_get(context, volume_id)
- defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
+ defer.returnValue({'volumeSet': [self._format_volume(context, volume)]})
def _get_address(self, context, public_ip):
# FIXME(vish) this should move into network.py
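The create path changes shape here: instead of a blocking rpc.call that waits for the volume worker to return an id, the API node now writes the row itself and fires an asynchronous rpc.cast, so the worker only fills in state later. A minimal sketch of the call-versus-cast distinction, assuming a toy queue-based rpc layer rather than nova's real one:

import queue
import threading

topic = queue.Queue()  # stands in for the AMQP volume topic

def worker():
    while True:
        msg = topic.get()
        print('worker handling %(method)s with %(args)s' % msg)
        topic.task_done()

threading.Thread(target=worker, daemon=True).start()

def cast(msg):
    # Fire-and-forget: enqueue and return immediately, like rpc.cast.
    topic.put(msg)

volume_id = 42  # stands in for db.volume_create(context, vol)
cast({'method': 'create_volume', 'args': {'volume_id': volume_id}})
topic.join()  # only for the demo; the API node would not wait

Because the cast does not wait, the db.volume_get immediately afterwards returns the row in its initial 'creating' state; callers must describe_volumes again to observe 'available'.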
@@ -334,8 +342,7 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
volume = db.volume_get(context, volume_id)
- instance_id = volume.get('instance_id', None)
- if not instance_id:
+ if volume['instance_id'] is None:
raise exception.Error("Volume isn't attached to anything!")
if volume['status'] == "available":
raise exception.Error("Volume is already detached")
diff --git a/nova/volume/service.py b/nova/volume/service.py
index 37781252a..7e32f2d8d 100644
--- a/nova/volume/service.py
+++ b/nova/volume/service.py
@@ -65,34 +65,40 @@ class VolumeService(service.Service):
self._exec_init_volumes()
@defer.inlineCallbacks
- @validate.rangetest(size=(0, 1000))
- def create_volume(self, size, user_id, project_id, context=None):
+ # @validate.rangetest(size=(0, 1000))
+ def create_volume(self, volume_id, context=None):
"""
Creates an exported volume (fake or real),
restarts exports to make it available.
Volume at this point has size, owner, and zone.
"""
- logging.debug("Creating volume of size: %s" % (size))
-
- vol = {}
- vol['node_name'] = FLAGS.node_name
- vol['size'] = size
- vol['user_id'] = user_id
- vol['project_id'] = project_id
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating" # creating | available | in-use
- # attaching | attached | detaching | detached
- vol['attach_status'] = "detached"
- volume_id = db.volume_create(context, vol)
+ logging.info("volume %s: creating" % (volume_id))
+
+ volume_ref = db.volume_get(context, volume_id)
+
+ # db.volume_update(context, volume_id, {'node_name': FLAGS.node_name})
+
+ size = volume_ref['size']
+ logging.debug("volume %s: creating lv of size %sG" % (volume_id, size))
yield self._exec_create_volume(volume_id, size)
+
+ logging.debug("volume %s: allocating shelf & blade" % (volume_id))
(shelf_id, blade_id) = db.volume_allocate_shelf_and_blade(context,
volume_id)
+
+ logging.debug("volume %s: exporting shelf %s & blade %s" % (volume_id,
+ shelf_id, blade_id))
+
yield self._exec_create_export(volume_id, shelf_id, blade_id)
# TODO(joshua): We need to trigger a fanout message
# for aoe-discover on all the nodes
- yield self._exec_ensure_exports()
+
db.volume_update(context, volume_id, {'status': 'available'})
- logging.debug("restarting exports")
+
+ logging.debug("volume %s: re-exporting all values" % (volume_id))
+ yield self._exec_ensure_exports()
+
+ logging.debug("volume %s: created successfully" % (volume_id))
defer.returnValue(volume_id)
@defer.inlineCallbacks
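The reworked create_volume keeps the file's Twisted style: it is a generator decorated with @defer.inlineCallbacks, and each yield suspends on a Deferred (the _exec_* helpers) without blocking the reactor. A minimal, self-contained sketch of that pattern, with timed no-ops standing in for the _exec_* steps:

from twisted.internet import defer, reactor, task

@defer.inlineCallbacks
def create_volume(volume_id):
    # Each yield waits on a Deferred; deferLater stands in for the
    # _exec_create_volume / _exec_create_export steps.
    yield task.deferLater(reactor, 0.1, lambda: None)  # create lv
    yield task.deferLater(reactor, 0.1, lambda: None)  # create export
    defer.returnValue(volume_id)  # becomes the Deferred's result

def done(volume_id):
    print('volume %s: created successfully' % volume_id)
    reactor.stop()

create_volume(42).addCallback(done)
reactor.run()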
@@ -139,8 +145,7 @@ class VolumeService(service.Service):
defer.returnValue(None)
yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (self,
- shelf_id,
+ (shelf_id,
blade_id,
FLAGS.aoe_eth_dev,
FLAGS.volume_group,
@@ -152,23 +157,22 @@ class VolumeService(service.Service):
if FLAGS.fake_storage:
defer.returnValue(None)
yield process.simple_execute(
- "sudo vblade-persist stop %s %s" % (self, shelf_id,
- blade_id),
+ "sudo vblade-persist stop %s %s" % (shelf_id, blade_id),
terminate_on_stderr=False)
yield process.simple_execute(
- "sudo vblade-persist destroy %s %s" % (self, shelf_id,
- blade_id),
+ "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id),
terminate_on_stderr=False)
@defer.inlineCallbacks
def _exec_ensure_exports(self):
if FLAGS.fake_storage:
defer.returnValue(None)
- # NOTE(vish): these commands sometimes sends output to stderr for warnings
+
+ yield process.simple_execute("sleep 5") # wait for blades to appear
yield process.simple_execute("sudo vblade-persist auto all",
- terminate_on_stderr=False)
+ check_exit_code=False)
yield process.simple_execute("sudo vblade-persist start all",
- terminate_on_stderr=False)
+ check_exit_code=False)
@defer.inlineCallbacks
def _exec_init_volumes(self):
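Two details in the export hunks are easy to miss: the error-handling knob changes from terminate_on_stderr=False (tolerate warning output on stderr) to check_exit_code=False (tolerate a nonzero exit), and a fixed "sleep 5" is used as a crude wait for AoE blades to appear before re-exporting. A synchronous sketch of that tolerant-execution pattern using only the standard library (process.simple_execute itself is nova's Twisted-based helper; this hypothetical stand-in only mirrors the check_exit_code behaviour):

import subprocess
import time

def simple_execute(cmd, check_exit_code=True):
    # Hypothetical synchronous stand-in for nova's process.simple_execute.
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if check_exit_code and result.returncode != 0:
        raise RuntimeError('%s failed: %s' % (cmd, result.stderr))
    return result.stdout

time.sleep(5)  # crude wait for AoE blades to appear, as in the patch
simple_execute('sudo vblade-persist auto all', check_exit_code=False)
simple_execute('sudo vblade-persist start all', check_exit_code=False)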