summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Joshua McKenty <jmckenty@gmail.com> 2010-06-24 04:11:54 +0100
committer: andy <github@anarkystic.com> 2010-06-24 04:11:54 +0100
commit: 32850264fdec24971683f06ff4d1420691d0bf0d (patch)
tree: 2168748fd2efa4ad4ce0bab36854e4c7168d2fb3
parent: c02283d077c667fa406d7e43d7a312c0f1fbdd86 (diff)
Volumes support intermediate state. Don't have to cast to storage nodes for attach/detach anymore, just let node update redis with state.
-rw-r--r--  nova/compute/model.py           14
-rw-r--r--  nova/compute/node.py            18
-rw-r--r--  nova/datastore.py               13
-rw-r--r--  nova/endpoint/cloud.py          65
-rw-r--r--  nova/tests/storage_unittest.py  19
-rw-r--r--  nova/volume/storage.py          72
6 files changed, 108 insertions(+), 93 deletions(-)
diff --git a/nova/compute/model.py b/nova/compute/model.py
index 2754e9e6d..03de7a455 100644
--- a/nova/compute/model.py
+++ b/nova/compute/model.py
@@ -190,20 +190,6 @@ class Instance(object):
""" Returns a reservation object """
pass
-# class Reservation(object):
-# """ ORM wrapper for a batch of launched instances """
-# def __init__(self):
-# pass
-#
-# def userdata(self):
-# """ """
-# pass
-#
-#
-# class NodeDirectory(object):
-# def __init__(self):
-# pass
-#
if __name__ == "__main__":
import doctest
diff --git a/nova/compute/node.py b/nova/compute/node.py
index 72c2f2b70..b8b46d1e2 100644
--- a/nova/compute/node.py
+++ b/nova/compute/node.py
@@ -46,6 +46,7 @@ from nova import utils
from nova.compute import disk
from nova.compute import model
from nova.compute import network
+from nova.volume import storage
from nova.objectstore import image # for image_path flag
FLAGS = flags.FLAGS
@@ -203,24 +204,29 @@ class Node(object, service.Service):
@defer.inlineCallbacks
@exception.wrap_exception
def attach_volume(self, instance_id = None,
- aoe_device = None, mountpoint = None):
- utils.runthis("Attached Volume: %s",
+ volume_id = None, mountpoint = None):
+ volume = storage.get_volume(volume_id)
+ yield self._init_aoe()
+ yield utils.runthis("Attached Volume: %s",
"sudo virsh attach-disk %s /dev/etherd/%s %s"
- % (instance_id, aoe_device, mountpoint.split("/")[-1]))
- return defer.succeed(True)
+ % (instance_id, volume['aoe_device'], mountpoint.split("/")[-1]))
+ volume.finish_attach()
+ defer.returnValue(True)
def _init_aoe(self):
utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover")
utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat")
@exception.wrap_exception
- def detach_volume(self, instance_id, mountpoint):
+ def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
# despite the documentation, virsh detach-disk just wants the device
# name without the leading /dev/
- target = mountpoint.rpartition('/dev/')[2]
+ volume = storage.get_volume(volume_id)
+ target = volume['mountpoint'].rpartition('/dev/')[2]
utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s "
% (instance_id, target))
+ volume.finish_detach()
return defer.succeed(True)
diff --git a/nova/datastore.py b/nova/datastore.py
index 0da89d594..055a30a79 100644
--- a/nova/datastore.py
+++ b/nova/datastore.py
@@ -76,10 +76,19 @@ class RedisModel(object):
self.set_default_state()
def set_default_state(self):
- self.state = {'state' : 'pending'}
+ self.state = {'state' : 'pending',
+ 'node_name': 'unassigned',
+ 'project_id': 'unassigned',
+ 'user_id': 'unassigned'}
self.state[self.object_type+"_id"] = self.object_id
self.state["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
-
+
+ @property
+ def project(self):
+ if self.state.get('project_id', None):
+ return self.state['project_id']
+ return self.state.get('owner_id', 'unassigned')
+
@property
def __redis_key(self):
""" Magic string for keys """
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 2d7e82387..9ad5816b4 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -263,6 +263,15 @@ class CloudController(object):
volume.get('node_name', None),
volume.get('instance_id', ''),
volume.get('mountpoint', ''))
+ if volume['status'] == 'attached':
+ v['attachmentSet'] = [{'attachTime': volume['attachTime'],
+ 'deleteOnTermination': volume['deleteOnTermination'],
+ 'device' : volume['mountpoint'],
+ 'instanceId' : volume['instance_id'],
+ 'status' : 'attached',
+ 'volume_id' : volume['volume_id']}]
+ else:
+ v['attachmentSet'] = [{}]
return v
@rbac.allow('projectmanager', 'sysadmin')
@@ -302,54 +311,58 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
- for volume in self.volumes:
- if volume['volume_id'] == volume_id:
- if context.user.is_admin() or volume['project_id'] == context.project.id:
- return volume
+ volume = storage.get_volume(volume_id)
+ if context.user.is_admin() or volume['project_id'] == context.project.id:
+ return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@rbac.allow('projectmanager', 'sysadmin')
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume = self._get_volume(context, volume_id)
- storage_node = volume['node_name']
- # TODO: (joshua) Fix volumes to store creator id
+ if volume['status'] == "attached":
+ raise exception.Error("Volume is already attached")
+ volume.start_attach(instance_id, device)
instance = self._get_instance(context, instance_id)
compute_node = instance['node_name']
- aoe_device = volume['aoe_device']
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "attach_volume",
- "args" : {"aoe_device": aoe_device,
- "instance_id" : instance_id,
- "mountpoint" : device}})
- rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
- {"method": "attach_volume",
"args" : {"volume_id": volume_id,
"instance_id" : instance_id,
"mountpoint" : device}})
- return defer.succeed(True)
+ return defer.succeed({'attachTime' : volume['attachTime'],
+ 'device' : volume['mountpoint'],
+ 'instanceId' : instance_id,
+ 'requestId' : context.request_id,
+ 'status' : volume['attachStatus'],
+ 'volumeId' : volume_id})
@rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
- # TODO(joshua): Make sure the updated state has been received first
volume = self._get_volume(context, volume_id)
- storage_node = volume['node_name']
- if 'instance_id' in volume.keys():
- instance_id = volume['instance_id']
+ instance_id = volume.get('instance_id', None)
+ if volume['status'] == "available":
+ raise exception.Error("Volume is already detached")
+ volume.start_detach()
+ if instance_id:
try:
instance = self._get_instance(context, instance_id)
- compute_node = instance['node_name']
- mountpoint = volume['mountpoint']
- rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
+ rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "detach_volume",
"args" : {"instance_id": instance_id,
- "mountpoint": mountpoint}})
+ "mountpoint": volume['mountpoint']}})
except exception.NotFound:
- pass
- rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
- {"method": "detach_volume",
- "args" : {"volume_id": volume_id}})
- return defer.succeed(True)
+ # If the instance doesn't exist anymore,
+ # then we need to call detach blind
+ volume.finish_detach()
+ else:
+ raise exception.Error("Volume isn't attached to anything!")
+ return defer.succeed({'attachTime' : volume['attachTime'],
+ 'device' : volume['mountpoint'],
+ 'instanceId' : instance_id,
+ 'requestId' : context.request_id,
+ 'status' : volume['attachStatus'],
+ 'volumeId' : volume_id})
def _convert_to_set(self, lst, str):
if lst == None or lst == []:
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
index 80cfaebec..cbfedbfc1 100644
--- a/nova/tests/storage_unittest.py
+++ b/nova/tests/storage_unittest.py
@@ -54,11 +54,11 @@ class StorageTestCase(test.TrialTestCase):
volume_id = self.mystorage.create_volume(vol_size, user_id)
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
- self.mystorage.get_volume(volume_id)['volume_id'])
+ storage.get_volume(volume_id)['volume_id'])
rv = self.mystorage.delete_volume(volume_id)
self.assertRaises(exception.Error,
- self.mystorage.get_volume,
+ storage.get_volume,
volume_id)
def test_run_attach_detach_volume(self):
@@ -68,11 +68,14 @@ class StorageTestCase(test.TrialTestCase):
user_id = "fake"
mountpoint = "/dev/sdf"
volume_id = self.mystorage.create_volume(vol_size, user_id)
- rv = self.mystorage.attach_volume(volume_id,
+
+ volume_obj = storage.get_volume(volume_id)
+ volume_obj.start_attach(instance_id, mountpoint)
+ rv = yield self.mynode.attach_volume(volume_id,
instance_id,
mountpoint)
- volume_obj = self.mystorage.get_volume(volume_id)
- self.assertEqual(volume_obj['status'], "attached")
+ self.assertEqual(volume_obj['status'], "in-use")
+ self.assertEqual(volume_obj['attachStatus'], "attached")
self.assertEqual(volume_obj['instance_id'], instance_id)
self.assertEqual(volume_obj['mountpoint'], mountpoint)
@@ -80,13 +83,13 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage.delete_volume,
volume_id)
- rv = self.mystorage.detach_volume(volume_id)
- volume_obj = self.mystorage.get_volume(volume_id)
+ rv = yield self.mystorage.detach_volume(volume_id)
+ volume_obj = storage.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
rv = self.mystorage.delete_volume(volume_id)
self.assertRaises(exception.Error,
- self.mystorage.get_volume,
+ storage.get_volume,
volume_id)
def test_multi_node(self):
diff --git a/nova/volume/storage.py b/nova/volume/storage.py
index 3efef085f..a409ec0db 100644
--- a/nova/volume/storage.py
+++ b/nova/volume/storage.py
@@ -58,6 +58,15 @@ flags.DEFINE_boolean('fake_storage', False,
# TODO(joshua) Index of volumes by project
+def get_volume(volume_id):
+ """ Returns a redis-backed volume object """
+ volume_class = Volume
+ if FLAGS.fake_storage:
+ volume_class = FakeVolume
+ if datastore.Keeper('storage-').set_is_member('volumes', volume_id):
+ return volume_class(volume_id=volume_id)
+ raise exception.Error("Volume does not exist")
+
class BlockStore(object):
"""
There is one BlockStore running on each volume node.
@@ -89,17 +98,10 @@ class BlockStore(object):
self._restart_exports()
return vol['volume_id']
- def get_volume(self, volume_id):
- """ Returns a redis-backed volume object """
- if self.keeper.set_is_member('volumes', volume_id):
- return self.volume_class(volume_id=volume_id)
- raise exception.Error("Volume does not exist")
-
def by_node(self, node_id):
""" returns a list of volumes for a node """
- for volume in self.all:
- if volume['node_name'] == node_id:
- yield volume
+ for volume_id in self.keeper.set_members('volumes:%s' % (node_id)):
+ yield self.volume_class(volume_id=volume_id)
@property
def all(self):
@@ -107,15 +109,9 @@ class BlockStore(object):
for volume_id in self.keeper.set_members('volumes'):
yield self.volume_class(volume_id=volume_id)
- @property
- def local(self):
- """ returns a list of locally attached volumes """
- for volume_id in self.keeper.set_members('volumes:%s' % (FLAGS.storage_name)):
- yield self.volume_class(volume_id=volume_id)
-
def delete_volume(self, volume_id):
logging.debug("Deleting volume with id of: %s" % (volume_id))
- vol = self.get_volume(volume_id)
+ vol = get_volume(volume_id)
if vol['status'] == "attached":
raise exception.Error("Volume is still attached")
if vol['node_name'] != FLAGS.storage_name:
@@ -125,22 +121,6 @@ class BlockStore(object):
self.keeper.set_remove('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
return True
- def attach_volume(self, volume_id, instance_id, mountpoint):
- vol = self.get_volume(volume_id)
- if vol['status'] == "attached":
- raise exception.Error("Volume is already attached")
- if vol['node_name'] != FLAGS.storage_name:
- raise exception.Error("Volume is not local to this node")
- vol.attach(instance_id, mountpoint)
-
- def detach_volume(self, volume_id):
- vol = self.get_volume(volume_id)
- if vol['status'] == "available":
- raise exception.Error("Volume is already detached")
- if vol['node_name'] != FLAGS.storage_name:
- raise exception.Error("Volume is not local to this node")
- vol.detach()
-
def _restart_exports(self):
if FLAGS.fake_storage:
return
@@ -186,23 +166,41 @@ class Volume(datastore.RedisModel):
vol["instance_id"] = 'none'
vol["mountpoint"] = 'none'
vol["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- vol["attachment_set"] = ''
+ vol['status'] = "creating" # creating | available | in-use
+ vol['attachStatus'] = "detached" # attaching | attached | detaching | detached
+ vol.save()
vol.create_lv()
vol.setup_export()
- vol['status'] = "available"
+ # TODO(joshua) - We need to trigger a fanout message for aoe-discover on all the nodes
+ # TODO(joshua
vol.save()
return vol
- def attach(self, instance_id, mountpoint):
+ def start_attach(self, instance_id, mountpoint):
+ """ """
self['instance_id'] = instance_id
self['mountpoint'] = mountpoint
- self['status'] = "attached"
+ self['status'] = "in-use"
+ self['attachStatus'] = "attaching"
+ self['attachTime'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+ self['deleteOnTermination'] = 'False'
+ self.save()
+
+ def finish_attach(self):
+ """ """
+ self['attachStatus'] = "attached"
+ self.save()
+
+ def start_detach(self):
+ """ """
+ self['attachStatus'] = "detaching"
self.save()
- def detach(self):
+ def finish_detach(self):
self['instance_id'] = None
self['mountpoint'] = None
self['status'] = "available"
+ self['attachStatus'] = "detached"
self.save()
def destroy(self):