author     ZhuRongze <zrzhit@gmail.com>  2012-08-01 13:23:13 +0000
committer  ZhuRongze <zrzhit@gmail.com>  2012-08-03 06:27:08 +0000
commit     6795de644b8a8a1879543101d85ba90674219c8b (patch)
tree       2ecb3e22f7dd0565cfe6ec72c3d27ff761c980cf /nova/volume/api.py
parent     87dcb13117459da0e92b98feadab8a4ecba2c4f9 (diff)
Send 'create volume from snapshot' to the proper host
A simple solution for bug 1008866. When creating a volume from a snapshot in a multi-cluster deployment, the volume API checks whether snapshot_id is set. If snapshot_id is set and FLAGS.snapshot_same_host is true, the create_volume call is cast directly to the volume host where the snapshot resides instead of going through the scheduler, so the snapshot can be copied to the new volume. Same approach as review 9761.
Change-Id: Ic182eb4563b9462704c5969d5116629442df316a
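The routing relies on rpc.queue_get_for producing a host-scoped topic, so the cast bypasses the shared scheduler queue and lands on the one volume service that already holds the snapshot's source volume. A minimal sketch of the idea (illustrative only, not part of the patch; the host name is made up, and the queue_get_for body assumes the plain '<topic>.<host>' join used by nova's common rpc code of this era):

    # Illustrative sketch; assumes queue_get_for joins "<topic>.<host>".
    def queue_get_for(context, topic, host):
        # e.g. "volume" + "blockstore-03" -> "volume.blockstore-03"
        return '%s.%s' % (topic, host) if host else topic

    # With snapshot_same_host=True, the create_volume cast is addressed to
    # the host that stores the snapshot's source volume:
    print(queue_get_for(None, 'volume', 'blockstore-03'))  # volume.blockstore-03

When snapshot_same_host is false, or no snapshot_id is given, the cast still goes to FLAGS.scheduler_topic and the scheduler picks a host as before.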
Diffstat (limited to 'nova/volume/api.py')
-rw-r--r--  nova/volume/api.py  44
1 file changed, 37 insertions(+), 7 deletions(-)
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 90eef1e9c..a9ebeda28 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -25,13 +25,19 @@ import functools
from nova.db import base
from nova import exception
from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
+volume_host_opt = cfg.BoolOpt('snapshot_same_host',
+                              default=True,
+                              help='Create volume from snapshot at the host where snapshot resides')
+
FLAGS = flags.FLAGS
+FLAGS.register_opt(volume_host_opt)
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
LOG = logging.getLogger(__name__)
@@ -131,15 +137,39 @@ class API(base.Base):
             }
         volume = self.db.volume_create(context, options)
-        rpc.cast(context,
-                 FLAGS.scheduler_topic,
-                 {"method": "create_volume",
-                  "args": {"topic": FLAGS.volume_topic,
-                           "volume_id": volume['id'],
-                           "snapshot_id": snapshot_id,
-                           "reservations": reservations}})
+        self._cast_create_volume(context, volume['id'],
+                                 snapshot_id, reservations)
         return volume
 
+    def _cast_create_volume(self, context, volume_id,
+                            snapshot_id, reservations):
+
+        # NOTE(Rongze Zhu): This is a simple solution for bug 1008866.
+        # If snapshot_id is set, cast the create_volume call directly to
+        # the volume host where the snapshot resides instead of passing it
+        # through the scheduler, so the snapshot can be copied to the new volume.
+
+        if snapshot_id and FLAGS.snapshot_same_host:
+            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+            src_volume_ref = self.db.volume_get(context,
+                                                snapshot_ref['volume_id'])
+            topic = rpc.queue_get_for(context,
+                                      FLAGS.volume_topic,
+                                      src_volume_ref['host'])
+            rpc.cast(context,
+                     topic,
+                     {"method": "create_volume",
+                      "args": {"volume_id": volume_id,
+                               "snapshot_id": snapshot_id}})
+        else:
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "create_volume",
+                      "args": {"topic": FLAGS.volume_topic,
+                               "volume_id": volume_id,
+                               "snapshot_id": snapshot_id,
+                               "reservations": reservations}})
+
     @wrap_check_policy
     def delete(self, context, volume):
         volume_id = volume['id']
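Since snapshot_same_host defaults to True, the direct-to-host behavior is on by default. Operators or tests that want the scheduler to keep placing snapshot-based volumes can turn it off; a sketch under the assumption that FLAGS is the cfg-backed object of this nova era (which exposes set_override), with the option otherwise simply set in nova.conf:

    # Illustrative only: restore the pre-patch behavior so every
    # create_volume is routed through the scheduler again.
    import nova.volume.api  # importing registers the snapshot_same_host opt
    from nova import flags

    FLAGS = flags.FLAGS
    FLAGS.set_override('snapshot_same_host', False)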