From 6795de644b8a8a1879543101d85ba90674219c8b Mon Sep 17 00:00:00 2001
From: ZhuRongze
Date: Wed, 1 Aug 2012 13:23:13 +0000
Subject: Send 'create volume from snapshot' to the proper host

A simple solution for bug 1008866. When creating a volume from a
snapshot in a multi-cluster deployment, the volume API checks whether
snapshot_id is set. If snapshot_id is set and FLAGS.snapshot_same_host
is true, the create_volume call is cast directly to the volume host
where the snapshot resides instead of being passed through the
scheduler, so the snapshot can be copied to the new volume.

The same as review 9761.

Change-Id: Ic182eb4563b9462704c5969d5116629442df316a
---
 etc/nova/nova.conf.sample        |  6 ++++++
 nova/tests/api/ec2/test_cloud.py |  2 +-
 nova/volume/api.py               | 44 +++++++++++++++++++++++++++++++++-------
 3 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index e03b8363e..dbb798462 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1662,6 +1662,12 @@
 #### (StrOpt) Address that the XCP VNC proxy should bind to
 
 
+######## defined in nova.volume.api ########
+
+# snapshot_same_host=true
+#### (BoolOpt) Create volume from snapshot at the host where snapshot resides.
+
+
 ######## defined in nova.volume.driver ########
 
 # volume_group=nova-volumes
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index ef78c007b..fe5f0c969 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1239,7 +1239,7 @@ class CloudTestCase(test.TestCase):
             if 'snapshot_id' in bdm:
                 snap = db.snapshot_create(self.context,
                                           {'id': bdm['snapshot_id'],
-                                           'volume_id': 76543210,
+                                           'volume_id': 01234567,
                                            'status': "available",
                                            'volume_size': 1})
                 snapshots.append(snap['id'])
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 90eef1e9c..a9ebeda28 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -25,13 +25,19 @@ import functools
 from nova.db import base
 from nova import exception
 from nova import flags
+from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
 from nova.openstack.common import rpc
 from nova.openstack.common import timeutils
 import nova.policy
 from nova import quota
 
+volume_host_opt = cfg.BoolOpt('snapshot_same_host',
+        default=True,
+        help='Create volume from snapshot at the host where snapshot resides')
+
 FLAGS = flags.FLAGS
+FLAGS.register_opt(volume_host_opt)
 flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
 
 LOG = logging.getLogger(__name__)
@@ -131,15 +137,39 @@ class API(base.Base):
             }
 
         volume = self.db.volume_create(context, options)
-        rpc.cast(context,
-                 FLAGS.scheduler_topic,
-                 {"method": "create_volume",
-                  "args": {"topic": FLAGS.volume_topic,
-                           "volume_id": volume['id'],
-                           "snapshot_id": snapshot_id,
-                           "reservations": reservations}})
+        self._cast_create_volume(context, volume['id'],
+                                 snapshot_id, reservations)
         return volume
 
+    def _cast_create_volume(self, context, volume_id,
+                            snapshot_id, reservations):
+
+        # NOTE(Rongze Zhu): This is a simple solution for bug 1008866.
+        # If snapshot_id is set, make the create_volume call directly to
+        # the volume host where the snapshot resides instead of passing it
+        # through the scheduler, so the snapshot can be copied to the new volume.
+
+        if snapshot_id and FLAGS.snapshot_same_host:
+            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+            src_volume_ref = self.db.volume_get(context,
+                                                snapshot_ref['volume_id'])
+            topic = rpc.queue_get_for(context,
+                                      FLAGS.volume_topic,
+                                      src_volume_ref['host'])
+            rpc.cast(context,
+                     topic,
+                     {"method": "create_volume",
+                      "args": {"volume_id": volume_id,
+                               "snapshot_id": snapshot_id}})
+        else:
+            rpc.cast(context,
+                     FLAGS.scheduler_topic,
+                     {"method": "create_volume",
+                      "args": {"topic": FLAGS.volume_topic,
+                               "volume_id": volume_id,
+                               "snapshot_id": snapshot_id,
+                               "reservations": reservations}})
+
     @wrap_check_policy
     def delete(self, context, volume):
         volume_id = volume['id']
-- 
cgit
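
For illustration only, not part of the patch: a minimal, self-contained sketch of the
routing decision that _cast_create_volume introduces. The dictionary lookups stand in
for self.db.snapshot_get and self.db.volume_get, and the returned strings approximate
the per-host queue name that rpc.queue_get_for builds; every name in the sketch
(pick_create_volume_topic, the sample hosts and ids) is an assumption made for this
example, not Nova code.

    # Stand-alone sketch of the host-routing logic added by this patch.
    # The stand-in names below are illustrative assumptions, not Nova APIs.
    VOLUME_TOPIC = 'volume'
    SCHEDULER_TOPIC = 'scheduler'


    def pick_create_volume_topic(snapshot_id, snapshot_same_host,
                                 snapshots, volumes):
        """Return the queue that should receive the 'create_volume' cast."""
        if snapshot_id and snapshot_same_host:
            snapshot = snapshots[snapshot_id]             # ~ db.snapshot_get()
            src_volume = volumes[snapshot['volume_id']]   # ~ db.volume_get()
            # Cast straight to the host holding the snapshot, bypassing the
            # scheduler; the per-host queue name is roughly '<topic>.<host>'.
            return '%s.%s' % (VOLUME_TOPIC, src_volume['host'])
        # Otherwise keep the old behaviour and let the scheduler pick a host.
        return SCHEDULER_TOPIC


    # Example: a snapshot whose source volume lives on host 'volhost1'.
    snapshots = {'snap-1': {'volume_id': 'vol-1'}}
    volumes = {'vol-1': {'host': 'volhost1'}}
    print(pick_create_volume_topic('snap-1', True, snapshots, volumes))
    # -> volume.volhost1
    print(pick_create_volume_topic('snap-1', False, snapshots, volumes))
    # -> scheduler

With snapshot_same_host left at its default of true, the cast skips the scheduler
entirely, which is what allows the volume host that already holds the snapshot to
copy it into the new volume.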