author     Loganathan Parthipan <parthipan@hp.com>    2013-04-30 13:13:33 +0000
committer  Loganathan Parthipan <parthipan@hp.com>    2013-05-02 01:41:11 +0000
commit     8e4eec840d504d27a3a4c640dba67b3b47e81730 (patch)
tree       3ef15fd69381b0be9e94641fa6d2762305cac2ae
parent     b6aac988e46147154d49e938ecf9b67831613636 (diff)
Checks if volume can be attached
This patch makes a boot fail fast if one of the volumes cannot be
attached at boot time. Currently the request is accepted, but spawning
fails later and the volume is left in the 'available' state, which is
inconsistent.

Change-Id: I35b2034f8aa9574fcb8a34b58ead9f5e9ef27664
Fixes: bug 1166770
-rw-r--r--   nova/compute/api.py                   7
-rw-r--r--   nova/tests/compute/test_compute.py   53
2 files changed, 59 insertions(+), 1 deletion(-)
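The substance of the change: _validate_bdm no longer just fetches each mapped volume, it also asks the volume API whether the volume can actually be attached, and wraps any failure in InvalidBDMVolume so the boot request is rejected up front. Judging from the tests added below, the attach check amounts to requiring status 'available' and attach_status 'detached'. The following is a minimal sketch of that logic under that assumption; check_attach_sketch and VolumeNotAttachable are illustrative names, not part of nova or cinder.

# Minimal sketch of the attach check exercised by _validate_bdm, inferred
# from the tests added in this commit. Not the real nova.volume.cinder.API
# code; check_attach_sketch and VolumeNotAttachable are hypothetical names.


class VolumeNotAttachable(Exception):
    pass


def check_attach_sketch(volume):
    # A volume is only attachable at boot if it is 'available' and is not
    # already attached to another instance.
    if volume['status'] != 'available':
        raise VolumeNotAttachable("status is %r, not 'available'"
                                  % volume['status'])
    if volume['attach_status'] == 'attached':
        raise VolumeNotAttachable("volume is already attached")


# The three cases mirror fake_volume_get_1/2/3 in the new test below.
for vol in ({'status': 'creating', 'attach_status': 'detached'},
            {'status': 'available', 'attach_status': 'attached'},
            {'status': 'available', 'attach_status': 'detached'}):
    try:
        check_attach_sketch(vol)
        print(vol, '-> accepted')
    except VolumeNotAttachable as exc:
        print(vol, '-> rejected:', exc)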
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 3f3078d5b..ac52b805d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -780,11 +780,16 @@ class API(base.Base):
         for bdm in self.db.block_device_mapping_get_all_by_instance(
                 context, instance['uuid']):
             # NOTE(vish): For now, just make sure the volumes are accessible.
+            # Additionally, check that the volume can be attached to this
+            # instance.
             snapshot_id = bdm.get('snapshot_id')
             volume_id = bdm.get('volume_id')
             if volume_id is not None:
                 try:
-                    self.volume_api.get(context, volume_id)
+                    volume = self.volume_api.get(context, volume_id)
+                    self.volume_api.check_attach(context,
+                                                 volume,
+                                                 instance=instance)
                 except Exception:
                     raise exception.InvalidBDMVolume(id=volume_id)
             elif snapshot_id is not None:
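Seen from the caller's side, an unattachable volume now surfaces while the API is still validating the boot request, rather than after the compute host has started spawning. A rough usage sketch, assuming compute_api, context and instance are set up the way the new test below sets them up (illustrative only):

# Illustrative only: the boot request is rejected before any spawn work
# starts, and the volume keeps whatever state it already had.
try:
    compute_api._validate_bdm(context, instance=instance)
except exception.InvalidBDMVolume as exc:
    print("boot rejected: %s" % exc)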
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 277e804ac..bd253b8ee 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -541,6 +541,59 @@ class ComputeVolumeTestCase(BaseTestCase):
         self.assertEqual(1, volume_usage['tot_writes'])
         self.assertEqual(20, volume_usage['tot_write_bytes'])
 
+    def test_validate_bdm(self):
+        # Test if volume is checked for availability before being attached
+        # at boot time
+
+        def fake_bdms(context, instance_uuid):
+            block_device_mapping = [{
+                'id': 1,
+                'no_device': None,
+                'virtual_name': None,
+                'snapshot_id': None,
+                'volume_id': self.volume_id,
+                'device_name': 'vda',
+                'delete_on_termination': False,
+            }]
+            return block_device_mapping
+        self.stubs.Set(self.compute.db,
+                       'block_device_mapping_get_all_by_instance',
+                       fake_bdms)
+
+        # Check that the volume status is 'available' and reject if not
+        def fake_volume_get_1(self, context, volume_id):
+            return {'id': volume_id,
+                    'status': 'creating',
+                    'attach_status': 'detached'}
+        self.stubs.Set(cinder.API, 'get', fake_volume_get_1)
+
+        self.assertRaises(exception.InvalidBDMVolume,
+                          self.compute_api._validate_bdm,
+                          self.context,
+                          instance=self.instance)
+
+        # Check that the volume attach_status is 'detached' and reject if not
+        def fake_volume_get_2(self, context, volume_id):
+            return {'id': volume_id,
+                    'status': 'available',
+                    'attach_status': 'attached'}
+        self.stubs.Set(cinder.API, 'get', fake_volume_get_2)
+
+        self.assertRaises(exception.InvalidBDMVolume,
+                          self.compute_api._validate_bdm,
+                          self.context,
+                          instance=self.instance)
+
+        # Check that the volume status is 'available' and attach_status is
+        # 'detached' and accept the request if so
+        def fake_volume_get_3(self, context, volume_id):
+            return {'id': volume_id,
+                    'status': 'available',
+                    'attach_status': 'detached'}
+        self.stubs.Set(cinder.API, 'get', fake_volume_get_3)
+
+        self.compute_api._validate_bdm(self.context, instance=self.instance)
+
 
 class ComputeTestCase(BaseTestCase):
     def test_wrap_instance_fault(self):