summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorvladimir.p <vladimir@zadarastorage.com>2011-07-27 22:49:16 -0700
committervladimir.p <vladimir@zadarastorage.com>2011-07-27 22:49:16 -0700
commit401de172b86a13010885e70bc78351e72a7dfde3 (patch)
tree4a3a892c646234b41791d5c9c7c3c3caf502edbf /nova/tests
parent336b2703ef90fcd7b422434434c9967880b97204 (diff)
prior to nova-1336 merge
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/openstack/test_extensions.py7
-rw-r--r--nova/tests/scheduler/test_vsa_scheduler.py616
-rw-r--r--nova/tests/test_vsa.py2
-rw-r--r--nova/tests/test_vsa_volumes.py23
4 files changed, 644 insertions, 4 deletions
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index d459c694f..2febe50e5 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -97,8 +97,9 @@ class ExtensionControllerTest(unittest.TestCase):
data = json.loads(response.body)
names = [x['name'] for x in data['extensions']]
names.sort()
- self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips",
- "Fox In Socks", "Hosts", "Multinic", "Volumes"])
+ self.assertEqual(names, ["DriveTypes", "FlavorExtraSpecs",
+ "Floating_ips", "Fox In Socks", "Hosts", "Multinic", "VSAs",
+ "Volumes"])
# Make sure that at least Fox in Sox is correct.
(fox_ext,) = [
@@ -145,7 +146,7 @@ class ExtensionControllerTest(unittest.TestCase):
# Make sure we have all the extensions.
exts = root.findall('{0}extension'.format(NS))
- self.assertEqual(len(exts), 6)
+ self.assertEqual(len(exts), 8)
# Make sure that at least Fox in Sox is correct.
(fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX']
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
new file mode 100644
index 000000000..697ad3842
--- /dev/null
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -0,0 +1,616 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+import nova
+from nova import exception
+from nova import flags
+from nova import db
+from nova import context
+from nova import test
+from nova import utils
+from nova import log as logging
+
+from nova.scheduler import vsa as vsa_sched
+from nova.scheduler import driver
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.scheduler.vsa')
+
+scheduled_volumes = []
+scheduled_volume = {}
+global_volume = {}
+
+
+class FakeVsaLeastUsedScheduler(
+ vsa_sched.VsaSchedulerLeastUsedHost):
+ # No need to stub anything at the moment
+ pass
+
+
+class FakeVsaMostAvailCapacityScheduler(
+ vsa_sched.VsaSchedulerMostAvailCapacity):
+ # No need to stub anything at the moment
+ pass
+
+
+class VsaSchedulerTestCase(test.TestCase):
+
+ def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
+ volume_params = []
+ for i in range(num_vols):
+ drive_type = {'id': i,
+ 'name': 'name_' + str(drive_ix),
+ 'type': 'type_' + str(drive_ix),
+ 'size_gb': 1 + 100 * (drive_ix)}
+ volume = {'size': size,
+ 'snapshot_id': None,
+ 'name': 'vol_' + str(i),
+ 'description': None,
+ 'drive_ref': drive_type}
+ volume_params.append(volume)
+
+ return {'num_volumes': len(volume_params),
+ 'vsa_id': 123,
+ 'volumes': volume_params}
+
+ def _generate_default_service_states(self):
+ service_states = {}
+ for i in range(self.host_num):
+ host = {}
+ hostname = 'host_' + str(i)
+ if hostname in self.exclude_host_list:
+ continue
+
+ host['volume'] = {'timestamp': utils.utcnow(),
+ 'drive_qos_info': {}}
+
+ for j in range(self.drive_type_start_ix,
+ self.drive_type_start_ix + self.drive_type_num):
+ dtype = {}
+ dtype['Name'] = 'name_' + str(j)
+ dtype['DriveType'] = 'type_' + str(j)
+ dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
+ dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
+ dtype['TotalCapacity'] = dtype['TotalDrives'] * \
+ dtype['DriveCapacity']
+ dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
+ dtype['DriveCapacity']
+ dtype['DriveRpm'] = 7200
+ dtype['DifCapable'] = 0
+ dtype['SedCapable'] = 0
+ dtype['PartitionDrive'] = {
+ 'PartitionSize': 0,
+ 'NumOccupiedPartitions': 0,
+ 'NumFreePartitions': 0}
+ dtype['FullDrive'] = {
+ 'NumFreeDrives': dtype['TotalDrives'] - i,
+ 'NumOccupiedDrives': i}
+ host['volume']['drive_qos_info'][dtype['Name']] = dtype
+
+ service_states[hostname] = host
+
+ return service_states
+
+ def _print_service_states(self):
+ for host, host_val in self.service_states.iteritems():
+ LOG.info(_("Host %s"), host)
+ total_used = 0
+ total_available = 0
+ qos = host_val['volume']['drive_qos_info']
+
+ for k, d in qos.iteritems():
+ LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
+ "size %3d, total %4d, used %4d, avail %d",
+ k, d['DriveType'],
+ d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
+ vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
+ vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity']),
+ vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
+
+ total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+ d['AvailableCapacity'])
+ total_available += vsa_sched.BYTES_TO_GB(
+ d['AvailableCapacity'])
+ LOG.info("Host %s: used %d, avail %d",
+ host, total_used, total_available)
+
+ def _set_service_states(self, host_num,
+ drive_type_start_ix, drive_type_num,
+ init_num_drives=10,
+ exclude_host_list=[]):
+ self.host_num = host_num
+ self.drive_type_start_ix = drive_type_start_ix
+ self.drive_type_num = drive_type_num
+ self.exclude_host_list = exclude_host_list
+ self.init_num_drives = init_num_drives
+ self.service_states = self._generate_default_service_states()
+
+ def _get_service_states(self):
+ return self.service_states
+
+ def _fake_get_service_states(self):
+ return self._get_service_states()
+
+ def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
+ global scheduled_volumes
+ scheduled_volumes.append(dict(vol=vol,
+ vsa_id=vsa_id,
+ az=availability_zone))
+ name = vol['name']
+ host = vol['host']
+ LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
+ locals())
+ LOG.debug(_("\t vol=%(vol)s"), locals())
+ pass
+
+ def _fake_vsa_update(self, context, vsa_id, values):
+ LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
+ "values=%(values)s"), locals())
+ pass
+
+ def _fake_volume_create(self, context, options):
+ LOG.debug(_("Test: Volume create: %s"), options)
+ options['id'] = 123
+ global global_volume
+ global_volume = options
+ return options
+
+ def _fake_volume_get(self, context, volume_id):
+ LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
+ global global_volume
+ global_volume['id'] = volume_id
+ global_volume['availability_zone'] = None
+ return global_volume
+
+ def _fake_volume_update(self, context, volume_id, values):
+ LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
+ "values=%(values)s"), locals())
+ global scheduled_volume
+ scheduled_volume = {'id': volume_id, 'host': values['host']}
+ pass
+
+ def _fake_service_get_by_args(self, context, host, binary):
+ return "service"
+
+ def _fake_service_is_up_True(self, service):
+ return True
+
+ def _fake_service_is_up_False(self, service):
+ return False
+
+ def setUp(self, sched_class=None):
+ super(VsaSchedulerTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.context_non_admin = context.RequestContext(None, None)
+ self.context = context.get_admin_context()
+
+ if sched_class is None:
+ self.sched = FakeVsaLeastUsedScheduler()
+ else:
+ self.sched = sched_class
+
+ self.host_num = 10
+ self.drive_type_num = 5
+
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(self.sched,
+ '_provision_volume', self._fake_provision_volume)
+ self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
+
+ self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
+ self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCase, self).tearDown()
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_0', 'host_2', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_no_drive_type(self):
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_no_enough_drives(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=3,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=0)
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ # check that everything was returned back
+ cur = self._get_service_states()
+ for k, v in prev.iteritems():
+ self.assertEqual(prev[k]['volume']['drive_qos_info'],
+ cur[k]['volume']['drive_qos_info'])
+
+ def test_vsa_sched_wrong_topic(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1)
+ states = self._get_service_states()
+ new_states = {}
+ new_states['host_0'] = {'compute': states['host_0']['volume']}
+ self.service_states = new_states
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone=None)
+
+ def test_vsa_sched_provision_volume(self):
+ global global_volume
+ global_volume = {}
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(self.sched,
+ '_get_service_states', self._fake_get_service_states)
+ self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(request_spec['volumes'][0]['name'],
+ global_volume['display_name'])
+
+ def test_vsa_sched_no_free_drives(self):
+ self._set_service_states(host_num=1,
+ drive_type_start_ix=0,
+ drive_type_num=1,
+ init_num_drives=1)
+ request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ cur = self._get_service_states()
+ cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)
+
+ new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+ self._print_service_states()
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ new_request,
+ availability_zone=None)
+
+ def test_vsa_sched_forced_host(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10)
+
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self.assertRaises(exception.HostBinaryNotFound,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_False)
+
+ self.assertRaises(driver.WillNotSchedule,
+ self.sched.schedule_create_volumes,
+ self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone="nova:host_5")
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
+
+ def test_vsa_sched_create_single_volume_az(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ def _fake_volume_get_az(context, volume_id):
+ LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
+ return {'id': volume_id, 'availability_zone': 'nova:host_3'}
+
+ self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
+ self.stubs.Set(nova.db,
+ 'service_get_by_args', self._fake_service_get_by_args)
+ self.stubs.Set(self.sched,
+ 'service_is_up', self._fake_service_is_up_True)
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_3')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_3')
+
+ def test_vsa_sched_create_single_non_vsa_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+
+ global global_volume
+ global_volume = {}
+ global_volume['drive_type'] = None
+
+ self.assertRaises(driver.NoValidHost,
+ self.sched.schedule_create_volume,
+ self.context,
+ 123,
+ availability_zone=None)
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ drive_type = {'id': drive_ix,
+ 'name': 'name_' + str(drive_ix),
+ 'type': 'type_' + str(drive_ix),
+ 'size_gb': 1 + 100 * (drive_ix)}
+
+ global_volume['drive_type'] = drive_type
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_2')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_2')
+
+
+class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
+
+ def setUp(self):
+ super(VsaSchedulerTestCaseMostAvail, self).setUp(
+ FakeVsaMostAvailCapacityScheduler())
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(VsaSchedulerTestCaseMostAvail, self).tearDown()
+
+ def test_vsa_sched_create_single_volume(self):
+ global scheduled_volume
+ scheduled_volume = {}
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_0', 'host_1'])
+ prev = self._generate_default_service_states()
+
+ global global_volume
+ global_volume = {}
+
+ drive_ix = 2
+ drive_type = {'id': drive_ix,
+ 'name': 'name_' + str(drive_ix),
+ 'type': 'type_' + str(drive_ix),
+ 'size_gb': 1 + 100 * (drive_ix)}
+
+ global_volume['drive_type'] = drive_type
+ global_volume['size'] = 0
+
+ host = self.sched.schedule_create_volume(self.context,
+ 123, availability_zone=None)
+
+ self.assertEqual(host, 'host_9')
+ self.assertEqual(scheduled_volume['id'], 123)
+ self.assertEqual(scheduled_volume['host'], 'host_9')
+
+ def test_vsa_sched_create_volumes_simple(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=10,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=10,
+ exclude_host_list=['host_1', 'host_3'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+ self._print_service_states()
+
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')
+
+ cur = self._get_service_states()
+ for host in ['host_9', 'host_8', 'host_7']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ def test_vsa_sched_create_volumes_partition(self):
+ global scheduled_volumes
+ scheduled_volumes = []
+ self._set_service_states(host_num=5,
+ drive_type_start_ix=0,
+ drive_type_num=5,
+ init_num_drives=1,
+ exclude_host_list=['host_0', 'host_2'])
+ prev = self._generate_default_service_states()
+ request_spec = self._get_vol_creation_request(num_vols=3,
+ drive_ix=3,
+ size=50)
+ self.sched.schedule_create_volumes(self.context,
+ request_spec,
+ availability_zone=None)
+
+ self.assertEqual(len(scheduled_volumes), 3)
+ self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
+ self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
+ self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')
+
+ cur = self._get_service_states()
+ for host in ['host_1', 'host_3', 'host_4']:
+ cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
+ prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
+
+ self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+ self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+ prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+ self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+ prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 0)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumOccupiedPartitions'], 1)
+ self.assertEqual(cur_dtype['PartitionDrive']
+ ['NumFreePartitions'], 5)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['NumFreePartitions'], 0)
+ self.assertEqual(prev_dtype['PartitionDrive']
+ ['PartitionSize'], 0)
diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py
index 8e4d58960..cff23a800 100644
--- a/nova/tests/test_vsa.py
+++ b/nova/tests/test_vsa.py
@@ -22,6 +22,7 @@ from xml.etree.ElementTree import Element, SubElement
from nova import exception
from nova import flags
from nova import vsa
+from nova import volume
from nova import db
from nova import context
from nova import test
@@ -50,6 +51,7 @@ class VsaTestCase(test.TestCase):
super(VsaTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.vsa_api = vsa.API()
+ self.volume_api = volume.API()
self.context_non_admin = context.RequestContext(None, None)
self.context = context.get_admin_context()
diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
index e1d4cd756..d451a4377 100644
--- a/nova/tests/test_vsa_volumes.py
+++ b/nova/tests/test_vsa_volumes.py
@@ -61,7 +61,8 @@ class VsaVolumesTestCase(test.TestCase):
self.vsa_id = vsa_ref['id']
def tearDown(self):
- self.vsa_api.delete(self.context, self.vsa_id)
+ if self.vsa_id:
+ self.vsa_api.delete(self.context, self.vsa_id)
self.stubs.UnsetAll()
super(VsaVolumesTestCase, self).tearDown()
@@ -106,3 +107,23 @@ class VsaVolumesTestCase(test.TestCase):
self.volume_api.update(self.context,
volume_ref['id'], {'status': 'error'})
self.volume_api.delete(self.context, volume_ref['id'])
+
+ def test_vsa_volume_delete_vsa_with_volumes(self):
+        """ Check volume deletion in different states. """
+
+ vols1 = self.volume_api.get_all_by_vsa(self.context,
+ self.vsa_id, "from")
+ for i in range(3):
+ volume_param = _default_volume_param()
+ volume_param['from_vsa_id'] = self.vsa_id
+ volume_ref = self.volume_api.create(self.context, **volume_param)
+
+ vols2 = self.volume_api.get_all_by_vsa(self.context,
+ self.vsa_id, "from")
+ self.assertEqual(len(vols1) + 3, len(vols2))
+
+ self.vsa_api.delete(self.context, self.vsa_id)
+
+ vols3 = self.volume_api.get_all_by_vsa(self.context,
+ self.vsa_id, "from")
+ self.assertEqual(len(vols1), len(vols3))