diff options
| author | Chris Behrens <cbehrens@codestud.com> | 2011-08-29 08:07:34 -0700 |
|---|---|---|
| committer | Chris Behrens <cbehrens@codestud.com> | 2011-08-29 08:07:34 -0700 |
| commit | c0775bfd7d6f2b21b95e459e4c78c5ed9b445efe (patch) | |
| tree | abaf7375f24384def97c4f19dbc655a9cb1895c6 /nova/tests | |
| parent | 25cd526a72a98f184ed57fc85e7be2997305ce31 (diff) | |
| parent | e103b1c471d1f507075542b795d8620a89132de9 (diff) | |
merged trunk
Diffstat (limited to 'nova/tests')
| -rw-r--r-- | nova/tests/api/openstack/contrib/test_vsa.py | 450 | ||||
| -rw-r--r-- | nova/tests/api/openstack/test_extensions.py | 1 | ||||
| -rw-r--r-- | nova/tests/scheduler/test_vsa_scheduler.py | 641 | ||||
| -rw-r--r-- | nova/tests/test_ipv6.py | 2 | ||||
| -rw-r--r-- | nova/tests/test_vsa.py | 182 | ||||
| -rw-r--r-- | nova/tests/test_vsa_volumes.py | 136 |
6 files changed, 1411 insertions, 1 deletion
diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py new file mode 100644 index 000000000..311b6cb8d --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -0,0 +1,450 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import stubout +import unittest +import webob + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import volume +from nova import vsa +from nova.api import openstack +from nova.tests.api.openstack import fakes +import nova.wsgi + +from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view + +FLAGS = flags.FLAGS + +LOG = logging.getLogger('nova.tests.api.openstack.vsa') + +last_param = {} + + +def _get_default_vsa_param(): + return { + 'display_name': 'Test_VSA_name', + 'display_description': 'Test_VSA_description', + 'vc_count': 1, + 'instance_type': 'm1.small', + 'instance_type_id': 5, + 'image_name': None, + 'availability_zone': None, + 'storage': [], + 'shared': False + } + + +def stub_vsa_create(self, context, **param): + global last_param + LOG.debug(_("_create: param=%s"), param) + param['id'] = 123 + param['name'] = 'Test name' + param['instance_type_id'] = 5 + last_param = param + return param + + +def stub_vsa_delete(self, context, vsa_id): + global last_param + last_param = 
dict(vsa_id=vsa_id) + + LOG.debug(_("_delete: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + +def stub_vsa_get(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_get: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + param = _get_default_vsa_param() + param['id'] = vsa_id + return param + + +def stub_vsa_get_all(self, context): + LOG.debug(_("_get_all: %s"), locals()) + param = _get_default_vsa_param() + param['id'] = 123 + return [param] + + +class VSAApiTest(test.TestCase): + def setUp(self): + super(VSAApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(vsa.api.API, "create", stub_vsa_create) + self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) + self.stubs.Set(vsa.api.API, "get", stub_vsa_get) + self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAApiTest, self).tearDown() + + def test_vsa_create(self): + global last_param + last_param = {} + + vsa = {"displayName": "VSA Test Name", + "displayDescription": "VSA Test Desc"} + body = dict(vsa=vsa) + req = webob.Request.blank('/v1.1/777/zadr-vsa') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + # Compare if parameters were correctly passed to stub + self.assertEqual(last_param['display_name'], "VSA Test Name") + self.assertEqual(last_param['display_description'], "VSA Test Desc") + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName']) + 
self.assertEqual(resp_dict['vsa']['displayDescription'], + vsa['displayDescription']) + + def test_vsa_create_no_body(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa') + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + + def test_vsa_delete(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_delete_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_show(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['id'], str(vsa_id)) + + def test_vsa_show_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_index(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) 
+ + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + def test_vsa_detail(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/detail') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + +def _get_default_volume_param(): + return { + 'id': 123, + 'status': 'available', + 'size': 100, + 'availability_zone': 'nova', + 'created_at': None, + 'attach_status': 'detached', + 'name': 'vol name', + 'display_name': 'Default vol name', + 'display_description': 'Default vol description', + 'volume_type_id': 1, + 'volume_metadata': [], + } + + +def stub_get_vsa_volume_type(self, context): + return {'id': 1, + 'name': 'VSA volume type', + 'extra_specs': {'type': 'vsa_volume'}} + + +def stub_volume_create(self, context, size, snapshot_id, name, description, + **param): + LOG.debug(_("_create: param=%s"), size) + vol = _get_default_volume_param() + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + return vol + + +def stub_volume_update(self, context, **param): + LOG.debug(_("_volume_update: param=%s"), param) + pass + + +def stub_volume_delete(self, context, **param): + LOG.debug(_("_volume_delete: param=%s"), param) + pass + + +def stub_volume_get(self, context, volume_id): + LOG.debug(_("_volume_get: volume_id=%s"), volume_id) + vol = _get_default_volume_param() + vol['id'] = volume_id + meta = {'key': 'from_vsa_id', 'value': '123'} + if volume_id == '345': + meta = {'key': 'to_vsa_id', 'value': '123'} + vol['volume_metadata'].append(meta) + return vol + + +def stub_volume_get_notfound(self, context, volume_id): + raise 
exception.NotFound + + +def stub_volume_get_all(self, context, search_opts): + vol = stub_volume_get(self, context, '123') + vol['metadata'] = search_opts['metadata'] + return [vol] + + +def return_vsa(context, vsa_id): + return {'id': vsa_id} + + +class VSAVolumeApiTest(test.TestCase): + + def setUp(self, test_obj=None, test_objs=None): + super(VSAVolumeApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(nova.db.api, 'vsa_get', return_vsa) + self.stubs.Set(vsa.api.API, "get_vsa_volume_type", + stub_get_vsa_volume_type) + + self.stubs.Set(volume.api.API, "update", stub_volume_update) + self.stubs.Set(volume.api.API, "delete", stub_volume_delete) + self.stubs.Set(volume.api.API, "get", stub_volume_get) + self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all) + + self.context = context.get_admin_context() + self.test_obj = test_obj if test_obj else "volume" + self.test_objs = test_objs if test_objs else "volumes" + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAVolumeApiTest, self).tearDown() + + def test_vsa_volume_create(self): + self.stubs.Set(volume.api.API, "create", stub_volume_create) + + vol = {"size": 100, + "displayName": "VSA Volume Test Name", + "displayDescription": "VSA Volume Test Desc"} + body = {self.test_obj: vol} + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + resp = req.get_response(fakes.wsgi_app()) + + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + self.assertTrue(self.test_obj in resp_dict) + self.assertEqual(resp_dict[self.test_obj]['size'], + vol['size']) + 
self.assertEqual(resp_dict[self.test_obj]['displayName'], + vol['displayName']) + self.assertEqual(resp_dict[self.test_obj]['displayDescription'], + vol['displayDescription']) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_create_no_body(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 422) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_index(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_detail(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \ + self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_show_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + + def test_vsa_volume_update(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + update = {"status": "available", + "displayName": "Test Display name"} + body = {self.test_obj: update} + req = 
webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'PUT' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 404) + else: + self.assertEqual(resp.status_int, 400) + + +class VSADriveApiTest(VSAVolumeApiTest): + def setUp(self): + super(VSADriveApiTest, self).setUp(test_obj="drive", + test_objs="drives") + + def tearDown(self): + self.stubs.UnsetAll() + super(VSADriveApiTest, self).tearDown() diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index c78588d65..05267d8fb 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -95,6 +95,7 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "Rescue", "SecurityGroups", + "VSAs", "VirtualInterfaces", "Volumes", "VolumeTypes", diff 
--git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py new file mode 100644 index 000000000..37964f00d --- /dev/null +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -0,0 +1,641 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import stubout + +import nova + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import utils +from nova.volume import volume_types + +from nova.scheduler import vsa as vsa_sched +from nova.scheduler import driver + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.scheduler.vsa') + +scheduled_volumes = [] +scheduled_volume = {} +global_volume = {} + + +class FakeVsaLeastUsedScheduler( + vsa_sched.VsaSchedulerLeastUsedHost): + # No need to stub anything at the moment + pass + + +class FakeVsaMostAvailCapacityScheduler( + vsa_sched.VsaSchedulerMostAvailCapacity): + # No need to stub anything at the moment + pass + + +class VsaSchedulerTestCase(test.TestCase): + + def _get_vol_creation_request(self, num_vols, drive_ix, size=0): + volume_params = [] + for i in range(num_vols): + + name = 'name_' + str(i) + try: + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + 
self.created_types_lst.append(name) + except exception.ApiError: + # type is already created + pass + + volume_type = volume_types.get_volume_type_by_name(self.context, + name) + volume = {'size': size, + 'snapshot_id': None, + 'name': 'vol_' + str(i), + 'description': None, + 'volume_type_id': volume_type['id']} + volume_params.append(volume) + + return {'num_volumes': len(volume_params), + 'vsa_id': 123, + 'volumes': volume_params} + + def _generate_default_service_states(self): + service_states = {} + for i in range(self.host_num): + host = {} + hostname = 'host_' + str(i) + if hostname in self.exclude_host_list: + continue + + host['volume'] = {'timestamp': utils.utcnow(), + 'drive_qos_info': {}} + + for j in range(self.drive_type_start_ix, + self.drive_type_start_ix + self.drive_type_num): + dtype = {} + dtype['Name'] = 'name_' + str(j) + dtype['DriveType'] = 'type_' + str(j) + dtype['TotalDrives'] = 2 * (self.init_num_drives + i) + dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j) + dtype['TotalCapacity'] = dtype['TotalDrives'] * \ + dtype['DriveCapacity'] + dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \ + dtype['DriveCapacity'] + dtype['DriveRpm'] = 7200 + dtype['DifCapable'] = 0 + dtype['SedCapable'] = 0 + dtype['PartitionDrive'] = { + 'PartitionSize': 0, + 'NumOccupiedPartitions': 0, + 'NumFreePartitions': 0} + dtype['FullDrive'] = { + 'NumFreeDrives': dtype['TotalDrives'] - i, + 'NumOccupiedDrives': i} + host['volume']['drive_qos_info'][dtype['Name']] = dtype + + service_states[hostname] = host + + return service_states + + def _print_service_states(self): + for host, host_val in self.service_states.iteritems(): + LOG.info(_("Host %s"), host) + total_used = 0 + total_available = 0 + qos = host_val['volume']['drive_qos_info'] + + for k, d in qos.iteritems(): + LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\ + "size %3d, total %4d, used %4d, avail %d", + k, d['DriveType'], + d['FullDrive']['NumOccupiedDrives'], 
d['TotalDrives'], + vsa_sched.BYTES_TO_GB(d['DriveCapacity']), + vsa_sched.BYTES_TO_GB(d['TotalCapacity']), + vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \ + d['AvailableCapacity']), + vsa_sched.BYTES_TO_GB(d['AvailableCapacity'])) + + total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \ + d['AvailableCapacity']) + total_available += vsa_sched.BYTES_TO_GB( + d['AvailableCapacity']) + LOG.info("Host %s: used %d, avail %d", + host, total_used, total_available) + + def _set_service_states(self, host_num, + drive_type_start_ix, drive_type_num, + init_num_drives=10, + exclude_host_list=[]): + self.host_num = host_num + self.drive_type_start_ix = drive_type_start_ix + self.drive_type_num = drive_type_num + self.exclude_host_list = exclude_host_list + self.init_num_drives = init_num_drives + self.service_states = self._generate_default_service_states() + + def _get_service_states(self): + return self.service_states + + def _fake_get_service_states(self): + return self._get_service_states() + + def _fake_provision_volume(self, context, vol, vsa_id, availability_zone): + global scheduled_volumes + scheduled_volumes.append(dict(vol=vol, + vsa_id=vsa_id, + az=availability_zone)) + name = vol['name'] + host = vol['host'] + LOG.debug(_("Test: provision vol %(name)s on host %(host)s"), + locals()) + LOG.debug(_("\t vol=%(vol)s"), locals()) + pass + + def _fake_vsa_update(self, context, vsa_id, values): + LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\ + "values=%(values)s"), locals()) + pass + + def _fake_volume_create(self, context, options): + LOG.debug(_("Test: Volume create: %s"), options) + options['id'] = 123 + global global_volume + global_volume = options + return options + + def _fake_volume_get(self, context, volume_id): + LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals()) + global global_volume + global_volume['id'] = volume_id + global_volume['availability_zone'] = None + return global_volume + + def _fake_volume_update(self, 
context, volume_id, values): + LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\ + "values=%(values)s"), locals()) + global scheduled_volume + scheduled_volume = {'id': volume_id, 'host': values['host']} + pass + + def _fake_service_get_by_args(self, context, host, binary): + return "service" + + def _fake_service_is_up_True(self, service): + return True + + def _fake_service_is_up_False(self, service): + return False + + def setUp(self, sched_class=None): + super(VsaSchedulerTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.context = context.get_admin_context() + + if sched_class is None: + self.sched = FakeVsaLeastUsedScheduler() + else: + self.sched = sched_class + + self.host_num = 10 + self.drive_type_num = 5 + + self.stubs.Set(self.sched, + '_get_service_states', self._fake_get_service_states) + self.stubs.Set(self.sched, + '_provision_volume', self._fake_provision_volume) + self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update) + + self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get) + self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update) + + self.created_types_lst = [] + + def tearDown(self): + for name in self.created_types_lst: + volume_types.purge(self.context, name) + + self.stubs.UnsetAll() + super(VsaSchedulerTestCase, self).tearDown() + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2') + 
self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_0', 'host_2', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_no_drive_type(self): + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6) + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + def test_vsa_sched_no_enough_drives(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=3, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=0) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + # check that everything was returned back + cur = self._get_service_states() + for k, v in prev.iteritems(): + self.assertEqual(prev[k]['volume']['drive_qos_info'], + cur[k]['volume']['drive_qos_info']) + + def test_vsa_sched_wrong_topic(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1) + states = self._get_service_states() + new_states = {} + new_states['host_0'] = {'compute': states['host_0']['volume']} + self.service_states = new_states + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + 
self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + def test_vsa_sched_provision_volume(self): + global global_volume + global_volume = {} + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.stubs.UnsetAll() + self.stubs.Set(self.sched, + '_get_service_states', self._fake_get_service_states) + self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(request_spec['volumes'][0]['name'], + global_volume['display_name']) + + def test_vsa_sched_no_free_drives(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + cur = self._get_service_states() + cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0'] + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1) + + new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + self._print_service_states() + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + new_request, + availability_zone=None) + + def test_vsa_sched_forced_host(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10) + + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.assertRaises(exception.HostBinaryNotFound, + self.sched.schedule_create_volumes, + self.context, + request_spec, + 
availability_zone="nova:host_5") + + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_False) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone="nova:host_5") + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5') + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + 
prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) + + def test_vsa_sched_create_single_volume_az(self): + global scheduled_volume + scheduled_volume = {} + + def _fake_volume_get_az(context, volume_id): + LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals()) + return {'id': volume_id, 'availability_zone': 'nova:host_3'} + + self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az) + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_3') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_3') + + def test_vsa_sched_create_single_non_vsa_volume(self): + global scheduled_volume + scheduled_volume = {} + + global global_volume + global_volume = {} + global_volume['volume_type_id'] = None + + self.assertRaises(driver.NoValidHost, + self.sched.schedule_create_volume, + self.context, + 123, + availability_zone=None) + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 
'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_2') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_2') + + +class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase): + + def setUp(self): + super(VsaSchedulerTestCaseMostAvail, self).setUp( + FakeVsaMostAvailCapacityScheduler()) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaSchedulerTestCaseMostAvail, self).tearDown() + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_9') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_9') + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + 
exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self._print_service_states() + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7') + + cur = self._get_service_states() + for host in ['host_9', 'host_8', 'host_7']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + 
self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 04c1b5598..e1ba4aafb 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase): def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, - bad_prefix, + bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py new file mode 100644 index 000000000..3d2d2de13 --- /dev/null +++ b/nova/tests/test_vsa.py @@ -0,0 +1,182 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +import stubout + +from xml.etree import ElementTree +from xml.etree.ElementTree import Element, SubElement + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import vsa +from nova import volume +from nova.volume import volume_types +from nova.vsa import utils as vsa_utils + +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa') + + +class VsaTestCase(test.TestCase): + + def setUp(self): + super(VsaTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + self.volume_api = volume.API() + + FLAGS.quota_volumes = 100 + FLAGS.quota_gigabytes = 10000 + + self.context = context.get_admin_context() + + volume_types.create(self.context, + 'SATA_500_7200', + extra_specs={'type': 'vsa_drive', + 'drive_name': 'SATA_500_7200', + 'drive_type': 'SATA', + 'drive_size': '500', + 'drive_rpm': '7200'}) + + def fake_show_by_name(meh, context, name): + if name == 'wrong_image_name': + LOG.debug(_("Test: Emulate wrong VSA name. 
Raise")) + raise exception.ImageNotFound + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, + 'show_by_name', + fake_show_by_name) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaTestCase, self).tearDown() + + def test_vsa_create_delete_defaults(self): + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['display_name'], param['display_name']) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_delete_check_in_db(self): + vsa_list1 = self.vsa_api.get_all(self.context) + vsa_ref = self.vsa_api.create(self.context) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + + self.vsa_api.delete(self.context, vsa_ref['id']) + vsa_list3 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list3), len(vsa_list2) - 1) + + def test_vsa_create_delete_high_vc_count(self): + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_wrong_image_name(self): + param = {'image_name': 'wrong_image_name'} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_db_error(self): + + def fake_vsa_create(context, options): + LOG.debug(_("Test: Emulate DB error. 
Raise")) + raise exception.Error + + self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create) + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context) + + def test_vsa_create_wrong_storage_params(self): + vsa_list1 = self.vsa_api.get_all(self.context) + param = {'storage': [{'stub': 1}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1)) + + param = {'storage': [{'drive_name': 'wrong name'}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_with_storage(self, multi_vol_creation=True): + """Test creation of VSA with BE storage""" + + FLAGS.vsa_multi_vol_creation = multi_vol_creation + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 3) + self.vsa_api.delete(self.context, vsa_ref['id']) + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}], + 'shared': True} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 15) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_with_storage_single_volumes(self): + self.test_vsa_create_with_storage(multi_vol_creation=False) + + def test_vsa_update(self): + vsa_ref = self.vsa_api.create(self.context) + + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + + param = {'vc_count': 2} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], 2) + + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_generate_user_data(self): + + FLAGS.vsa_multi_vol_creation = False + param = {'display_name': 'VSA name test', + 
'display_description': 'VSA desc test', + 'vc_count': 2, + 'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + volumes = self.vsa_api.get_all_vsa_drives(self.context, + vsa_ref['id']) + + user_data = vsa_utils.generate_user_data(vsa_ref, volumes) + user_data = base64.b64decode(user_data) + + LOG.debug(_("Test: user_data = %s"), user_data) + + elem = ElementTree.fromstring(user_data) + self.assertEqual(elem.findtext('name'), + param['display_name']) + self.assertEqual(elem.findtext('description'), + param['display_description']) + self.assertEqual(elem.findtext('vc_count'), + str(param['vc_count'])) + + self.vsa_api.delete(self.context, vsa_ref['id']) diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py new file mode 100644 index 000000000..b7cd4e840 --- /dev/null +++ b/nova/tests/test_vsa_volumes.py @@ -0,0 +1,136 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import stubout + +from nova import exception +from nova import flags +from nova import vsa +from nova import volume +from nova import db +from nova import context +from nova import test +from nova import log as logging +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa.volumes') + + +class VsaVolumesTestCase(test.TestCase): + + def setUp(self): + super(VsaVolumesTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + self.volume_api = volume.API() + self.context = context.get_admin_context() + + self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context) + + def fake_show_by_name(meh, context, name): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, + 'show_by_name', + fake_show_by_name) + + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.vsa_id = vsa_ref['id'] + + def tearDown(self): + if self.vsa_id: + self.vsa_api.delete(self.context, self.vsa_id) + self.stubs.UnsetAll() + super(VsaVolumesTestCase, self).tearDown() + + def _default_volume_param(self): + return { + 'size': 1, + 'snapshot_id': None, + 'name': 'Test volume name', + 'description': 'Test volume desc name', + 'volume_type': self.default_vol_type, + 'metadata': {'from_vsa_id': self.vsa_id} + } + + def _get_all_volumes_by_vsa(self): + return self.volume_api.get_all(self.context, + search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}}) + + def test_vsa_volume_create_delete(self): + """ Check if volume properly created and deleted. 
""" + volume_param = self._default_volume_param() + volume_ref = self.volume_api.create(self.context, **volume_param) + + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'creating') + + vols2 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols2[0] + + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'creating') + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'available'}) + self.volume_api.delete(self.context, volume_ref['id']) + + vols3 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols3[0] + self.assertEqual(volume_ref['status'], + 'deleting') + + def test_vsa_volume_delete_nonavail_volume(self): + """ Check volume deleton in different states. """ + volume_param = self._default_volume_param() + volume_ref = self.volume_api.create(self.context, **volume_param) + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'in-use'}) + self.assertRaises(exception.ApiError, + self.volume_api.delete, + self.context, volume_ref['id']) + + def test_vsa_volume_delete_vsa_with_volumes(self): + """ Check volume deleton in different states. """ + + vols1 = self._get_all_volumes_by_vsa() + for i in range(3): + volume_param = self._default_volume_param() + volume_ref = self.volume_api.create(self.context, **volume_param) + + vols2 = self._get_all_volumes_by_vsa() + self.assertEqual(len(vols1) + 3, len(vols2)) + + self.vsa_api.delete(self.context, self.vsa_id) + + vols3 = self._get_all_volumes_by_vsa() + self.assertEqual(len(vols1), len(vols3)) |
