From 942169b3b21439c83822d25d213e515c97c85135 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Tue, 21 May 2013 00:16:44 +0000
Subject: xenapi: Moving tests under tests/virt/xenapi/

We have both a tests/xenapi/ and a tests/virt/xenapi/ directory which is
confusing. This patch standardizes on tests/virt/xenapi/ which matches the
nova/ directory layout.

Change-Id: I324a4730e2477aa1863b17c7ea404ce2932a681c
---
 nova/tests/test_xenapi.py                | 3477 ------------------------
 nova/tests/virt/xenapi/stubs.py          |  353 +++
 nova/tests/virt/xenapi/test_vm_utils.py  |  375 +++-
 nova/tests/virt/xenapi/test_volumeops.py |    2 +-
 nova/tests/virt/xenapi/test_xenapi.py    | 3477 ++++++++++++++++++++++++++++++
 nova/tests/virt/xenapi/vm_rrd.xml        | 1101 ++++++++++
 nova/tests/xenapi/__init__.py            |   20 -
 nova/tests/xenapi/stubs.py               |  353 ---
 nova/tests/xenapi/test_vm_utils.py       |  377 ----
 nova/tests/xenapi/vm_rrd.xml             | 1101 ----------
 10 files changed, 5306 insertions(+), 5330 deletions(-)
 delete mode 100644 nova/tests/test_xenapi.py
 create mode 100644 nova/tests/virt/xenapi/stubs.py
 create mode 100644 nova/tests/virt/xenapi/test_xenapi.py
 create mode 100644 nova/tests/virt/xenapi/vm_rrd.xml
 delete mode 100644 nova/tests/xenapi/__init__.py
 delete mode 100644 nova/tests/xenapi/stubs.py
 delete mode 100644 nova/tests/xenapi/test_vm_utils.py
 delete mode 100644 nova/tests/xenapi/vm_rrd.xml

diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
deleted file mode 100644
index 2628f2582..000000000
--- a/nova/tests/test_xenapi.py
+++ /dev/null
@@ -1,3477 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -"""Test suite for XenAPI.""" - -import ast -import base64 -import contextlib -import functools -import os -import re - -from oslo.config import cfg - -from nova.compute import api as compute_api -from nova.compute import flavors -from nova.compute import power_state -from nova.compute import task_states -from nova.compute import vm_states -from nova import context -from nova import db -from nova import exception -from nova.openstack.common import importutils -from nova.openstack.common import jsonutils -from nova.openstack.common import log as logging -from nova import test -from nova.tests.db import fakes as db_fakes -from nova.tests import fake_network -from nova.tests import fake_processutils -import nova.tests.image.fake as fake_image -from nova.tests import matchers -from nova.tests.xenapi import stubs -from nova.virt import fake -from nova.virt.xenapi import agent -from nova.virt.xenapi import driver as xenapi_conn -from nova.virt.xenapi import fake as xenapi_fake -from nova.virt.xenapi import host -from nova.virt.xenapi.imageupload import glance -from nova.virt.xenapi import pool -from nova.virt.xenapi import pool_states -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import vmops -from nova.virt.xenapi import volume_utils - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.import_opt('compute_manager', 'nova.service') -CONF.import_opt('network_manager', 'nova.service') -CONF.import_opt('compute_driver', 'nova.virt.driver') -CONF.import_opt('host', 'nova.netconf') -CONF.import_opt('default_availability_zone', 'nova.availability_zones') - -IMAGE_MACHINE = '1' -IMAGE_KERNEL = '2' -IMAGE_RAMDISK = '3' -IMAGE_RAW = '4' -IMAGE_VHD = '5' -IMAGE_ISO = '6' - -IMAGE_FIXTURES = { - IMAGE_MACHINE: { - 'image_meta': {'name': 'fakemachine', 'size': 0, - 'disk_format': 'ami', - 'container_format': 'ami'}, - }, - IMAGE_KERNEL: { - 'image_meta': {'name': 'fakekernel', 'size': 0, - 'disk_format': 'aki', - 'container_format': 'aki'}, - }, - IMAGE_RAMDISK: { - 'image_meta': {'name': 'fakeramdisk', 'size': 0, - 'disk_format': 'ari', - 'container_format': 'ari'}, - }, - IMAGE_RAW: { - 'image_meta': {'name': 'fakeraw', 'size': 0, - 'disk_format': 'raw', - 'container_format': 'bare'}, - }, - IMAGE_VHD: { - 'image_meta': {'name': 'fakevhd', 'size': 0, - 'disk_format': 'vhd', - 'container_format': 'ovf'}, - }, - IMAGE_ISO: { - 'image_meta': {'name': 'fakeiso', 'size': 0, - 'disk_format': 'iso', - 'container_format': 'bare'}, - }, -} - - -def set_image_fixtures(): - image_service = fake_image.FakeImageService() - image_service.images.clear() - for image_id, image_meta in IMAGE_FIXTURES.items(): - image_meta = image_meta['image_meta'] - image_meta['id'] = image_id - image_service.create(None, image_meta) - - -def get_fake_device_info(): - # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid - # can be removed from the dict when LP bug #1087308 is fixed - fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None) - fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid'] - fake = {'block_device_mapping': - [{'connection_info': {'driver_volume_type': 'iscsi', - 'data': {'sr_uuid': 'falseSR', - 'introduce_sr_keys': ['sr_type'], - 'sr_type': 'iscsi', - 'vdi_uuid': fake_vdi_uuid, - 'target_discovered': False, - 'target_iqn': 'foo_iqn:foo_volid', - 'target_portal': 'localhost:3260', - 'volume_id': 'foo_volid', - 'target_lun': 1, - 'auth_password': 'my-p@55w0rd', - 'auth_username': 'johndoe', - 'auth_method': u'CHAP'}, }, - 'mount_device': 'vda', - 'delete_on_termination': 
False}, ], - 'root_device_name': '/dev/sda', - 'ephemerals': [], - 'swap': None, } - return fake - - -def stub_vm_utils_with_vdi_attached_here(function, should_return=True): - """ - vm_utils.with_vdi_attached_here needs to be stubbed out because it - calls down to the filesystem to attach a vdi. This provides a - decorator to handle that. - """ - @functools.wraps(function) - def decorated_function(self, *args, **kwargs): - @contextlib.contextmanager - def fake_vdi_attached_here(*args, **kwargs): - fake_dev = 'fakedev' - yield fake_dev - - def fake_image_download(*args, **kwargs): - pass - - def fake_is_vdi_pv(*args, **kwargs): - return should_return - - orig_vdi_attached_here = vm_utils.vdi_attached_here - orig_image_download = fake_image._FakeImageService.download - orig_is_vdi_pv = vm_utils._is_vdi_pv - try: - vm_utils.vdi_attached_here = fake_vdi_attached_here - fake_image._FakeImageService.download = fake_image_download - vm_utils._is_vdi_pv = fake_is_vdi_pv - return function(self, *args, **kwargs) - finally: - vm_utils._is_vdi_pv = orig_is_vdi_pv - fake_image._FakeImageService.download = orig_image_download - vm_utils.vdi_attached_here = orig_vdi_attached_here - - return decorated_function - - -def create_instance_with_system_metadata(context, instance_values): - instance_type = db.instance_type_get(context, - instance_values['instance_type_id']) - sys_meta = flavors.save_instance_type_info({}, - instance_type) - instance_values['system_metadata'] = sys_meta - return db.instance_create(context, instance_values) - - -class XenAPIVolumeTestCase(stubs.XenAPITestBase): - """Unit tests for Volume operations.""" - def setUp(self): - super(XenAPIVolumeTestCase, self).setUp() - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - self.flags(disable_process_locking=True, - firewall_driver='nova.virt.xenapi.firewall.' 
- 'Dom0IptablesFirewallDriver', - xenapi_connection_url='test_url', - xenapi_connection_password='test_pass') - db_fakes.stub_out_db_instance_api(self.stubs) - self.instance_values = {'id': 1, - 'project_id': self.user_id, - 'user_id': 'fake', - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'root_gb': 20, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - def _create_volume(self, size=0): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['host'] = 'localhost' - vol['availability_zone'] = CONF.default_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(self.context, vol) - - @staticmethod - def _make_connection_data(): - return { - 'volume_id': 1, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_portal': '127.0.0.1:3260,fake', - 'target_lun': None, - 'auth_method': 'CHAP', - 'auth_username': 'username', - 'auth_password': 'password', - } - - @classmethod - def _make_connection_info(cls): - return { - 'driver_volume_type': 'iscsi', - 'data': cls._make_connection_data() - } - - def test_mountpoint_to_number(self): - cases = { - 'sda': 0, - 'sdp': 15, - 'hda': 0, - 'hdp': 15, - 'vda': 0, - 'xvda': 0, - '0': 0, - '10': 10, - 'vdq': -1, - 'sdq': -1, - 'hdq': -1, - 'xvdq': -1, - } - - for (input, expected) in cases.iteritems(): - actual = volume_utils.mountpoint_to_number(input) - self.assertEqual(actual, expected, - '%s yielded %s, not %s' % (input, actual, expected)) - - def test_parse_volume_info_parsing_auth_details(self): - result = volume_utils.parse_volume_info( - self._make_connection_data()) - - self.assertEquals('username', result['chapuser']) - self.assertEquals('password', result['chappassword']) - - def test_get_device_number_raise_exception_on_wrong_mountpoint(self): - self.assertRaises( - volume_utils.StorageError, - volume_utils.get_device_number, - 'dev/sd') - - def test_attach_volume(self): - # This shows how to test Ops classes' methods. - stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - instance = db.instance_create(self.context, self.instance_values) - vm = xenapi_fake.create_vm(instance['name'], 'Running') - result = conn.attach_volume(self._make_connection_info(), - instance, '/dev/sdc') - - # check that the VM has a VBD attached to it - # Get XenAPI record for VBD - vbds = xenapi_fake.get_all('VBD') - vbd = xenapi_fake.get_record('VBD', vbds[0]) - vm_ref = vbd['VM'] - self.assertEqual(vm_ref, vm) - - def test_attach_volume_raise_exception(self): - # This shows how to test when exceptions are raised. - stubs.stubout_session(self.stubs, - stubs.FakeSessionForVolumeFailedTests) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - instance = db.instance_create(self.context, self.instance_values) - xenapi_fake.create_vm(instance['name'], 'Running') - self.assertRaises(exception.VolumeDriverNotFound, - conn.attach_volume, - {'driver_volume_type': 'nonexist'}, - instance, - '/dev/sdc') - - -class XenAPIVMTestCase(stubs.XenAPITestBase): - """Unit tests for VM operations.""" - def setUp(self): - super(XenAPIVMTestCase, self).setUp() - self.network = importutils.import_object(CONF.network_manager) - self.flags(disable_process_locking=True, - instance_name_template='%d', - firewall_driver='nova.virt.xenapi.firewall.' 
- 'Dom0IptablesFirewallDriver', - xenapi_connection_url='test_url', - xenapi_connection_password='test_pass',) - xenapi_fake.create_local_srs() - xenapi_fake.create_local_pifs() - db_fakes.stub_out_db_instance_api(self.stubs) - xenapi_fake.create_network('fake', CONF.flat_network_bridge) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - stubs.stubout_get_this_vm_uuid(self.stubs) - stubs.stubout_is_vdi_pv(self.stubs) - stubs.stub_out_vm_methods(self.stubs) - fake_processutils.stub_out_processutils_execute(self.stubs) - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - fake_image.stub_out_image_service(self.stubs) - set_image_fixtures() - stubs.stubout_image_service_download(self.stubs) - stubs.stubout_stream_disk(self.stubs) - - def fake_inject_instance_metadata(self, instance, vm): - pass - self.stubs.Set(vmops.VMOps, 'inject_instance_metadata', - fake_inject_instance_metadata) - - def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): - name_label = "fakenamelabel" - disk_type = "fakedisktype" - virtual_size = 777 - return vm_utils.create_vdi( - session, sr_ref, instance, name_label, disk_type, - virtual_size) - self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi) - - def tearDown(self): - fake_image.FakeImageService_reset() - super(XenAPIVMTestCase, self).tearDown() - - def test_init_host(self): - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - vm = vm_utils._get_this_vm_ref(session) - # Local root disk - vdi0 = xenapi_fake.create_vdi('compute', None) - vbd0 = xenapi_fake.create_vbd(vm, vdi0) - # Instance VDI - vdi1 = xenapi_fake.create_vdi('instance-aaaa', None, - other_config={'nova_instance_uuid': 'aaaa'}) - vbd1 = xenapi_fake.create_vbd(vm, vdi1) - # Only looks like instance VDI - vdi2 = xenapi_fake.create_vdi('instance-bbbb', None) - vbd2 = xenapi_fake.create_vbd(vm, vdi2) - - self.conn.init_host(None) - self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2])) - - def test_list_instances_0(self): - instances = self.conn.list_instances() - self.assertEquals(instances, []) - - def test_list_instance_uuids_0(self): - instance_uuids = self.conn.list_instance_uuids() - self.assertEquals(instance_uuids, []) - - def test_list_instance_uuids(self): - uuids = [] - for x in xrange(1, 4): - instance = self._create_instance(x) - uuids.append(instance['uuid']) - instance_uuids = self.conn.list_instance_uuids() - self.assertEqual(len(uuids), len(instance_uuids)) - self.assertEqual(set(uuids), set(instance_uuids)) - - def test_get_rrd_server(self): - self.flags(xenapi_connection_url='myscheme://myaddress/') - server_info = vm_utils._get_rrd_server() - self.assertEqual(server_info[0], 'myscheme') - self.assertEqual(server_info[1], 'myaddress') - - def test_get_diagnostics(self): - def fake_get_rrd(host, vm_uuid): - path = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(path, 'xenapi/vm_rrd.xml')) as f: - return re.sub(r'\s', '', f.read()) - self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd) - - fake_diagnostics = { - 'vbd_xvdb_write': '0.0', - 'memory_target': '4294967296.0000', - 'memory_internal_free': '1415564.0000', - 'memory': '4294967296.0000', - 'vbd_xvda_write': '0.0', - 'cpu0': '0.0042', - 'vif_0_tx': '287.4134', - 'vbd_xvda_read': '0.0', - 'vif_0_rx': '1816.0144', - 'vif_2_rx': '0.0', - 'vif_2_tx': '0.0', - 'vbd_xvdb_read': '0.0', - 
'last_update': '1328795567', - } - instance = self._create_instance() - expected = self.conn.get_diagnostics(instance) - self.assertThat(fake_diagnostics, matchers.DictMatches(expected)) - - def test_get_vnc_console(self): - instance = self._create_instance() - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vm_ref = vm_utils.lookup(session, instance['name']) - - console = conn.get_vnc_console(instance) - - # Note(sulo): We dont care about session id in test - # they will always differ so strip that out - actual_path = console['internal_access_path'].split('&')[0] - expected_path = "/console?ref=%s" % str(vm_ref) - - self.assertEqual(expected_path, actual_path) - - def test_get_vnc_console_for_rescue(self): - instance = self._create_instance() - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue', - 'Running') - # Set instance state to rescued - instance['vm_state'] = 'rescued' - - console = conn.get_vnc_console(instance) - - # Note(sulo): We dont care about session id in test - # they will always differ so strip that out - actual_path = console['internal_access_path'].split('&')[0] - expected_path = "/console?ref=%s" % str(rescue_vm) - - self.assertEqual(expected_path, actual_path) - - def test_get_vnc_console_instance_not_ready(self): - instance = {} - # set instance name and state - instance['name'] = 'fake-instance' - instance['uuid'] = '00000000-0000-0000-0000-000000000000' - instance['vm_state'] = 'building' - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.InstanceNotFound, - conn.get_vnc_console, instance) - - def test_get_vnc_console_rescue_not_ready(self): - instance = {} - instance['name'] = 'fake-rescue' - instance['uuid'] = '00000000-0000-0000-0000-000000000001' - instance['vm_state'] = 'rescued' - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.InstanceNotReady, - conn.get_vnc_console, instance) - - def test_instance_snapshot_fails_with_no_primary_vdi(self): - - def create_bad_vbd(session, vm_ref, vdi_ref, userdevice, - vbd_type='disk', read_only=False, bootable=False, - osvol=False): - vbd_rec = {'VM': vm_ref, - 'VDI': vdi_ref, - 'userdevice': 'fake', - 'currently_attached': False} - vbd_ref = xenapi_fake._create_object('VBD', vbd_rec) - xenapi_fake.after_VBD_create(vbd_ref, vbd_rec) - return vbd_ref - - self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd) - stubs.stubout_instance_snapshot(self.stubs) - # Stubbing out firewall driver as previous stub sets alters - # xml rpc result parsing - stubs.stubout_firewall_driver(self.stubs, self.conn) - instance = self._create_instance() - - image_id = "my_snapshot_id" - self.assertRaises(exception.NovaException, self.conn.snapshot, - self.context, instance, image_id, - lambda *args, **kwargs: None) - - def test_instance_snapshot(self): - expected_calls = [ - {'args': (), - 'kwargs': - {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, - {'args': (), - 'kwargs': - {'task_state': task_states.IMAGE_UPLOADING, - 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] - func_call_matcher = matchers.FunctionCallMatcher(expected_calls) - image_id = "my_snapshot_id" - - stubs.stubout_instance_snapshot(self.stubs) - stubs.stubout_is_snapshot(self.stubs) - # Stubbing out firewall driver as 
previous stub sets alters - # xml rpc result parsing - stubs.stubout_firewall_driver(self.stubs, self.conn) - - instance = self._create_instance() - - self.fake_upload_called = False - - def fake_image_upload(_self, ctx, session, inst, vdi_uuids, - img_id): - self.fake_upload_called = True - self.assertEqual(ctx, self.context) - self.assertEqual(inst, instance) - self.assertTrue(isinstance(vdi_uuids, list)) - self.assertEqual(img_id, image_id) - - self.stubs.Set(glance.GlanceStore, 'upload_image', - fake_image_upload) - - self.conn.snapshot(self.context, instance, image_id, - func_call_matcher.call) - - # Ensure VM was torn down - vm_labels = [] - for vm_ref in xenapi_fake.get_all('VM'): - vm_rec = xenapi_fake.get_record('VM', vm_ref) - if not vm_rec["is_control_domain"]: - vm_labels.append(vm_rec["name_label"]) - - self.assertEquals(vm_labels, [instance['name']]) - - # Ensure VBDs were torn down - vbd_labels = [] - for vbd_ref in xenapi_fake.get_all('VBD'): - vbd_rec = xenapi_fake.get_record('VBD', vbd_ref) - vbd_labels.append(vbd_rec["vm_name_label"]) - - self.assertEquals(vbd_labels, [instance['name']]) - - # Ensure task states changed in correct order - self.assertIsNone(func_call_matcher.match()) - - # Ensure VDIs were torn down - for vdi_ref in xenapi_fake.get_all('VDI'): - vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) - name_label = vdi_rec["name_label"] - self.assert_(not name_label.endswith('snapshot')) - - self.assertTrue(self.fake_upload_called) - - def create_vm_record(self, conn, os_type, name): - instances = conn.list_instances() - self.assertEquals(instances, [name]) - - # Get Nova record for VM - vm_info = conn.get_info({'name': name}) - # Get XenAPI record for VM - vms = [rec for ref, rec - in xenapi_fake.get_all_records('VM').iteritems() - if not rec['is_control_domain']] - vm = vms[0] - self.vm_info = vm_info - self.vm = vm - - def check_vm_record(self, conn, check_injection=False): - # Check that m1.large above turned into the right thing. - instance_type = db.instance_type_get_by_name(conn, 'm1.large') - mem_kib = long(instance_type['memory_mb']) << 10 - mem_bytes = str(mem_kib << 10) - vcpus = instance_type['vcpus'] - self.assertEquals(self.vm_info['max_mem'], mem_kib) - self.assertEquals(self.vm_info['mem'], mem_kib) - self.assertEquals(self.vm['memory_static_max'], mem_bytes) - self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes) - self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes) - self.assertEquals(self.vm['VCPUs_max'], str(vcpus)) - self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus)) - - # Check that the VM is running according to Nova - self.assertEquals(self.vm_info['state'], power_state.RUNNING) - - # Check that the VM is running according to XenAPI. 
- self.assertEquals(self.vm['power_state'], 'Running') - - if check_injection: - xenstore_data = self.vm['xenstore_data'] - self.assertEquals(xenstore_data['vm-data/hostname'], 'test') - key = 'vm-data/networking/DEADBEEF0001' - xenstore_value = xenstore_data[key] - tcpip_data = ast.literal_eval(xenstore_value) - self.assertEquals(tcpip_data, - {'broadcast': '192.168.1.255', - 'dns': ['192.168.1.4', '192.168.1.3'], - 'gateway': '192.168.1.1', - 'gateway_v6': 'fe80::def', - 'ip6s': [{'enabled': '1', - 'ip': '2001:db8:0:1::1', - 'netmask': 64, - 'gateway': 'fe80::def'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.1.100', - 'netmask': '255.255.255.0', - 'gateway': '192.168.1.1'}, - {'enabled': '1', - 'ip': '192.168.1.101', - 'netmask': '255.255.255.0', - 'gateway': '192.168.1.1'}], - 'label': 'test1', - 'mac': 'DE:AD:BE:EF:00:01'}) - - def check_vm_params_for_windows(self): - self.assertEquals(self.vm['platform']['nx'], 'true') - self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'}) - self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order') - - # check that these are not set - self.assertEquals(self.vm['PV_args'], '') - self.assertEquals(self.vm['PV_bootloader'], '') - self.assertEquals(self.vm['PV_kernel'], '') - self.assertEquals(self.vm['PV_ramdisk'], '') - - def check_vm_params_for_linux(self): - self.assertEquals(self.vm['platform']['nx'], 'false') - self.assertEquals(self.vm['PV_args'], '') - self.assertEquals(self.vm['PV_bootloader'], 'pygrub') - - # check that these are not set - self.assertEquals(self.vm['PV_kernel'], '') - self.assertEquals(self.vm['PV_ramdisk'], '') - self.assertEquals(self.vm['HVM_boot_params'], {}) - self.assertEquals(self.vm['HVM_boot_policy'], '') - - def check_vm_params_for_linux_with_external_kernel(self): - self.assertEquals(self.vm['platform']['nx'], 'false') - self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1') - self.assertNotEquals(self.vm['PV_kernel'], '') - self.assertNotEquals(self.vm['PV_ramdisk'], '') - - # check that these are not set - self.assertEquals(self.vm['HVM_boot_params'], {}) - self.assertEquals(self.vm['HVM_boot_policy'], '') - - def _list_vdis(self): - url = CONF.xenapi_connection_url - username = CONF.xenapi_connection_username - password = CONF.xenapi_connection_password - session = xenapi_conn.XenAPISession(url, username, password, - fake.FakeVirtAPI()) - return session.call_xenapi('VDI.get_all') - - def _list_vms(self): - url = CONF.xenapi_connection_url - username = CONF.xenapi_connection_username - password = CONF.xenapi_connection_password - session = xenapi_conn.XenAPISession(url, username, password, - fake.FakeVirtAPI()) - return session.call_xenapi('VM.get_all') - - def _check_vdis(self, start_list, end_list): - for vdi_ref in end_list: - if vdi_ref not in start_list: - vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) - # If the cache is turned on then the base disk will be - # there even after the cleanup - if 'other_config' in vdi_rec: - if 'image-id' not in vdi_rec['other_config']: - self.fail('Found unexpected VDI:%s' % vdi_ref) - else: - self.fail('Found unexpected VDI:%s' % vdi_ref) - - def _test_spawn(self, image_ref, kernel_id, ramdisk_id, - instance_type_id="3", os_type="linux", - hostname="test", architecture="x86-64", instance_id=1, - injected_files=None, check_injection=False, - create_record=True, empty_dns=False, - block_device_info=None, - key_data=None): - if injected_files is None: - injected_files = [] - - # Fake out inject_instance_metadata - def fake_inject_instance_metadata(self, 
instance, vm): - pass - self.stubs.Set(vmops.VMOps, 'inject_instance_metadata', - fake_inject_instance_metadata) - - if create_record: - instance_values = {'id': instance_id, - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': image_ref, - 'kernel_id': kernel_id, - 'ramdisk_id': ramdisk_id, - 'root_gb': 20, - 'instance_type_id': instance_type_id, - 'os_type': os_type, - 'hostname': hostname, - 'key_data': key_data, - 'architecture': architecture} - instance = create_instance_with_system_metadata(self.context, - instance_values) - else: - instance = db.instance_get(self.context, instance_id) - - network_info = fake_network.fake_get_instance_nw_info(self.stubs, - spectacular=True) - if empty_dns: - # NOTE(tr3buchet): this is a terrible way to do this... - network_info[0]['network']['subnets'][0]['dns'] = [] - - image_meta = {} - if image_ref: - image_meta = IMAGE_FIXTURES[image_ref]["image_meta"] - self.conn.spawn(self.context, instance, image_meta, injected_files, - 'herp', network_info, block_device_info) - self.create_vm_record(self.conn, os_type, instance['name']) - self.check_vm_record(self.conn, check_injection) - self.assertTrue(instance['os_type']) - self.assertTrue(instance['architecture']) - - def test_spawn_empty_dns(self): - # Test spawning with an empty dns list. - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - empty_dns=True) - self.check_vm_params_for_linux() - - def test_spawn_not_enough_memory(self): - self.assertRaises(exception.InsufficientFreeMemory, - self._test_spawn, - '1', 2, 3, "4") # m1.xlarge - - def test_spawn_fail_cleanup_1(self): - """Simulates an error while downloading an image. - - Verifies that the VM and VDIs created are properly cleaned up. - """ - vdi_recs_start = self._list_vdis() - start_vms = self._list_vms() - stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True) - self.assertRaises(xenapi_fake.Failure, - self._test_spawn, '1', 2, 3) - # No additional VDI should be found. - vdi_recs_end = self._list_vdis() - end_vms = self._list_vms() - self._check_vdis(vdi_recs_start, vdi_recs_end) - # No additional VMs should be found. - self.assertEqual(start_vms, end_vms) - - def test_spawn_fail_cleanup_2(self): - """Simulates an error while creating VM record. - - Verifies that the VM and VDIs created are properly cleaned up. - """ - vdi_recs_start = self._list_vdis() - start_vms = self._list_vms() - stubs.stubout_create_vm(self.stubs) - self.assertRaises(xenapi_fake.Failure, - self._test_spawn, '1', 2, 3) - # No additional VDI should be found. - vdi_recs_end = self._list_vdis() - end_vms = self._list_vms() - self._check_vdis(vdi_recs_start, vdi_recs_end) - # No additional VMs should be found. - self.assertEqual(start_vms, end_vms) - - def test_spawn_fail_cleanup_3(self): - """Simulates an error while attaching disks. - - Verifies that the VM and VDIs created are properly cleaned up. - """ - stubs.stubout_attach_disks(self.stubs) - vdi_recs_start = self._list_vdis() - start_vms = self._list_vms() - self.assertRaises(xenapi_fake.Failure, - self._test_spawn, '1', 2, 3) - # No additional VDI should be found. - vdi_recs_end = self._list_vdis() - end_vms = self._list_vms() - self._check_vdis(vdi_recs_start, vdi_recs_end) - # No additional VMs should be found. 
- self.assertEqual(start_vms, end_vms) - - @stub_vm_utils_with_vdi_attached_here - def test_spawn_raw_glance(self): - self._test_spawn(IMAGE_RAW, None, None) - self.check_vm_params_for_linux() - - def test_spawn_vhd_glance_linux(self): - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64") - self.check_vm_params_for_linux() - - def test_spawn_vhd_glance_windows(self): - self._test_spawn(IMAGE_VHD, None, None, - os_type="windows", architecture="i386") - self.check_vm_params_for_windows() - - def test_spawn_iso_glance(self): - self._test_spawn(IMAGE_ISO, None, None, - os_type="windows", architecture="i386") - self.check_vm_params_for_windows() - - def test_spawn_glance(self): - - def fake_fetch_disk_image(context, session, instance, name_label, - image_id, image_type): - sr_ref = vm_utils.safe_find_sr(session) - image_type_str = vm_utils.ImageType.to_string(image_type) - vdi_ref = vm_utils.create_vdi(session, sr_ref, instance, - name_label, image_type_str, "20") - vdi_role = vm_utils.ImageType.get_role(image_type) - vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) - return {vdi_role: dict(uuid=vdi_uuid, file=None)} - self.stubs.Set(vm_utils, '_fetch_disk_image', - fake_fetch_disk_image) - - self._test_spawn(IMAGE_MACHINE, - IMAGE_KERNEL, - IMAGE_RAMDISK) - self.check_vm_params_for_linux_with_external_kernel() - - def test_spawn_boot_from_volume_no_image_meta(self): - dev_info = get_fake_device_info() - self._test_spawn(None, None, None, - block_device_info=dev_info) - - def test_spawn_boot_from_volume_with_image_meta(self): - dev_info = get_fake_device_info() - self._test_spawn(None, None, None, - block_device_info=dev_info) - - def test_spawn_netinject_file(self): - self.flags(flat_injected=True) - db_fakes.stub_out_db_instance_api(self.stubs, injected=True) - - self._tee_executed = False - - def _tee_handler(cmd, **kwargs): - input = kwargs.get('process_input', None) - self.assertNotEqual(input, None) - config = [line.strip() for line in input.split("\n")] - # Find the start of eth0 configuration and check it - index = config.index('auto eth0') - self.assertEquals(config[index + 1:index + 8], [ - 'iface eth0 inet static', - 'address 192.168.1.100', - 'netmask 255.255.255.0', - 'broadcast 192.168.1.255', - 'gateway 192.168.1.1', - 'dns-nameservers 192.168.1.3 192.168.1.4', - '']) - self._tee_executed = True - return '', '' - - def _readlink_handler(cmd_parts, **kwargs): - return os.path.realpath(cmd_parts[2]), '' - - fake_processutils.fake_execute_set_repliers([ - # Capture the tee .../etc/network/interfaces command - (r'tee.*interfaces', _tee_handler), - (r'readlink -nm.*', _readlink_handler), - ]) - self._test_spawn(IMAGE_MACHINE, - IMAGE_KERNEL, - IMAGE_RAMDISK, - check_injection=True) - self.assertTrue(self._tee_executed) - - def test_spawn_netinject_xenstore(self): - db_fakes.stub_out_db_instance_api(self.stubs, injected=True) - - self._tee_executed = False - - def _mount_handler(cmd, *ignore_args, **ignore_kwargs): - # When mounting, create real files under the mountpoint to simulate - # files in the mounted filesystem - - # mount point will be the last item of the command list - self._tmpdir = cmd[len(cmd) - 1] - LOG.debug(_('Creating files in %s to simulate guest agent'), - self._tmpdir) - os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin')) - # Touch the file using open - open(os.path.join(self._tmpdir, 'usr', 'sbin', - 'xe-update-networking'), 'w').close() - return '', '' - - def _umount_handler(cmd, *ignore_args, **ignore_kwargs): - # Umount 
would normall make files in the m,ounted filesystem - # disappear, so do that here - LOG.debug(_('Removing simulated guest agent files in %s'), - self._tmpdir) - os.remove(os.path.join(self._tmpdir, 'usr', 'sbin', - 'xe-update-networking')) - os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin')) - os.rmdir(os.path.join(self._tmpdir, 'usr')) - return '', '' - - def _tee_handler(cmd, *ignore_args, **ignore_kwargs): - self._tee_executed = True - return '', '' - - fake_processutils.fake_execute_set_repliers([ - (r'mount', _mount_handler), - (r'umount', _umount_handler), - (r'tee.*interfaces', _tee_handler)]) - self._test_spawn('1', 2, 3, check_injection=True) - - # tee must not run in this case, where an injection-capable - # guest agent is detected - self.assertFalse(self._tee_executed) - - def test_spawn_vlanmanager(self): - self.flags(network_manager='nova.network.manager.VlanManager', - vlan_interface='fake0') - - def dummy(*args, **kwargs): - pass - - self.stubs.Set(vmops.VMOps, '_create_vifs', dummy) - # Reset network table - xenapi_fake.reset_table('network') - # Instance id = 2 will use vlan network (see db/fakes.py) - ctxt = self.context.elevated() - instance = self._create_instance(2, False) - networks = self.network.db.network_get_all(ctxt) - for network in networks: - self.network.set_network_host(ctxt, network) - - self.network.allocate_for_instance(ctxt, - instance_id=2, - instance_uuid='00000000-0000-0000-0000-000000000002', - host=CONF.host, - vpn=None, - rxtx_factor=3, - project_id=self.project_id, - macs=None) - self._test_spawn(IMAGE_MACHINE, - IMAGE_KERNEL, - IMAGE_RAMDISK, - instance_id=2, - create_record=False) - # TODO(salvatore-orlando): a complete test here would require - # a check for making sure the bridge for the VM's VIF is - # consistent with bridge specified in nova db - - def test_spawn_with_network_qos(self): - self._create_instance() - for vif_ref in xenapi_fake.get_all('VIF'): - vif_rec = xenapi_fake.get_record('VIF', vif_ref) - self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit') - self.assertEquals(vif_rec['qos_algorithm_params']['kbps'], - str(3 * 10 * 1024)) - - def test_spawn_ssh_key_injection(self): - # Test spawning with key_data on an instance. Should use - # agent file injection. - actual_injected_files = [] - - def fake_inject_file(self, method, args): - path = base64.b64decode(args['b64_path']) - contents = base64.b64decode(args['b64_contents']) - actual_injected_files.append((path, contents)) - return jsonutils.dumps({'returncode': '0', 'message': 'success'}) - - def noop(*args, **kwargs): - pass - - self.stubs.Set(stubs.FakeSessionForVMTests, - '_plugin_agent_inject_file', fake_inject_file) - self.stubs.Set(agent.XenAPIBasedAgent, - 'set_admin_password', noop) - - expected_data = ('\n# The following ssh key was injected by ' - 'Nova\nfake_keydata\n') - - injected_files = [('/root/.ssh/authorized_keys', expected_data)] - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - key_data='fake_keydata') - self.assertEquals(actual_injected_files, injected_files) - - def test_spawn_injected_files(self): - # Test spawning with injected_files. 
- actual_injected_files = [] - - def fake_inject_file(self, method, args): - path = base64.b64decode(args['b64_path']) - contents = base64.b64decode(args['b64_contents']) - actual_injected_files.append((path, contents)) - return jsonutils.dumps({'returncode': '0', 'message': 'success'}) - self.stubs.Set(stubs.FakeSessionForVMTests, - '_plugin_agent_inject_file', fake_inject_file) - - injected_files = [('/tmp/foo', 'foobar')] - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - injected_files=injected_files) - self.check_vm_params_for_linux() - self.assertEquals(actual_injected_files, injected_files) - - def test_rescue(self): - instance = self._create_instance() - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - vm_ref = vm_utils.lookup(session, instance['name']) - - swap_vdi_ref = xenapi_fake.create_vdi('swap', None) - root_vdi_ref = xenapi_fake.create_vdi('root', None) - - xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1) - xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0) - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - image_meta = {'id': IMAGE_VHD, - 'disk_format': 'vhd'} - conn.rescue(self.context, instance, [], image_meta, '') - - vm = xenapi_fake.get_record('VM', vm_ref) - rescue_name = "%s-rescue" % vm["name_label"] - rescue_ref = vm_utils.lookup(session, rescue_name) - rescue_vm = xenapi_fake.get_record('VM', rescue_ref) - - vdi_uuids = [] - for vbd_uuid in rescue_vm["VBDs"]: - vdi_uuids.append(xenapi_fake.get_record('VBD', vbd_uuid)["VDI"]) - self.assertTrue("swap" not in vdi_uuids) - - def test_unrescue(self): - instance = self._create_instance() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - # Unrescue expects the original instance to be powered off - conn.power_off(instance) - rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue', - 'Running') - conn.unrescue(instance, None) - - def test_unrescue_not_in_rescue(self): - instance = self._create_instance() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - # Ensure that it will not unrescue a non-rescued instance. 
- self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue, - instance, None) - - def test_finish_revert_migration(self): - instance = self._create_instance() - - class VMOpsMock(): - - def __init__(self): - self.finish_revert_migration_called = False - - def finish_revert_migration(self, instance, block_info): - self.finish_revert_migration_called = True - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn._vmops = VMOpsMock() - conn.finish_revert_migration(instance, None) - self.assertTrue(conn._vmops.finish_revert_migration_called) - - def test_reboot_hard(self): - instance = self._create_instance() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.reboot(self.context, instance, None, "HARD") - - def test_reboot_soft(self): - instance = self._create_instance() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.reboot(self.context, instance, None, "SOFT") - - def test_reboot_halted(self): - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - instance = self._create_instance(spawn=False) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - xenapi_fake.create_vm(instance['name'], 'Halted') - conn.reboot(self.context, instance, None, "SOFT") - vm_ref = vm_utils.lookup(session, instance['name']) - vm = xenapi_fake.get_record('VM', vm_ref) - self.assertEquals(vm['power_state'], 'Running') - - def test_reboot_unknown_state(self): - instance = self._create_instance(spawn=False) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - xenapi_fake.create_vm(instance['name'], 'Unknown') - self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context, - instance, None, "SOFT") - - def test_reboot_rescued(self): - instance = self._create_instance() - instance['vm_state'] = vm_states.RESCUED - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - real_result = vm_utils.lookup(conn._session, instance['name']) - - self.mox.StubOutWithMock(vm_utils, 'lookup') - vm_utils.lookup(conn._session, instance['name'], - True).AndReturn(real_result) - self.mox.ReplayAll() - - conn.reboot(self.context, instance, None, "SOFT") - - def _test_maintenance_mode(self, find_host, find_aggregate): - real_call_xenapi = self.conn._session.call_xenapi - instance = self._create_instance(spawn=True) - api_calls = {} - - # Record all the xenapi calls, and return a fake list of hosts - # for the host.get_all call - def fake_call_xenapi(method, *args): - api_calls[method] = args - if method == 'host.get_all': - return ['foo', 'bar', 'baz'] - return real_call_xenapi(method, *args) - self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi) - - def fake_aggregate_get(context, host, key): - if find_aggregate: - return [{'fake': 'aggregate'}] - else: - return [] - self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host', - fake_aggregate_get) - - def fake_host_find(context, session, src, dst): - if find_host: - return 'bar' - else: - raise exception.NoValidHost("I saw this one coming...") - self.stubs.Set(host, '_host_find', fake_host_find) - - result = self.conn.host_maintenance_mode('bar', 'on_maintenance') - self.assertEqual(result, 'on_maintenance') - - # We expect the VM.pool_migrate call to have been called to - # migrate our instance to the 'bar' host - expected = (instance['uuid'], 'bar', {}) - self.assertTrue(api_calls.get('VM.pool_migrate'), expected) - - instance = db.instance_get_by_uuid(self.context, instance['uuid']) - self.assertTrue(instance['vm_state'], vm_states.ACTIVE) - 
self.assertTrue(instance['task_state'], task_states.MIGRATING) - - def test_maintenance_mode(self): - self._test_maintenance_mode(True, True) - - def test_maintenance_mode_no_host(self): - self.assertRaises(exception.NoValidHost, - self._test_maintenance_mode, False, True) - - def test_maintenance_mode_no_aggregate(self): - self.assertRaises(exception.NotFound, - self._test_maintenance_mode, True, False) - - def test_session_virtapi(self): - was = {'called': False} - - def fake_aggregate_get_by_host(self, *args, **kwargs): - was['called'] = True - raise Exception() - self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host", - fake_aggregate_get_by_host) - - self.stubs.Set(self.conn._session, "is_slave", True) - - try: - self.conn._session._get_host_uuid() - except Exception: - pass - self.assertTrue(was['called']) - - def test_per_instance_usage_running(self): - instance = self._create_instance(spawn=True) - instance_type = flavors.get_instance_type(3) - - expected = {instance['uuid']: {'memory_mb': instance_type['memory_mb'], - 'uuid': instance['uuid']}} - actual = self.conn.get_per_instance_usage() - self.assertEqual(expected, actual) - - # Paused instances still consume resources: - self.conn.pause(instance) - actual = self.conn.get_per_instance_usage() - self.assertEqual(expected, actual) - - def test_per_instance_usage_suspended(self): - # Suspended instances do not consume memory: - instance = self._create_instance(spawn=True) - self.conn.suspend(instance) - actual = self.conn.get_per_instance_usage() - self.assertEqual({}, actual) - - def test_per_instance_usage_halted(self): - instance = self._create_instance(spawn=True) - self.conn.power_off(instance) - actual = self.conn.get_per_instance_usage() - self.assertEqual({}, actual) - - def _create_instance(self, instance_id=1, spawn=True): - """Creates and spawns a test instance.""" - instance_values = { - 'id': instance_id, - 'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id, - 'display_name': 'host-%d' % instance_id, - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'root_gb': 20, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'vm_mode': 'hvm', - 'architecture': 'x86-64'} - - instance = create_instance_with_system_metadata(self.context, - instance_values) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, - spectacular=True) - image_meta = {'id': IMAGE_VHD, - 'disk_format': 'vhd'} - if spawn: - self.conn.spawn(self.context, instance, image_meta, [], 'herp', - network_info) - return instance - - -class XenAPIDiffieHellmanTestCase(test.TestCase): - """Unit tests for Diffie-Hellman code.""" - def setUp(self): - super(XenAPIDiffieHellmanTestCase, self).setUp() - self.alice = agent.SimpleDH() - self.bob = agent.SimpleDH() - - def test_shared(self): - alice_pub = self.alice.get_public() - bob_pub = self.bob.get_public() - alice_shared = self.alice.compute_shared(bob_pub) - bob_shared = self.bob.compute_shared(alice_pub) - self.assertEquals(alice_shared, bob_shared) - - def _test_encryption(self, message): - enc = self.alice.encrypt(message) - self.assertFalse(enc.endswith('\n')) - dec = self.bob.decrypt(enc) - self.assertEquals(dec, message) - - def test_encrypt_simple_message(self): - self._test_encryption('This is a simple message.') - - def test_encrypt_message_with_newlines_at_end(self): - self._test_encryption('This message has a newline at the end.\n') - - def test_encrypt_many_newlines_at_end(self): - 
self._test_encryption('Message with lotsa newlines.\n\n\n') - - def test_encrypt_newlines_inside_message(self): - self._test_encryption('Message\nwith\ninterior\nnewlines.') - - def test_encrypt_with_leading_newlines(self): - self._test_encryption('\n\nMessage with leading newlines.') - - def test_encrypt_really_long_message(self): - self._test_encryption(''.join(['abcd' for i in xrange(1024)])) - - -class XenAPIMigrateInstance(stubs.XenAPITestBase): - """Unit test for verifying migration-related actions.""" - - def setUp(self): - super(XenAPIMigrateInstance, self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - firewall_driver='nova.virt.xenapi.firewall.' - 'Dom0IptablesFirewallDriver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - db_fakes.stub_out_db_instance_api(self.stubs) - xenapi_fake.create_network('fake', CONF.flat_network_bridge) - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - self.instance_values = {'id': 1, - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': 1, - 'kernel_id': None, - 'ramdisk_id': None, - 'root_gb': 5, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - migration_values = { - 'source_compute': 'nova-compute', - 'dest_compute': 'nova-compute', - 'dest_host': '10.127.5.114', - 'status': 'post-migrating', - 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7', - 'old_instance_type_id': 5, - 'new_instance_type_id': 1 - } - self.migration = db.migration_create( - context.get_admin_context(), migration_values) - - fake_processutils.stub_out_processutils_execute(self.stubs) - stubs.stub_out_migration_methods(self.stubs) - stubs.stubout_get_this_vm_uuid(self.stubs) - - def fake_inject_instance_metadata(self, instance, vm): - pass - self.stubs.Set(vmops.VMOps, 'inject_instance_metadata', - fake_inject_instance_metadata) - - def test_resize_xenserver_6(self): - instance = db.instance_create(self.context, self.instance_values) - called = {'resize': False} - - def fake_vdi_resize(*args, **kwargs): - called['resize'] = True - - self.stubs.Set(stubs.FakeSessionForVMTests, - "VDI_resize", fake_vdi_resize) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, - product_version=(6, 0, 0), - product_brand='XenServer') - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vdi_ref = xenapi_fake.create_vdi('hurr', 'fake') - vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid'] - conn._vmops._resize_instance(instance, - {'uuid': vdi_uuid, 'ref': vdi_ref}) - self.assertEqual(called['resize'], True) - - def test_resize_xcp(self): - instance = db.instance_create(self.context, self.instance_values) - called = {'resize': False} - - def fake_vdi_resize(*args, **kwargs): - called['resize'] = True - - self.stubs.Set(stubs.FakeSessionForVMTests, - "VDI_resize", fake_vdi_resize) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, - product_version=(1, 4, 99), - product_brand='XCP') - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vdi_ref = xenapi_fake.create_vdi('hurr', 'fake') - vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid'] - conn._vmops._resize_instance(instance, - {'uuid': vdi_uuid, 'ref': vdi_ref}) - self.assertEqual(called['resize'], True) - - def test_migrate_disk_and_power_off(self): - instance = db.instance_create(self.context, self.instance_values) - xenapi_fake.create_vm(instance['name'], 'Running') - 
instance_type = db.instance_type_get_by_name(self.context, 'm1.large') - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.migrate_disk_and_power_off(self.context, instance, - '127.0.0.1', instance_type, None) - - def test_migrate_disk_and_power_off_passes_exceptions(self): - instance = db.instance_create(self.context, self.instance_values) - xenapi_fake.create_vm(instance['name'], 'Running') - instance_type = db.instance_type_get_by_name(self.context, 'm1.large') - - def fake_raise(*args, **kwargs): - raise exception.MigrationError(reason='test failure') - self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise) - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.MigrationError, - conn.migrate_disk_and_power_off, - self.context, instance, - '127.0.0.1', instance_type, None) - - def test_revert_migrate(self): - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - self.called = False - self.fake_vm_start_called = False - self.fake_finish_revert_migration_called = False - - def fake_vm_start(*args, **kwargs): - self.fake_vm_start_called = True - - def fake_vdi_resize(*args, **kwargs): - self.called = True - - def fake_finish_revert_migration(*args, **kwargs): - self.fake_finish_revert_migration_called = True - - self.stubs.Set(stubs.FakeSessionForVMTests, - "VDI_resize_online", fake_vdi_resize) - self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) - self.stubs.Set(vmops.VMOps, 'finish_revert_migration', - fake_finish_revert_migration) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, - product_version=(4, 0, 0), - product_brand='XenServer') - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, - spectacular=True) - image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} - base = xenapi_fake.create_vdi('hurr', 'fake') - base_uuid = xenapi_fake.get_record('VDI', base)['uuid'] - cow = xenapi_fake.create_vdi('durr', 'fake') - cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid'] - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy=base_uuid, cow=cow_uuid), - network_info, image_meta, resize_instance=True) - self.assertEqual(self.called, True) - self.assertEqual(self.fake_vm_start_called, True) - - conn.finish_revert_migration(instance, network_info) - self.assertEqual(self.fake_finish_revert_migration_called, True) - - def test_finish_migrate(self): - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - self.called = False - self.fake_vm_start_called = False - - def fake_vm_start(*args, **kwargs): - self.fake_vm_start_called = True - - def fake_vdi_resize(*args, **kwargs): - self.called = True - - self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) - self.stubs.Set(stubs.FakeSessionForVMTests, - "VDI_resize_online", fake_vdi_resize) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, - product_version=(4, 0, 0), - product_brand='XenServer') - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, - spectacular=True) - image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy='hurr', cow='durr'), - network_info, image_meta, resize_instance=True) - self.assertEqual(self.called, True) - self.assertEqual(self.fake_vm_start_called, True) - - def 
test_finish_migrate_no_local_storage(self): - tiny_type = flavors.get_instance_type_by_name('m1.tiny') - tiny_type_id = tiny_type['id'] - self.instance_values.update({'instance_type_id': tiny_type_id, - 'root_gb': 0}) - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - - def fake_vdi_resize(*args, **kwargs): - raise Exception("This shouldn't be called") - - self.stubs.Set(stubs.FakeSessionForVMTests, - "VDI_resize_online", fake_vdi_resize) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, - spectacular=True) - image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy='hurr', cow='durr'), - network_info, image_meta, resize_instance=True) - - def test_finish_migrate_no_resize_vdi(self): - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - - def fake_vdi_resize(*args, **kwargs): - raise Exception("This shouldn't be called") - - self.stubs.Set(stubs.FakeSessionForVMTests, - "VDI_resize_online", fake_vdi_resize) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, - spectacular=True) - # Resize instance would be determined by the compute call - image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy='hurr', cow='durr'), - network_info, image_meta, resize_instance=False) - - def test_migrate_no_auto_disk_config_no_resize_down(self): - # Resize down should fail when auto_disk_config not set. - instance_values = self.instance_values - instance_values['root_gb'] = 40 - instance_values['auto_disk_config'] = False - instance = db.instance_create(self.context, instance_values) - xenapi_fake.create_vm(instance['name'], 'Running') - instance_type = db.instance_type_get_by_name(self.context, 'm1.small') - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.ResizeError, - conn.migrate_disk_and_power_off, - self.context, instance, - '127.0.0.1', instance_type, None) - - def test_migrate_rollback_when_resize_down_fs_fails(self): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - virtapi = vmops._virtapi - - self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown') - self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label') - self.mox.StubOutWithMock(vm_utils, 'resize_disk') - self.mox.StubOutWithMock(vmops, '_migrate_vhd') - self.mox.StubOutWithMock(vm_utils, 'destroy_vdi') - self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely') - self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan') - self.mox.StubOutWithMock(virtapi, 'instance_update') - - instance = {'auto_disk_config': True, 'uuid': 'uuid'} - vm_ref = "vm_ref" - dest = "dest" - instance_type = "type" - sr_path = "sr_path" - - virtapi.instance_update(self.context, 'uuid', {'progress': 20.0}) - vmops._resize_ensure_vm_is_shutdown(instance, vm_ref) - vmops._apply_orig_vm_name_label(instance, vm_ref) - old_vdi_ref = "old_ref" - vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn( - (old_vdi_ref, None)) - virtapi.instance_update(self.context, 'uuid', {'progress': 40.0}) - new_vdi_ref = "new_ref" - new_vdi_uuid = "new_uuid" - vm_utils.resize_disk(vmops._session, instance, old_vdi_ref, - instance_type).AndReturn((new_vdi_ref, new_vdi_uuid)) - 
virtapi.instance_update(self.context, 'uuid', {'progress': 60.0}) - vmops._migrate_vhd(instance, new_vdi_uuid, dest, - sr_path, 0).AndRaise( - exception.ResizeError(reason="asdf")) - - vm_utils.destroy_vdi(vmops._session, new_vdi_ref) - vmops._restore_orig_vm_and_cleanup_orphan(instance, None) - - self.mox.ReplayAll() - - self.assertRaises(exception.InstanceFaultRollback, - vmops._migrate_disk_resizing_down, self.context, - instance, dest, instance_type, vm_ref, sr_path) - - def test_resize_ensure_vm_is_shutdown_cleanly(self): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') - self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') - self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') - - vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) - vm_utils.clean_shutdown_vm(vmops._session, fake_instance, - "ref").AndReturn(True) - - self.mox.ReplayAll() - - vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") - - def test_resize_ensure_vm_is_shutdown_forced(self): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') - self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') - self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') - - vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) - vm_utils.clean_shutdown_vm(vmops._session, fake_instance, - "ref").AndReturn(False) - vm_utils.hard_shutdown_vm(vmops._session, fake_instance, - "ref").AndReturn(True) - - self.mox.ReplayAll() - - vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") - - def test_resize_ensure_vm_is_shutdown_fails(self): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') - self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') - self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') - - vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) - vm_utils.clean_shutdown_vm(vmops._session, fake_instance, - "ref").AndReturn(False) - vm_utils.hard_shutdown_vm(vmops._session, fake_instance, - "ref").AndReturn(False) - - self.mox.ReplayAll() - - self.assertRaises(exception.ResizeError, - vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref") - - def test_resize_ensure_vm_is_shutdown_already_shutdown(self): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') - self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') - self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') - - vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True) - - self.mox.ReplayAll() - - vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") - - -class XenAPIImageTypeTestCase(test.TestCase): - """Test ImageType class.""" - - def test_to_string(self): - # Can convert from type id to type string. 
- self.assertEquals( - vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL), - vm_utils.ImageType.KERNEL_STR) - - def _assert_role(self, expected_role, image_type_id): - self.assertEquals( - expected_role, - vm_utils.ImageType.get_role(image_type_id)) - - def test_get_image_role_kernel(self): - self._assert_role('kernel', vm_utils.ImageType.KERNEL) - - def test_get_image_role_ramdisk(self): - self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK) - - def test_get_image_role_disk(self): - self._assert_role('root', vm_utils.ImageType.DISK) - - def test_get_image_role_disk_raw(self): - self._assert_role('root', vm_utils.ImageType.DISK_RAW) - - def test_get_image_role_disk_vhd(self): - self._assert_role('root', vm_utils.ImageType.DISK_VHD) - - -class XenAPIDetermineDiskImageTestCase(test.TestCase): - """Unit tests for code that detects the ImageType.""" - def assert_disk_type(self, image_meta, expected_disk_type): - actual = vm_utils.determine_disk_image_type(image_meta) - self.assertEqual(expected_disk_type, actual) - - def test_machine(self): - image_meta = {'id': 'a', 'disk_format': 'ami'} - self.assert_disk_type(image_meta, vm_utils.ImageType.DISK) - - def test_raw(self): - image_meta = {'id': 'a', 'disk_format': 'raw'} - self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW) - - def test_vhd(self): - image_meta = {'id': 'a', 'disk_format': 'vhd'} - self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD) - - def test_none(self): - image_meta = None - self.assert_disk_type(image_meta, None) - - -class XenAPIDetermineIsPVTestCase(test.TestCase): - """Unit tests for code that detects the PV status based on ImageType.""" - def assert_pv_status(self, disk_image_type, os_type, expected_pv_status): - session = None - vdi_ref = None - actual = vm_utils.determine_is_pv(session, vdi_ref, - disk_image_type, os_type) - self.assertEqual(expected_pv_status, actual) - - def test_windows_vhd(self): - self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'windows', False) - - def test_linux_vhd(self): - self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'linux', True) - - @stub_vm_utils_with_vdi_attached_here - def test_raw(self): - self.assert_pv_status(vm_utils.ImageType.DISK_RAW, 'linux', True) - - def test_disk(self): - self.assert_pv_status(vm_utils.ImageType.DISK, None, True) - - def test_iso(self): - self.assert_pv_status(vm_utils.ImageType.DISK_ISO, None, False) - - @stub_vm_utils_with_vdi_attached_here - def test_none(self): - self.assert_pv_status(None, None, True) - - -class CompareVersionTestCase(test.TestCase): - def test_less_than(self): - # Test that cmp_version compares a as less than b. - self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0) - - def test_greater_than(self): - # Test that cmp_version compares a as greater than b. - self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0) - - def test_equal(self): - # Test that cmp_version compares a as equal to b. - self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0) - - def test_non_lexical(self): - # Test that cmp_version compares non-lexically. - self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0) - - def test_length(self): - # Test that cmp_version compares by length as last resort. 
- self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0) - - -class XenAPIHostTestCase(stubs.XenAPITestBase): - """Tests HostState, which holds metrics from XenServer that get - reported back to the Schedulers.""" - - def setUp(self): - super(XenAPIHostTestCase, self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - xenapi_fake.create_local_srs() - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def test_host_state(self): - stats = self.conn.get_host_stats() - self.assertEquals(stats['disk_total'], 40000) - self.assertEquals(stats['disk_used'], 20000) - self.assertEquals(stats['host_memory_total'], 10) - self.assertEquals(stats['host_memory_overhead'], 20) - self.assertEquals(stats['host_memory_free'], 30) - self.assertEquals(stats['host_memory_free_computed'], 40) - self.assertEquals(stats['hypervisor_hostname'], 'fake-xenhost') - - def test_host_state_missing_sr(self): - def fake_safe_find_sr(session): - raise exception.StorageRepositoryNotFound('not there') - - self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr) - self.assertRaises(exception.StorageRepositoryNotFound, - self.conn.get_host_stats) - - def _test_host_action(self, method, action, expected=None): - result = method('host', action) - if not expected: - expected = action - self.assertEqual(result, expected) - - def test_host_reboot(self): - self._test_host_action(self.conn.host_power_action, 'reboot') - - def test_host_shutdown(self): - self._test_host_action(self.conn.host_power_action, 'shutdown') - - def test_host_startup(self): - self.assertRaises(NotImplementedError, - self.conn.host_power_action, 'host', 'startup') - - def test_host_maintenance_on(self): - self._test_host_action(self.conn.host_maintenance_mode, - True, 'on_maintenance') - - def test_host_maintenance_off(self): - self._test_host_action(self.conn.host_maintenance_mode, - False, 'off_maintenance') - - def test_set_enable_host_enable(self): - self._test_host_action(self.conn.set_host_enabled, True, 'enabled') - - def test_set_enable_host_disable(self): - self._test_host_action(self.conn.set_host_enabled, False, 'disabled') - - def test_get_host_uptime(self): - result = self.conn.get_host_uptime('host') - self.assertEqual(result, 'fake uptime') - - def test_supported_instances_is_included_in_host_state(self): - stats = self.conn.get_host_stats() - self.assertTrue('supported_instances' in stats) - - def test_supported_instances_is_calculated_by_to_supported_instances(self): - - def to_supported_instances(somedata): - self.assertEquals(None, somedata) - return "SOMERETURNVALUE" - self.stubs.Set(host, 'to_supported_instances', to_supported_instances) - - stats = self.conn.get_host_stats() - self.assertEquals("SOMERETURNVALUE", stats['supported_instances']) - - -class ToSupportedInstancesTestCase(test.TestCase): - def test_default_return_value(self): - self.assertEquals([], - host.to_supported_instances(None)) - - def test_return_value(self): - self.assertEquals([('x86_64', 'xapi', 'xen')], - host.to_supported_instances([u'xen-3.0-x86_64'])) - - def test_invalid_values_do_not_break(self): - self.assertEquals([('x86_64', 'xapi', 'xen')], - host.to_supported_instances([u'xen-3.0-x86_64', 'spam'])) - - def test_multiple_values(self): - self.assertEquals( - [ - ('x86_64', 'xapi', 'xen'), - ('x86_32', 'xapi', 'hvm') - ], - host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32']) - ) - - -class 
XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase): - def setUp(self): - super(XenAPIAutoDiskConfigTestCase, self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - firewall_driver='nova.virt.xenapi.firewall.' - 'Dom0IptablesFirewallDriver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.user_id = 'fake' - self.project_id = 'fake' - - self.instance_values = {'id': 1, - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'root_gb': 20, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - self.context = context.RequestContext(self.user_id, self.project_id) - - def fake_create_vbd(session, vm_ref, vdi_ref, userdevice, - vbd_type='disk', read_only=False, bootable=True, - osvol=False): - pass - - self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd) - - def assertIsPartitionCalled(self, called): - marker = {"partition_called": False} - - def fake_resize_part_and_fs(dev, start, old, new): - marker["partition_called"] = True - self.stubs.Set(vm_utils, "_resize_part_and_fs", - fake_resize_part_and_fs) - - ctx = context.RequestContext(self.user_id, self.project_id) - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - - disk_image_type = vm_utils.ImageType.DISK_VHD - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted') - vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake') - - vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid'] - vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}} - - self.conn._vmops._attach_disks(instance, vm_ref, instance['name'], - vdis, disk_image_type) - - self.assertEqual(marker["partition_called"], called) - - def test_instance_not_auto_disk_config(self): - """Should not partition unless instance is marked as - auto_disk_config. - """ - self.instance_values['auto_disk_config'] = False - self.assertIsPartitionCalled(False) - - @stub_vm_utils_with_vdi_attached_here - def test_instance_auto_disk_config_doesnt_pass_fail_safes(self): - # Should not partition unless fail safes pass. - self.instance_values['auto_disk_config'] = True - - def fake_get_partitions(dev): - return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')] - self.stubs.Set(vm_utils, "_get_partitions", - fake_get_partitions) - - self.assertIsPartitionCalled(False) - - @stub_vm_utils_with_vdi_attached_here - def test_instance_auto_disk_config_passes_fail_safes(self): - """Should partition if instance is marked as auto_disk_config=True and - virt-layer specific fail-safe checks pass. - """ - self.instance_values['auto_disk_config'] = True - - def fake_get_partitions(dev): - return [(1, 0, 100, 'ext4')] - self.stubs.Set(vm_utils, "_get_partitions", - fake_get_partitions) - - self.assertIsPartitionCalled(True) - - -class XenAPIGenerateLocal(stubs.XenAPITestBase): - """Test generating of local disks, like swap and ephemeral.""" - def setUp(self): - super(XenAPIGenerateLocal, self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - firewall_driver='nova.virt.xenapi.firewall.' 
- 'Dom0IptablesFirewallDriver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - db_fakes.stub_out_db_instance_api(self.stubs) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.user_id = 'fake' - self.project_id = 'fake' - - self.instance_values = {'id': 1, - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'root_gb': 20, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - self.context = context.RequestContext(self.user_id, self.project_id) - - def fake_create_vbd(session, vm_ref, vdi_ref, userdevice, - vbd_type='disk', read_only=False, bootable=True, - osvol=False, empty=False, unpluggable=True): - return session.call_xenapi('VBD.create', {'VM': vm_ref, - 'VDI': vdi_ref}) - - self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd) - - def assertCalled(self, instance, - disk_image_type=vm_utils.ImageType.DISK_VHD): - ctx = context.RequestContext(self.user_id, self.project_id) - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - - vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted') - vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake') - - vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid'] - - vdi_key = 'root' - if disk_image_type == vm_utils.ImageType.DISK_ISO: - vdi_key = 'iso' - vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}} - - self.called = False - self.conn._vmops._attach_disks(instance, vm_ref, instance['name'], - vdis, disk_image_type) - self.assertTrue(self.called) - - def test_generate_swap(self): - # Test swap disk generation. - instance_values = dict(self.instance_values, instance_type_id=5) - instance = create_instance_with_system_metadata(self.context, - instance_values) - - def fake_generate_swap(*args, **kwargs): - self.called = True - self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap) - - self.assertCalled(instance) - - def test_generate_ephemeral(self): - # Test ephemeral disk generation. 
- instance_values = dict(self.instance_values, instance_type_id=4) - instance = create_instance_with_system_metadata(self.context, - instance_values) - - def fake_generate_ephemeral(*args): - self.called = True - self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) - - self.assertCalled(instance) - - def test_generate_iso_blank_root_disk(self): - instance_values = dict(self.instance_values, instance_type_id=4) - instance_values.pop('kernel_id') - instance_values.pop('ramdisk_id') - instance = create_instance_with_system_metadata(self.context, - instance_values) - - def fake_generate_ephemeral(*args): - pass - self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) - - def fake_generate_iso(*args): - self.called = True - self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk', - fake_generate_iso) - - self.assertCalled(instance, vm_utils.ImageType.DISK_ISO) - - -class XenAPIBWCountersTestCase(stubs.XenAPITestBase): - FAKE_VMS = {'test1:ref': dict(name_label='test1', - other_config=dict(nova_uuid='hash'), - domid='12', - _vifmap={'0': "a:b:c:d...", - '1': "e:f:12:q..."}), - 'test2:ref': dict(name_label='test2', - other_config=dict(nova_uuid='hash'), - domid='42', - _vifmap={'0': "a:3:c:d...", - '1': "e:f:42:q..."}), - } - - def setUp(self): - super(XenAPIBWCountersTestCase, self).setUp() - self.stubs.Set(vm_utils, 'list_vms', - XenAPIBWCountersTestCase._fake_list_vms) - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - firewall_driver='nova.virt.xenapi.firewall.' - 'Dom0IptablesFirewallDriver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def _fake_get_vif_device_map(vm_rec): - return vm_rec['_vifmap'] - - self.stubs.Set(self.conn._vmops, "_get_vif_device_map", - _fake_get_vif_device_map) - - @classmethod - def _fake_list_vms(cls, session): - return cls.FAKE_VMS.iteritems() - - @classmethod - def _fake_fetch_bandwidth_mt(cls, session): - return {} - - @classmethod - def _fake_fetch_bandwidth(cls, session): - return {'42': - {'0': {'bw_in': 21024, 'bw_out': 22048}, - '1': {'bw_in': 231337, 'bw_out': 221212121}}, - '12': - {'0': {'bw_in': 1024, 'bw_out': 2048}, - '1': {'bw_in': 31337, 'bw_out': 21212121}}, - } - - def test_get_all_bw_counters(self): - instances = [dict(name='test1', uuid='1-2-3'), - dict(name='test2', uuid='4-5-6')] - - self.stubs.Set(vm_utils, 'fetch_bandwidth', - XenAPIBWCountersTestCase._fake_fetch_bandwidth) - result = self.conn.get_all_bw_counters(instances) - self.assertEqual(len(result), 4) - self.assertIn(dict(uuid='1-2-3', - mac_address="a:b:c:d...", - bw_in=1024, - bw_out=2048), result) - self.assertIn(dict(uuid='1-2-3', - mac_address="e:f:12:q...", - bw_in=31337, - bw_out=21212121), result) - - self.assertIn(dict(uuid='4-5-6', - mac_address="a:3:c:d...", - bw_in=21024, - bw_out=22048), result) - self.assertIn(dict(uuid='4-5-6', - mac_address="e:f:42:q...", - bw_in=231337, - bw_out=221212121), result) - - def test_get_all_bw_counters_in_failure_case(self): - """Test that get_all_bw_conters returns an empty list when - no data returned from Xenserver. c.f. bug #910045. 
- """ - instances = [dict(name='instance-0001', uuid='1-2-3-4-5')] - - self.stubs.Set(vm_utils, 'fetch_bandwidth', - XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt) - result = self.conn.get_all_bw_counters(instances) - self.assertEqual(result, []) - - -# TODO(salvatore-orlando): this class and -# nova.tests.virt.test_libvirt.IPTablesFirewallDriverTestCase share a lot of -# code. Consider abstracting common code in a base class for firewall driver -# testing. -class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): - - _in_rules = [ - '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', - '*nat', - ':PREROUTING ACCEPT [1170:189210]', - ':INPUT ACCEPT [844:71028]', - ':OUTPUT ACCEPT [5149:405186]', - ':POSTROUTING ACCEPT [5063:386098]', - '# Completed on Mon Dec 6 11:54:13 2010', - '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', - '*mangle', - ':INPUT ACCEPT [969615:281627771]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [915599:63811649]', - ':nova-block-ipv4 - [0:0]', - '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', - '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' - ',ESTABLISHED -j ACCEPT ', - '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', - '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', - '[0:0] -A FORWARD -o virbr0 -j REJECT ' - '--reject-with icmp-port-unreachable ', - '[0:0] -A FORWARD -i virbr0 -j REJECT ' - '--reject-with icmp-port-unreachable ', - 'COMMIT', - '# Completed on Mon Dec 6 11:54:13 2010', - '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', - '*filter', - ':INPUT ACCEPT [969615:281627771]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [915599:63811649]', - ':nova-block-ipv4 - [0:0]', - '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', - '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' - ',ESTABLISHED -j ACCEPT ', - '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', - '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', - '[0:0] -A FORWARD -o virbr0 -j REJECT ' - '--reject-with icmp-port-unreachable ', - '[0:0] -A FORWARD -i virbr0 -j REJECT ' - '--reject-with icmp-port-unreachable ', - 'COMMIT', - '# Completed on Mon Dec 6 11:54:13 2010', - ] - - _in6_filter_rules = [ - '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', - '*filter', - ':INPUT ACCEPT [349155:75810423]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [349256:75777230]', - 'COMMIT', - '# Completed on Tue Jan 18 23:47:56 2011', - ] - - def setUp(self): - super(XenAPIDom0IptablesFirewallTestCase, self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - instance_name_template='%d', - firewall_driver='nova.virt.xenapi.firewall.' 
- 'Dom0IptablesFirewallDriver') - xenapi_fake.create_local_srs() - xenapi_fake.create_local_pifs() - self.user_id = 'mappin' - self.project_id = 'fake' - stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests, - test_case=self) - self.context = context.RequestContext(self.user_id, self.project_id) - self.network = importutils.import_object(CONF.network_manager) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.fw = self.conn._vmops.firewall_driver - - def _create_instance_ref(self): - return db.instance_create(self.context, - {'user_id': self.user_id, - 'project_id': self.project_id, - 'instance_type_id': 1}) - - def _create_test_security_group(self): - admin_ctxt = context.get_admin_context() - secgroup = db.security_group_create(admin_ctxt, - {'user_id': self.user_id, - 'project_id': self.project_id, - 'name': 'testgroup', - 'description': 'test group'}) - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'icmp', - 'from_port': -1, - 'to_port': -1, - 'cidr': '192.168.11.0/24'}) - - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'icmp', - 'from_port': 8, - 'to_port': -1, - 'cidr': '192.168.11.0/24'}) - - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'tcp', - 'from_port': 80, - 'to_port': 81, - 'cidr': '192.168.10.0/24'}) - return secgroup - - def _validate_security_group(self): - in_rules = filter(lambda l: not l.startswith('#'), - self._in_rules) - for rule in in_rules: - if 'nova' not in rule: - self.assertTrue(rule in self._out_rules, - 'Rule went missing: %s' % rule) - - instance_chain = None - for rule in self._out_rules: - # This is pretty crude, but it'll do for now - # last two octets change - if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule): - instance_chain = rule.split(' ')[-1] - break - self.assertTrue(instance_chain, "The instance chain wasn't added") - security_group_chain = None - for rule in self._out_rules: - # This is pretty crude, but it'll do for now - if '-A %s -j' % instance_chain in rule: - security_group_chain = rule.split(' ')[-1] - break - self.assertTrue(security_group_chain, - "The security group chain wasn't added") - - regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp' - ' -s 192.168.11.0/24') - self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, - "ICMP acceptance rule wasn't added") - - regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp' - ' --icmp-type 8 -s 192.168.11.0/24') - self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, - "ICMP Echo Request acceptance rule wasn't added") - - regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81' - ' -s 192.168.10.0/24') - self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, - "TCP port 80/81 acceptance rule wasn't added") - - def test_static_filters(self): - instance_ref = self._create_instance_ref() - src_instance_ref = self._create_instance_ref() - admin_ctxt = context.get_admin_context() - secgroup = self._create_test_security_group() - - src_secgroup = db.security_group_create(admin_ctxt, - {'user_id': self.user_id, - 'project_id': self.project_id, - 'name': 'testsourcegroup', - 'description': 'src group'}) - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'tcp', - 'from_port': 80, - 'to_port': 81, - 'group_id': src_secgroup['id']}) - - db.instance_add_security_group(admin_ctxt, instance_ref['uuid'], - secgroup['id']) - 
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'], - src_secgroup['id']) - instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) - src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id']) - - network_model = fake_network.fake_get_instance_nw_info(self.stubs, - 1, spectacular=True) - - fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs, - lambda *a, **kw: network_model) - - network_info = network_model.legacy() - self.fw.prepare_instance_filter(instance_ref, network_info) - self.fw.apply_instance_filter(instance_ref, network_info) - - self._validate_security_group() - # Extra test for TCP acceptance rules - for ip in network_model.fixed_ips(): - if ip['version'] != 4: - continue - regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp' - ' --dport 80:81 -s %s' % ip['address']) - self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, - "TCP port 80/81 acceptance rule wasn't added") - - db.instance_destroy(admin_ctxt, instance_ref['uuid']) - - def test_filters_for_instance_with_ip_v6(self): - self.flags(use_ipv6=True) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1) - rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) - self.assertEquals(len(rulesv4), 2) - self.assertEquals(len(rulesv6), 1) - - def test_filters_for_instance_without_ip_v6(self): - self.flags(use_ipv6=False) - network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1) - rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) - self.assertEquals(len(rulesv4), 2) - self.assertEquals(len(rulesv6), 0) - - def test_multinic_iptables(self): - ipv4_rules_per_addr = 1 - ipv4_addr_per_network = 2 - ipv6_rules_per_addr = 1 - ipv6_addr_per_network = 1 - networks_count = 5 - instance_ref = self._create_instance_ref() - _get_instance_nw_info = fake_network.fake_get_instance_nw_info - network_info = _get_instance_nw_info(self.stubs, - networks_count, - ipv4_addr_per_network) - ipv4_len = len(self.fw.iptables.ipv4['filter'].rules) - ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) - inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, - network_info) - self.fw.prepare_instance_filter(instance_ref, network_info) - ipv4 = self.fw.iptables.ipv4['filter'].rules - ipv6 = self.fw.iptables.ipv6['filter'].rules - ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len - ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len - # Extra rules are for the DHCP request - rules = (ipv4_rules_per_addr * ipv4_addr_per_network * - networks_count) + 2 - self.assertEquals(ipv4_network_rules, rules) - self.assertEquals(ipv6_network_rules, - ipv6_rules_per_addr * ipv6_addr_per_network * networks_count) - - def test_do_refresh_security_group_rules(self): - admin_ctxt = context.get_admin_context() - instance_ref = self._create_instance_ref() - network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1) - secgroup = self._create_test_security_group() - db.instance_add_security_group(admin_ctxt, instance_ref['uuid'], - secgroup['id']) - self.fw.prepare_instance_filter(instance_ref, network_info) - self.fw.instances[instance_ref['id']] = instance_ref - self._validate_security_group() - # add a rule to the security group - db.security_group_rule_create(admin_ctxt, - {'parent_group_id': secgroup['id'], - 'protocol': 'udp', - 'from_port': 200, - 'to_port': 299, - 'cidr': '192.168.99.0/24'}) - #validate the extra rule - self.fw.refresh_security_group_rules(secgroup) - regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp 
--dport 200:299' - ' -s 192.168.99.0/24') - self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, - "Rules were not updated properly." - "The rule for UDP acceptance is missing") - - def test_provider_firewall_rules(self): - # setup basic instance data - instance_ref = self._create_instance_ref() - # FRAGILE: as in libvirt tests - # peeks at how the firewall names chains - chain_name = 'inst-%s' % instance_ref['id'] - - network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1) - self.fw.prepare_instance_filter(instance_ref, network_info) - self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) - rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules - if rule.chain == 'provider'] - self.assertEqual(0, len(rules)) - - admin_ctxt = context.get_admin_context() - # add a rule and send the update message, check for 1 rule - provider_fw0 = db.provider_fw_rule_create(admin_ctxt, - {'protocol': 'tcp', - 'cidr': '10.99.99.99/32', - 'from_port': 1, - 'to_port': 65535}) - self.fw.refresh_provider_fw_rules() - rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules - if rule.chain == 'provider'] - self.assertEqual(1, len(rules)) - - # Add another, refresh, and make sure number of rules goes to two - provider_fw1 = db.provider_fw_rule_create(admin_ctxt, - {'protocol': 'udp', - 'cidr': '10.99.99.99/32', - 'from_port': 1, - 'to_port': 65535}) - self.fw.refresh_provider_fw_rules() - rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules - if rule.chain == 'provider'] - self.assertEqual(2, len(rules)) - - # create the instance filter and make sure it has a jump rule - self.fw.prepare_instance_filter(instance_ref, network_info) - self.fw.apply_instance_filter(instance_ref, network_info) - inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules - if rule.chain == chain_name] - jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] - provjump_rules = [] - # IptablesTable doesn't make rules unique internally - for rule in jump_rules: - if 'provider' in rule.rule and rule not in provjump_rules: - provjump_rules.append(rule) - self.assertEqual(1, len(provjump_rules)) - - # remove a rule from the db, cast to compute to refresh rule - db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id']) - self.fw.refresh_provider_fw_rules() - rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules - if rule.chain == 'provider'] - self.assertEqual(1, len(rules)) - - -class XenAPISRSelectionTestCase(stubs.XenAPITestBase): - """Unit tests for testing we find the right SR.""" - def test_safe_find_sr_raise_exception(self): - # Ensure StorageRepositoryNotFound is raise when wrong filter. - self.flags(sr_matching_filter='yadayadayada') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - self.assertRaises(exception.StorageRepositoryNotFound, - vm_utils.safe_find_sr, session) - - def test_safe_find_sr_local_storage(self): - # Ensure the default local-storage is found. 
- self.flags(sr_matching_filter='other-config:i18n-key=local-storage') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - host_ref = xenapi_fake.get_all('host')[0] - local_sr = xenapi_fake.create_sr( - name_label='Fake Storage', - type='lvm', - other_config={'i18n-original-value-name_label': - 'Local storage', - 'i18n-key': 'local-storage'}, - host_ref=host_ref) - expected = vm_utils.safe_find_sr(session) - self.assertEqual(local_sr, expected) - - def test_safe_find_sr_by_other_criteria(self): - # Ensure the SR is found when using a different filter. - self.flags(sr_matching_filter='other-config:my_fake_sr=true') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - host_ref = xenapi_fake.get_all('host')[0] - local_sr = xenapi_fake.create_sr(name_label='Fake Storage', - type='lvm', - other_config={'my_fake_sr': 'true'}, - host_ref=host_ref) - expected = vm_utils.safe_find_sr(session) - self.assertEqual(local_sr, expected) - - def test_safe_find_sr_default(self): - # Ensure the default SR is found regardless of other-config. - self.flags(sr_matching_filter='default-sr:true') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', - fake.FakeVirtAPI()) - pool_ref = xenapi_fake.create_pool('') - expected = vm_utils.safe_find_sr(session) - self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref), - expected) - - -def _create_service_entries(context, values={'avail_zone1': ['fake_host1', - 'fake_host2'], - 'avail_zone2': ['fake_host3'], }): - for avail_zone, hosts in values.iteritems(): - for host in hosts: - db.service_create(context, - {'host': host, - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0}) - return values - - -class XenAPIAggregateTestCase(stubs.XenAPITestBase): - """Unit tests for aggregate operations.""" - def setUp(self): - super(XenAPIAggregateTestCase, self).setUp() - self.flags(xenapi_connection_url='http://test_url', - xenapi_connection_username='test_user', - xenapi_connection_password='test_pass', - instance_name_template='%d', - firewall_driver='nova.virt.xenapi.firewall.' 
- 'Dom0IptablesFirewallDriver', - host='host', - compute_driver='xenapi.XenAPIDriver', - default_availability_zone='avail_zone1') - self.flags(use_local=True, group='conductor') - host_ref = xenapi_fake.get_all('host')[0] - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.context = context.get_admin_context() - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.compute = importutils.import_object(CONF.compute_manager) - self.api = compute_api.AggregateAPI() - values = {'name': 'test_aggr', - 'metadata': {'availability_zone': 'test_zone', - pool_states.POOL_FLAG: 'XenAPI'}} - self.aggr = db.aggregate_create(self.context, values) - self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', - 'master_compute': 'host', - 'availability_zone': 'fake_zone', - pool_states.KEY: pool_states.ACTIVE, - 'host': xenapi_fake.get_record('host', - host_ref)['uuid']} - - def test_pool_add_to_aggregate_called_by_driver(self): - - calls = [] - - def pool_add_to_aggregate(context, aggregate, host, slave_info=None): - self.assertEquals("CONTEXT", context) - self.assertEquals("AGGREGATE", aggregate) - self.assertEquals("HOST", host) - self.assertEquals("SLAVEINFO", slave_info) - calls.append(pool_add_to_aggregate) - self.stubs.Set(self.conn._pool, - "add_to_aggregate", - pool_add_to_aggregate) - - self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") - - self.assertTrue(pool_add_to_aggregate in calls) - - def test_pool_remove_from_aggregate_called_by_driver(self): - - calls = [] - - def pool_remove_from_aggregate(context, aggregate, host, - slave_info=None): - self.assertEquals("CONTEXT", context) - self.assertEquals("AGGREGATE", aggregate) - self.assertEquals("HOST", host) - self.assertEquals("SLAVEINFO", slave_info) - calls.append(pool_remove_from_aggregate) - self.stubs.Set(self.conn._pool, - "remove_from_aggregate", - pool_remove_from_aggregate) - - self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") - - self.assertTrue(pool_remove_from_aggregate in calls) - - def test_add_to_aggregate_for_first_host_sets_metadata(self): - def fake_init_pool(id, name): - fake_init_pool.called = True - self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool) - - aggregate = self._aggregate_setup() - self.conn._pool.add_to_aggregate(self.context, aggregate, "host") - result = db.aggregate_get(self.context, aggregate['id']) - self.assertTrue(fake_init_pool.called) - self.assertThat(self.fake_metadata, - matchers.DictMatches(result['metadetails'])) - - def test_join_slave(self): - # Ensure join_slave gets called when the request gets to master. 
- def fake_join_slave(id, compute_uuid, host, url, user, password): - fake_join_slave.called = True - self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave) - - aggregate = self._aggregate_setup(hosts=['host', 'host2'], - metadata=self.fake_metadata) - self.conn._pool.add_to_aggregate(self.context, aggregate, "host2", - dict(compute_uuid='fake_uuid', - url='fake_url', - user='fake_user', - passwd='fake_pass', - xenhost_uuid='fake_uuid')) - self.assertTrue(fake_join_slave.called) - - def test_add_to_aggregate_first_host(self): - def fake_pool_set_name_label(self, session, pool_ref, name): - fake_pool_set_name_label.called = True - self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label", - fake_pool_set_name_label) - self.conn._session.call_xenapi("pool.create", {"name": "asdf"}) - - values = {"name": 'fake_aggregate', - 'metadata': {'availability_zone': 'fake_zone'}} - result = db.aggregate_create(self.context, values) - metadata = {'availability_zone': 'fake_zone', - pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: pool_states.CREATED} - db.aggregate_metadata_add(self.context, result['id'], metadata) - - db.aggregate_host_add(self.context, result['id'], "host") - aggregate = db.aggregate_get(self.context, result['id']) - self.assertEqual(["host"], aggregate['hosts']) - self.assertEqual(metadata, aggregate['metadetails']) - - self.conn._pool.add_to_aggregate(self.context, aggregate, "host") - self.assertTrue(fake_pool_set_name_label.called) - - def test_remove_from_aggregate_called(self): - def fake_remove_from_aggregate(context, aggregate, host): - fake_remove_from_aggregate.called = True - self.stubs.Set(self.conn._pool, - "remove_from_aggregate", - fake_remove_from_aggregate) - - self.conn.remove_from_aggregate(None, None, None) - self.assertTrue(fake_remove_from_aggregate.called) - - def test_remove_from_empty_aggregate(self): - result = self._aggregate_setup() - self.assertRaises(exception.InvalidAggregateAction, - self.conn._pool.remove_from_aggregate, - self.context, result, "test_host") - - def test_remove_slave(self): - # Ensure eject slave gets called. - def fake_eject_slave(id, compute_uuid, host_uuid): - fake_eject_slave.called = True - self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave) - - self.fake_metadata['host2'] = 'fake_host2_uuid' - aggregate = self._aggregate_setup(hosts=['host', 'host2'], - metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) - self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") - self.assertTrue(fake_eject_slave.called) - - def test_remove_master_solo(self): - # Ensure metadata are cleared after removal. - def fake_clear_pool(id): - fake_clear_pool.called = True - self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool) - - aggregate = self._aggregate_setup(metadata=self.fake_metadata) - self.conn._pool.remove_from_aggregate(self.context, aggregate, "host") - result = db.aggregate_get(self.context, aggregate['id']) - self.assertTrue(fake_clear_pool.called) - self.assertThat({'availability_zone': 'fake_zone', - pool_states.POOL_FLAG: 'XenAPI', - pool_states.KEY: pool_states.ACTIVE}, - matchers.DictMatches(result['metadetails'])) - - def test_remote_master_non_empty_pool(self): - # Ensure AggregateError is raised if removing the master. 
- aggregate = self._aggregate_setup(hosts=['host', 'host2'], - metadata=self.fake_metadata) - - self.assertRaises(exception.InvalidAggregateAction, - self.conn._pool.remove_from_aggregate, - self.context, aggregate, "host") - - def _aggregate_setup(self, aggr_name='fake_aggregate', - aggr_zone='fake_zone', - aggr_state=pool_states.CREATED, - hosts=['host'], metadata=None): - values = {"name": aggr_name} - result = db.aggregate_create(self.context, values, - metadata={'availability_zone': aggr_zone}) - pool_flag = {pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: aggr_state} - db.aggregate_metadata_add(self.context, result['id'], pool_flag) - - for host in hosts: - db.aggregate_host_add(self.context, result['id'], host) - if metadata: - db.aggregate_metadata_add(self.context, result['id'], metadata) - return db.aggregate_get(self.context, result['id']) - - def test_add_host_to_aggregate_invalid_changing_status(self): - """Ensure InvalidAggregateAction is raised when adding host while - aggregate is not ready.""" - aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.add_to_aggregate, self.context, - aggregate, 'host') - - def test_add_host_to_aggregate_invalid_dismissed_status(self): - """Ensure InvalidAggregateAction is raised when aggregate is - deleted.""" - aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.add_to_aggregate, self.context, - aggregate, 'fake_host') - - def test_add_host_to_aggregate_invalid_error_status(self): - """Ensure InvalidAggregateAction is raised when aggregate is - in error.""" - aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.add_to_aggregate, self.context, - aggregate, 'fake_host') - - def test_remove_host_from_aggregate_error(self): - # Ensure we can remove a host from an aggregate even if in error. - values = _create_service_entries(self.context) - fake_zone = values.keys()[0] - aggr = self.api.create_aggregate(self.context, - 'fake_aggregate', fake_zone) - # let's mock the fact that the aggregate is ready! - metadata = {pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: pool_states.ACTIVE} - db.aggregate_metadata_add(self.context, aggr['id'], metadata) - for host in values[fake_zone]: - aggr = self.api.add_host_to_aggregate(self.context, - aggr['id'], host) - # let's mock the fact that the aggregate is in error! 
- status = {'operational_state': pool_states.ERROR} - expected = self.api.remove_host_from_aggregate(self.context, - aggr['id'], - values[fake_zone][0]) - self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts'])) - self.assertEqual(expected['metadata'][pool_states.KEY], - pool_states.ACTIVE) - - def test_remove_host_from_aggregate_invalid_dismissed_status(self): - """Ensure InvalidAggregateAction is raised when aggregate is - deleted.""" - aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.remove_from_aggregate, self.context, - aggregate, 'fake_host') - - def test_remove_host_from_aggregate_invalid_changing_status(self): - """Ensure InvalidAggregateAction is raised when aggregate is - changing.""" - aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.remove_from_aggregate, self.context, - aggregate, 'fake_host') - - def test_add_aggregate_host_raise_err(self): - # Ensure the undo operation works correctly on add. - def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): - raise exception.AggregateError( - aggregate_id='', action='', reason='') - self.stubs.Set(self.compute.driver, "add_to_aggregate", - fake_driver_add_to_aggregate) - metadata = {pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: pool_states.ACTIVE} - db.aggregate_metadata_add(self.context, self.aggr['id'], metadata) - db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host') - - self.assertRaises(exception.AggregateError, - self.compute.add_aggregate_host, - self.context, "fake_host", - aggregate=jsonutils.to_primitive(self.aggr)) - excepted = db.aggregate_get(self.context, self.aggr['id']) - self.assertEqual(excepted['metadetails'][pool_states.KEY], - pool_states.ERROR) - self.assertEqual(excepted['hosts'], []) - - -class MockComputeAPI(object): - def __init__(self): - self._mock_calls = [] - - def add_aggregate_host(self, ctxt, aggregate, - host_param, host, slave_info): - self._mock_calls.append(( - self.add_aggregate_host, ctxt, aggregate, - host_param, host, slave_info)) - - def remove_aggregate_host(self, ctxt, aggregate_id, host_param, - host, slave_info): - self._mock_calls.append(( - self.remove_aggregate_host, ctxt, aggregate_id, - host_param, host, slave_info)) - - -class StubDependencies(object): - """Stub dependencies for ResourcePool.""" - - def __init__(self): - self.compute_rpcapi = MockComputeAPI() - - def _is_hv_pool(self, *_ignore): - return True - - def _get_metadata(self, *_ignore): - return { - pool_states.KEY: {}, - 'master_compute': 'master' - } - - def _create_slave_info(self, *ignore): - return "SLAVE_INFO" - - -class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool): - """A ResourcePool, use stub dependencies.""" - - -class HypervisorPoolTestCase(test.TestCase): - - fake_aggregate = { - 'id': 98, - 'hosts': [], - 'metadetails': { - 'master_compute': 'master', - pool_states.POOL_FLAG: {}, - pool_states.KEY: {} - } - } - - def test_slave_asks_master_to_add_slave_to_pool(self): - slave = ResourcePoolWithStubs() - - slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") - - self.assertIn( - (slave.compute_rpcapi.add_aggregate_host, - "CONTEXT", jsonutils.to_primitive(self.fake_aggregate), - "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) - - def test_slave_asks_master_to_remove_slave_from_pool(self): - slave = ResourcePoolWithStubs() - - 
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") - - self.assertIn( - (slave.compute_rpcapi.remove_aggregate_host, - "CONTEXT", 98, "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) - - -class SwapXapiHostTestCase(test.TestCase): - - def test_swapping(self): - self.assertEquals( - "http://otherserver:8765/somepath", - pool.swap_xapi_host( - "http://someserver:8765/somepath", 'otherserver')) - - def test_no_port(self): - self.assertEquals( - "http://otherserver/somepath", - pool.swap_xapi_host( - "http://someserver/somepath", 'otherserver')) - - def test_no_path(self): - self.assertEquals( - "http://otherserver", - pool.swap_xapi_host( - "http://someserver", 'otherserver')) - - -class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): - """Unit tests for live_migration.""" - def setUp(self): - super(XenAPILiveMigrateTestCase, self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - firewall_driver='nova.virt.xenapi.firewall.' - 'Dom0IptablesFirewallDriver', - host='host') - db_fakes.stub_out_db_instance_api(self.stubs) - self.context = context.get_admin_context() - xenapi_fake.create_local_pifs() - - def test_live_migration_calls_vmops(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def fake_live_migrate(context, instance_ref, dest, post_method, - recover_method, block_migration, migrate_data): - fake_live_migrate.called = True - - self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate) - - self.conn.live_migration(None, None, None, None, None) - self.assertTrue(fake_live_migrate.called) - - def test_pre_live_migration(self): - # ensure method is present - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.conn.pre_live_migration(None, None, None, None) - - def test_post_live_migration_at_destination(self): - # ensure method is present - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.conn.post_live_migration_at_destination(None, None, None, None) - - def test_check_can_live_migrate_destination_with_block_migration(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf") - - expected = {'block_migration': True, - 'migrate_data': { - 'migrate_send_data': "fake_migrate_data", - 'destination_sr_ref': 'asdf' - } - } - result = self.conn.check_can_live_migrate_destination(self.context, - {'host': 'host'}, - {}, {}, - True, False) - self.assertEqual(expected, result) - - def test_check_can_live_migrate_destination_block_migration_fails(self): - stubs.stubout_session(self.stubs, - stubs.FakeSessionForFailedMigrateTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_destination, - self.context, {'host': 'host'}, - {}, {}, - True, False) - - def _add_default_live_migrate_stubs(self, conn): - def fake_generate_vdi_map(destination_sr_ref, _vm_ref): - pass - - def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): - return [] - - def fake_get_vm_opaque_ref(instance): - return "fake_vm" - - self.stubs.Set(conn._vmops, "_generate_vdi_map", - fake_generate_vdi_map) - self.stubs.Set(conn._vmops, 
"_get_iscsi_srs", - fake_get_iscsi_srs) - self.stubs.Set(conn._vmops, "_get_vm_opaque_ref", - fake_get_vm_opaque_ref) - - def test_check_can_live_migrate_source_with_block_migrate(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - dest_check_data = {'block_migration': True, - 'migrate_data': { - 'destination_sr_ref': None, - 'migrate_send_data': None - }} - result = self.conn.check_can_live_migrate_source(self.context, - {'host': 'host'}, - dest_check_data) - self.assertEqual(dest_check_data, result) - - def test_check_can_live_migrate_source_with_block_migrate_iscsi(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): - return ['sr_ref'] - self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", - fake_get_iscsi_srs) - - def fake_make_plugin_call(plugin, method, **args): - return "true" - self.stubs.Set(self.conn._vmops, "_make_plugin_call", - fake_make_plugin_call) - - dest_check_data = {'block_migration': True, - 'migrate_data': { - 'destination_sr_ref': None, - 'migrate_send_data': None - }} - result = self.conn.check_can_live_migrate_source(self.context, - {'host': 'host'}, - dest_check_data) - self.assertEqual(dest_check_data, result) - - def test_check_can_live_migrate_source_with_block_iscsi_fails(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): - return ['sr_ref'] - self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", - fake_get_iscsi_srs) - - def fake_make_plugin_call(plugin, method, **args): - return {'returncode': 'error', 'message': 'Plugin not found'} - self.stubs.Set(self.conn._vmops, "_make_plugin_call", - fake_make_plugin_call) - - dest_check_data = {'block_migration': True, - 'migrate_data': { - 'destination_sr_ref': None, - 'migrate_send_data': None - }} - - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_source, - self.context, {'host': 'host'}, - {}) - - def test_check_can_live_migrate_source_with_block_migrate_fails(self): - stubs.stubout_session(self.stubs, - stubs.FakeSessionForFailedMigrateTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - dest_check_data = {'block_migration': True, - 'migrate_data': { - 'destination_sr_ref': None, - 'migrate_send_data': None - }} - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_source, - self.context, - {'host': 'host'}, - dest_check_data) - - def test_check_can_live_migrate_works(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - class fake_aggregate: - def __init__(self): - self.metadetails = {"host": "test_host_uuid"} - - def fake_aggregate_get_by_host(context, host, key=None): - self.assertEqual(CONF.host, host) - return [fake_aggregate()] - - self.stubs.Set(db, "aggregate_get_by_host", - fake_aggregate_get_by_host) - self.conn.check_can_live_migrate_destination(self.context, - {'host': 'host'}, False, False) - - def test_check_can_live_migrate_fails(self): - 
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - class fake_aggregate: - def __init__(self): - self.metadetails = {"dest_other": "test_host_uuid"} - - def fake_aggregate_get_by_host(context, host, key=None): - self.assertEqual(CONF.host, host) - return [fake_aggregate()] - - self.stubs.Set(db, "aggregate_get_by_host", - fake_aggregate_get_by_host) - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_destination, - self.context, {'host': 'host'}, None, None) - - def test_live_migration(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def fake_get_vm_opaque_ref(instance): - return "fake_vm" - self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", - fake_get_vm_opaque_ref) - - def fake_get_host_opaque_ref(context, destination_hostname): - return "fake_host" - self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref", - fake_get_host_opaque_ref) - - def post_method(context, instance, destination_hostname, - block_migration): - post_method.called = True - - self.conn.live_migration(self.conn, None, None, post_method, None) - - self.assertTrue(post_method.called, "post_method.called") - - def test_live_migration_on_failure(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def fake_get_vm_opaque_ref(instance): - return "fake_vm" - self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", - fake_get_vm_opaque_ref) - - def fake_get_host_opaque_ref(context, destination_hostname): - return "fake_host" - self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref", - fake_get_host_opaque_ref) - - def fake_call_xenapi(*args): - raise NotImplementedError() - self.stubs.Set(self.conn._vmops._session, "call_xenapi", - fake_call_xenapi) - - def recover_method(context, instance, destination_hostname, - block_migration): - recover_method.called = True - - self.assertRaises(NotImplementedError, self.conn.live_migration, - self.conn, None, None, None, recover_method) - self.assertTrue(recover_method.called, "recover_method.called") - - def test_live_migration_calls_post_migration(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def post_method(context, instance, destination_hostname, - block_migration): - post_method.called = True - - # pass block_migration = True and migrate data - migrate_data = {"destination_sr_ref": "foo", - "migrate_send_data": "bar"} - self.conn.live_migration(self.conn, None, None, post_method, None, - True, migrate_data) - self.assertTrue(post_method.called, "post_method.called") - - def test_live_migration_block_cleans_srs(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def fake_get_iscsi_srs(context, instance): - return ['sr_ref'] - self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", - fake_get_iscsi_srs) - - def fake_forget_sr(context, instance): - fake_forget_sr.called = True - self.stubs.Set(volume_utils, "forget_sr", - fake_forget_sr) - - def post_method(context, instance, destination_hostname, - block_migration): - post_method.called = True - - migrate_data = {"destination_sr_ref": "foo", - "migrate_send_data": 
"bar"} - self.conn.live_migration(self.conn, None, None, post_method, None, - True, migrate_data) - - self.assertTrue(post_method.called, "post_method.called") - self.assertTrue(fake_forget_sr.called, "forget_sr.called") - - def test_live_migration_with_block_migration_raises_invalid_param(self): - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def fake_get_vm_opaque_ref(instance): - return "fake_vm" - self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", - fake_get_vm_opaque_ref) - - def recover_method(context, instance, destination_hostname, - block_migration): - recover_method.called = True - # pass block_migration = True and no migrate data - self.assertRaises(exception.InvalidParameterValue, - self.conn.live_migration, self.conn, - None, None, None, recover_method, True, None) - self.assertTrue(recover_method.called, "recover_method.called") - - def test_live_migration_with_block_migration_fails_migrate_send(self): - stubs.stubout_session(self.stubs, - stubs.FakeSessionForFailedMigrateTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def recover_method(context, instance, destination_hostname, - block_migration): - recover_method.called = True - # pass block_migration = True and migrate data - migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar') - self.assertRaises(exception.MigrationError, - self.conn.live_migration, self.conn, - None, None, None, recover_method, True, migrate_data) - self.assertTrue(recover_method.called, "recover_method.called") - - def test_live_migrate_block_migration_xapi_call_parameters(self): - - fake_vdi_map = object() - - class Session(xenapi_fake.SessionBase): - def VM_migrate_send(self_, session, vmref, migrate_data, islive, - vdi_map, vif_map, options): - self.assertEquals('SOMEDATA', migrate_data) - self.assertEquals(fake_vdi_map, vdi_map) - - stubs.stubout_session(self.stubs, Session) - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(conn) - - def fake_generate_vdi_map(destination_sr_ref, _vm_ref): - return fake_vdi_map - - self.stubs.Set(conn._vmops, "_generate_vdi_map", - fake_generate_vdi_map) - - def dummy_callback(*args, **kwargs): - pass - - conn.live_migration( - self.context, instance_ref=dict(name='ignore'), dest=None, - post_method=dummy_callback, recover_method=dummy_callback, - block_migration="SOMEDATA", - migrate_data=dict(migrate_send_data='SOMEDATA', - destination_sr_ref="TARGET_SR_OPAQUE_REF")) - - def test_generate_vdi_map(self): - stubs.stubout_session(self.stubs, xenapi_fake.SessionBase) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - vm_ref = "fake_vm_ref" - - def fake_find_sr(_session): - self.assertEquals(conn._session, _session) - return "source_sr_ref" - self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr) - - def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref): - self.assertEquals(conn._session, _session) - self.assertEquals(vm_ref, _vm_ref) - self.assertEquals("source_sr_ref", _sr_ref) - return ["vdi0", "vdi1"] - - self.stubs.Set(vm_utils, "get_instance_vdis_for_sr", - fake_get_instance_vdis_for_sr) - - result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref) - - self.assertEquals({"vdi0": "dest_sr_ref", - "vdi1": "dest_sr_ref"}, result) - - -class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase): - def setUp(self): - super(XenAPIInjectMetadataTestCase, 
self).setUp() - self.flags(xenapi_connection_url='test_url', - xenapi_connection_password='test_pass', - firewall_driver='nova.virt.xenapi.firewall.' - 'Dom0IptablesFirewallDriver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.xenstore = dict(persist={}, ephem={}) - - def fake_get_vm_opaque_ref(inst, instance): - self.assertEqual(instance, 'instance') - return 'vm_ref' - - def fake_add_to_param_xenstore(inst, vm_ref, key, val): - self.assertEqual(vm_ref, 'vm_ref') - self.xenstore['persist'][key] = val - - def fake_remove_from_param_xenstore(inst, vm_ref, key): - self.assertEqual(vm_ref, 'vm_ref') - if key in self.xenstore['persist']: - del self.xenstore['persist'][key] - - def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None): - self.assertEqual(instance, 'instance') - self.assertEqual(vm_ref, 'vm_ref') - self.xenstore['ephem'][path] = jsonutils.dumps(value) - - def fake_delete_from_xenstore(inst, instance, path, vm_ref=None): - self.assertEqual(instance, 'instance') - self.assertEqual(vm_ref, 'vm_ref') - if path in self.xenstore['ephem']: - del self.xenstore['ephem'][path] - - self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref', - fake_get_vm_opaque_ref) - self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore', - fake_add_to_param_xenstore) - self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore', - fake_remove_from_param_xenstore) - self.stubs.Set(vmops.VMOps, '_write_to_xenstore', - fake_write_to_xenstore) - self.stubs.Set(vmops.VMOps, '_delete_from_xenstore', - fake_delete_from_xenstore) - - def test_inject_instance_metadata(self): - - # Add some system_metadata to ensure it doesn't get added - # to xenstore - instance = dict(metadata=[{'key': 'a', 'value': 1}, - {'key': 'b', 'value': 2}, - {'key': 'c', 'value': 3}, - # Check xenstore key sanitizing - {'key': 'hi.there', 'value': 4}, - {'key': 'hi!t.e/e', 'value': 5}], - # Check xenstore key sanitizing - system_metadata=[{'key': 'sys_a', 'value': 1}, - {'key': 'sys_b', 'value': 2}, - {'key': 'sys_c', 'value': 3}]) - self.conn._vmops.inject_instance_metadata(instance, 'vm_ref') - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - 'vm-data/user-metadata/hi_there': '4', - 'vm-data/user-metadata/hi_t_e_e': '5', - }, - 'ephem': {}, - }) - - def test_change_instance_metadata_add(self): - # Test XenStore key sanitizing here, too. 
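# The sanitising exercised by these metadata tests maps every character
# that is not a letter, digit or underscore to '_', which is why the
# fixtures expect 'hi.there' -> 'hi_there' and 'test.key' -> 'test_key'.
# A minimal stand-alone sketch of that transform; sanitize_xenstore_key
# is an illustrative name here, not the exact helper vmops uses:
import re

def sanitize_xenstore_key(key):
    # Anything outside [A-Za-z0-9_] is unsafe in a xenstore path.
    return re.sub(r'[^A-Za-z0-9_]', '_', key)

assert sanitize_xenstore_key('hi!t.e/e') == 'hi_t_e_e'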
- diff = {'test.key': ['+', 4]} - self.xenstore = { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - } - - self.conn._vmops.change_instance_metadata('instance', diff) - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - 'vm-data/user-metadata/test_key': '4', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - 'vm-data/user-metadata/test_key': '4', - }, - }) - - def test_change_instance_metadata_update(self): - diff = dict(b=['+', 4]) - self.xenstore = { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - } - - self.conn._vmops.change_instance_metadata('instance', diff) - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '4', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '4', - 'vm-data/user-metadata/c': '3', - }, - }) - - def test_change_instance_metadata_delete(self): - diff = dict(b=['-']) - self.xenstore = { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - } - - self.conn._vmops.change_instance_metadata('instance', diff) - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/c': '3', - }, - }) - - -class XenAPISessionTestCase(test.TestCase): - def _get_mock_xapisession(self, software_version): - class XcpXapiSession(xenapi_conn.XenAPISession): - def __init__(_ignore): - "Skip the superclass's dirty init" - - def _get_software_version(_ignore): - return software_version - - return XcpXapiSession() - - def test_get_product_version_product_brand_does_not_fail(self): - session = self._get_mock_xapisession({ - 'build_number': '0', - 'date': '2012-08-03', - 'hostname': 'komainu', - 'linux': '3.2.0-27-generic', - 'network_backend': 'bridge', - 'platform_name': 'XCP_Kronos', - 'platform_version': '1.6.0', - 'xapi': '1.3', - 'xen': '4.1.2', - 'xencenter_max': '1.10', - 'xencenter_min': '1.10' - }) - - self.assertEquals( - (None, None), - session._get_product_version_and_brand() - ) - - def test_get_product_version_product_brand_xs_6(self): - session = self._get_mock_xapisession({ - 'product_brand': 'XenServer', - 'product_version': '6.0.50' - }) - - self.assertEquals( - ((6, 0, 50), 'XenServer'), - session._get_product_version_and_brand() - ) diff --git a/nova/tests/virt/xenapi/stubs.py b/nova/tests/virt/xenapi/stubs.py new file mode 100644 index 000000000..fa214b23e --- /dev/null +++ b/nova/tests/virt/xenapi/stubs.py @@ -0,0 +1,353 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite.""" + +import pickle +import random + +from nova.openstack.common import jsonutils +from nova import test +import nova.tests.image.fake +from nova.virt.xenapi import driver as xenapi_conn +from nova.virt.xenapi import fake +from nova.virt.xenapi import vm_utils +from nova.virt.xenapi import vmops + + +def stubout_firewall_driver(stubs, conn): + + def fake_none(self, *args): + return + + _vmops = conn._vmops + stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none) + stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none) + + +def stubout_instance_snapshot(stubs): + def fake_fetch_image(context, session, instance, name_label, image, type): + return {'root': dict(uuid=_make_fake_vdi(), file=None), + 'kernel': dict(uuid=_make_fake_vdi(), file=None), + 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)} + + stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) + + def fake_wait_for_vhd_coalesce(*args): + #TODO(sirp): Should we actually fake out the data here + return "fakeparent", "fakebase" + + stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) + + +def stubout_session(stubs, cls, product_version=(5, 6, 2), + product_brand='XenServer', **opt_args): + """Stubs out methods from XenAPISession.""" + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url, **opt_args)) + stubs.Set(xenapi_conn.XenAPISession, '_get_product_version_and_brand', + lambda s: (product_version, product_brand)) + + +def stubout_get_this_vm_uuid(stubs): + def f(): + vms = [rec['uuid'] for ref, rec + in fake.get_all_records('VM').iteritems() + if rec['is_control_domain']] + return vms[0] + stubs.Set(vm_utils, 'get_this_vm_uuid', f) + + +def stubout_image_service_download(stubs): + def fake_download(*args, **kwargs): + pass + stubs.Set(nova.tests.image.fake._FakeImageService, + 'download', fake_download) + + +def stubout_stream_disk(stubs): + def fake_stream_disk(*args, **kwargs): + pass + stubs.Set(vm_utils, '_stream_disk', fake_stream_disk) + + +def stubout_is_vdi_pv(stubs): + def f(_1): + return False + stubs.Set(vm_utils, '_is_vdi_pv', f) + + +def stubout_determine_is_pv_objectstore(stubs): + """Assumes VMs stu have PV kernels.""" + + def f(*args): + return False + stubs.Set(vm_utils, '_determine_is_pv_objectstore', f) + + +def stubout_is_snapshot(stubs): + """Always returns true + xenapi fake driver does not create vmrefs for snapshots """ + + def f(*args): + return True + stubs.Set(vm_utils, 'is_snapshot', f) + + +def stubout_lookup_image(stubs): + """Simulates a failure in lookup image.""" + def f(_1, _2, _3, _4): + raise Exception("Test Exception raised by fake lookup_image") + stubs.Set(vm_utils, 'lookup_image', f) + + +def stubout_fetch_disk_image(stubs, raise_failure=False): + """Simulates a failure in fetch image_glance_disk.""" + + def _fake_fetch_disk_image(context, session, instance, name_label, image, + image_type): + if raise_failure: + raise fake.Failure("Test Exception raised by " + "fake fetch_image_glance_disk") + elif image_type == vm_utils.ImageType.KERNEL: + filename 
= "kernel" + elif image_type == vm_utils.ImageType.RAMDISK: + filename = "ramdisk" + else: + filename = "unknown" + + vdi_type = vm_utils.ImageType.to_string(image_type) + return {vdi_type: dict(uuid=None, file=filename)} + + stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image) + + +def stubout_create_vm(stubs): + """Simulates a failure in create_vm.""" + + def f(*args): + raise fake.Failure("Test Exception raised by fake create_vm") + stubs.Set(vm_utils, 'create_vm', f) + + +def stubout_attach_disks(stubs): + """Simulates a failure in _attach_disks.""" + + def f(*args): + raise fake.Failure("Test Exception raised by fake _attach_disks") + stubs.Set(vmops.VMOps, '_attach_disks', f) + + +def _make_fake_vdi(): + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', sr_ref) + vdi_rec = fake.get_record('VDI', vdi_ref) + return vdi_rec['uuid'] + + +class FakeSessionForVMTests(fake.SessionBase): + """Stubs out a XenAPISession for VM tests.""" + + _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on " + "Sun Nov 6 22:49:02 2011\n" + "*filter\n" + ":INPUT ACCEPT [0:0]\n" + ":FORWARD ACCEPT [0:0]\n" + ":OUTPUT ACCEPT [0:0]\n" + "COMMIT\n" + "# Completed on Sun Nov 6 22:49:02 2011\n") + + def host_call_plugin(self, _1, _2, plugin, method, _5): + if (plugin, method) == ('glance', 'download_vhd'): + root_uuid = _make_fake_vdi() + return pickle.dumps(dict(root=dict(uuid=root_uuid))) + elif (plugin, method) == ("xenhost", "iptables_config"): + return fake.as_json(out=self._fake_iptables_save_output, + err='') + else: + return (super(FakeSessionForVMTests, self). + host_call_plugin(_1, _2, plugin, method, _5)) + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + vm['is_a_template'] = False + vm['is_control_domain'] = False + vm['domid'] = random.randrange(1, 1 << 16) + return vm + + def VM_start_on(self, _1, vm_ref, host_ref, _2, _3): + vm_rec = self.VM_start(_1, vm_ref, _2, _3) + vm_rec['resident_on'] = host_ref + + def VDI_snapshot(self, session_ref, vm_ref, _1): + sr_ref = "fakesr" + return fake.create_vdi('fakelabel', sr_ref, read_only=True) + + def SR_scan(self, session_ref, sr_ref): + pass + + +class FakeSessionForFirewallTests(FakeSessionForVMTests): + """Stubs out a XenApi Session for doing IPTable Firewall tests.""" + + def __init__(self, uri, test_case=None): + super(FakeSessionForFirewallTests, self).__init__(uri) + if hasattr(test_case, '_in_rules'): + self._in_rules = test_case._in_rules + if hasattr(test_case, '_in6_filter_rules'): + self._in6_filter_rules = test_case._in6_filter_rules + self._test_case = test_case + + def host_call_plugin(self, _1, _2, plugin, method, args): + """Mock method four host_call_plugin to be used in unit tests + for the dom0 iptables Firewall drivers for XenAPI + + """ + if plugin == "xenhost" and method == "iptables_config": + # The command to execute is a json-encoded list + cmd_args = args.get('cmd_args', None) + cmd = jsonutils.loads(cmd_args) + if not cmd: + ret_str = '' + else: + output = '' + process_input = args.get('process_input', None) + if cmd == ['ip6tables-save', '-c']: + output = '\n'.join(self._in6_filter_rules) + if cmd == ['iptables-save', '-c']: + output = '\n'.join(self._in_rules) + if cmd == ['iptables-restore', '-c', ]: + lines = process_input.split('\n') + if '*filter' in lines: + if self._test_case is not None: + 
self._test_case._out_rules = lines + output = '\n'.join(lines) + if cmd == ['ip6tables-restore', '-c', ]: + lines = process_input.split('\n') + if '*filter' in lines: + output = '\n'.join(lines) + ret_str = fake.as_json(out=output, err='') + return ret_str + + +def stub_out_vm_methods(stubs): + def fake_acquire_bootlock(self, vm): + pass + + def fake_release_bootlock(self, vm): + pass + + def fake_generate_ephemeral(*args): + pass + + def fake_wait_for_device(dev): + pass + + stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock) + stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock) + stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) + stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device) + + +class FakeSessionForVolumeTests(fake.SessionBase): + """Stubs out a XenAPISession for Volume tests.""" + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + """Stubs out a XenAPISession for Volume tests: it injects failures.""" + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # This is for testing failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def PBD_unplug(self, _1, ref): + rec = fake.get_record('PBD', ref) + rec['currently-attached'] = False + + def SR_forget(self, _1, ref): + pass + + +def stub_out_migration_methods(stubs): + fakesr = fake.create_sr() + + def fake_move_disks(self, instance, disk_info): + vdi_ref = fake.create_vdi(instance['name'], fakesr) + vdi_rec = fake.get_record('VDI', vdi_ref) + vdi_rec['other_config']['nova_disk_type'] = 'root' + return {'uuid': vdi_rec['uuid'], 'ref': vdi_ref} + + def fake_get_vdi(session, vm_ref): + vdi_ref_parent = fake.create_vdi('derp-parent', fakesr) + vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent) + vdi_ref = fake.create_vdi('derp', fakesr, + sm_config={'vhd-parent': vdi_rec_parent['uuid']}) + vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) + return vdi_ref, vdi_rec + + def fake_sr(session, *args): + return fakesr + + def fake_get_sr_path(*args): + return "fake" + + def fake_destroy(*args, **kwargs): + pass + + def fake_generate_ephemeral(*args): + pass + + stubs.Set(vmops.VMOps, '_destroy', fake_destroy) + stubs.Set(vm_utils, 'move_disks', fake_move_disks) + stubs.Set(vm_utils, 'scan_default_sr', fake_sr) + stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi) + stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path) + stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) + + +class FakeSessionForFailedMigrateTests(FakeSessionForVMTests): + def VM_assert_can_migrate(self, session, vmref, migrate_data, + live, vdi_map, vif_map, options): + raise fake.Failure("XenAPI VM.assert_can_migrate failed") + + def host_migrate_receive(self, session, hostref, networkref, options): + raise fake.Failure("XenAPI host.migrate_receive failed") + + def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map, + vif_map, options): + raise fake.Failure("XenAPI VM.migrate_send failed") + + +class XenAPITestBase(test.TestCase): + def setUp(self): + super(XenAPITestBase, self).setUp() + + self.useFixture(test.ReplaceModule('XenAPI', fake)) + + fake.reset() diff --git 
a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py index 4a7370dd9..3f7de1521 100644 --- a/nova/tests/virt/xenapi/test_vm_utils.py +++ b/nova/tests/virt/xenapi/test_vm_utils.py @@ -15,16 +15,45 @@ # License for the specific language governing permissions and limitations # under the License. - import contextlib + import fixtures import mox from nova.compute import flavors +from nova import context +from nova import db from nova import exception from nova import test +from nova.tests.virt.xenapi import stubs from nova import utils +from nova.virt.xenapi import driver as xenapi_conn +from nova.virt.xenapi import fake from nova.virt.xenapi import vm_utils +from nova.virt.xenapi import volume_utils + + +XENSM_TYPE = 'xensm' +ISCSI_TYPE = 'iscsi' + + +def get_fake_connection_data(sr_type): + fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR', + 'name_label': 'fake_storage', + 'name_description': 'test purposes', + 'server': 'myserver', + 'serverpath': '/local/scratch/myname', + 'sr_type': 'nfs', + 'introduce_sr_keys': ['server', + 'serverpath', + 'sr_type'], + 'vdi_uuid': 'falseVDI'}, + ISCSI_TYPE: {'volume_id': 'fake_volume_id', + 'target_lun': 1, + 'target_iqn': 'fake_iqn:volume-fake_volume_id', + 'target_portal': u'localhost:3260', + 'target_discovered': False}, } + return fakes[sr_type] @contextlib.contextmanager @@ -414,3 +443,347 @@ class CheckVDISizeTestCase(test.TestCase): vm_utils._check_vdi_size(self.context, self.session, self.instance, self.vdi_uuid) + + +class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase): + def setUp(self): + super(GetInstanceForVdisForSrTestCase, self).setUp() + self.flags(disable_process_locking=True, + instance_name_template='%d', + firewall_driver='nova.virt.xenapi.firewall.' + 'Dom0IptablesFirewallDriver', + xenapi_connection_url='test_url', + xenapi_connection_password='test_pass',) + + def test_get_instance_vdis_for_sr(self): + vm_ref = fake.create_vm("foo", "Running") + sr_ref = fake.create_sr() + + vdi_1 = fake.create_vdi('vdiname1', sr_ref) + vdi_2 = fake.create_vdi('vdiname2', sr_ref) + + for vdi_ref in [vdi_1, vdi_2]: + fake.create_vbd(vm_ref, vdi_ref) + + stubs.stubout_session(self.stubs, fake.SessionBase) + driver = xenapi_conn.XenAPIDriver(False) + + result = list(vm_utils.get_instance_vdis_for_sr( + driver._session, vm_ref, sr_ref)) + + self.assertEquals([vdi_1, vdi_2], result) + + def test_get_instance_vdis_for_sr_no_vbd(self): + vm_ref = fake.create_vm("foo", "Running") + sr_ref = fake.create_sr() + + stubs.stubout_session(self.stubs, fake.SessionBase) + driver = xenapi_conn.XenAPIDriver(False) + + result = list(vm_utils.get_instance_vdis_for_sr( + driver._session, vm_ref, sr_ref)) + + self.assertEquals([], result) + + def test_get_vdi_uuid_for_volume_with_sr_uuid(self): + connection_data = get_fake_connection_data(XENSM_TYPE) + stubs.stubout_session(self.stubs, fake.SessionBase) + driver = xenapi_conn.XenAPIDriver(False) + + vdi_uuid = vm_utils.get_vdi_uuid_for_volume( + driver._session, connection_data) + self.assertEquals(vdi_uuid, 'falseVDI') + + def test_get_vdi_uuid_for_volume_failure(self): + stubs.stubout_session(self.stubs, fake.SessionBase) + driver = xenapi_conn.XenAPIDriver(False) + + def bad_introduce_sr(session, sr_uuid, label, sr_params): + return None + + self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr) + connection_data = get_fake_connection_data(XENSM_TYPE) + self.assertRaises(exception.NovaException, + vm_utils.get_vdi_uuid_for_volume, + driver._session, connection_data) + + def 
test_get_vdi_uuid_for_volume_from_iscsi_vol_missing_sr_uuid(self): + connection_data = get_fake_connection_data(ISCSI_TYPE) + stubs.stubout_session(self.stubs, fake.SessionBase) + driver = xenapi_conn.XenAPIDriver(False) + + vdi_uuid = vm_utils.get_vdi_uuid_for_volume( + driver._session, connection_data) + self.assertNotEquals(vdi_uuid, None) + + +class VMRefOrRaiseVMFoundTestCase(test.TestCase): + + def test_lookup_call(self): + mock = mox.Mox() + mock.StubOutWithMock(vm_utils, 'lookup') + + vm_utils.lookup('session', 'somename').AndReturn('ignored') + + mock.ReplayAll() + vm_utils.vm_ref_or_raise('session', 'somename') + mock.VerifyAll() + + def test_return_value(self): + mock = mox.Mox() + mock.StubOutWithMock(vm_utils, 'lookup') + + vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref') + + mock.ReplayAll() + self.assertEquals( + 'vmref', vm_utils.vm_ref_or_raise('session', 'somename')) + mock.VerifyAll() + + +class VMRefOrRaiseVMNotFoundTestCase(test.TestCase): + + def test_exception_raised(self): + mock = mox.Mox() + mock.StubOutWithMock(vm_utils, 'lookup') + + vm_utils.lookup('session', 'somename').AndReturn(None) + + mock.ReplayAll() + self.assertRaises( + exception.InstanceNotFound, + lambda: vm_utils.vm_ref_or_raise('session', 'somename') + ) + mock.VerifyAll() + + def test_exception_msg_contains_vm_name(self): + mock = mox.Mox() + mock.StubOutWithMock(vm_utils, 'lookup') + + vm_utils.lookup('session', 'somename').AndReturn(None) + + mock.ReplayAll() + try: + vm_utils.vm_ref_or_raise('session', 'somename') + except exception.InstanceNotFound as e: + self.assertTrue( + 'somename' in str(e)) + mock.VerifyAll() + + +class BittorrentTestCase(stubs.XenAPITestBase): + def setUp(self): + super(BittorrentTestCase, self).setUp() + self.context = context.get_admin_context() + + def test_image_uses_bittorrent(self): + sys_meta = {'image_bittorrent': True} + instance = db.instance_create(self.context, + {'system_metadata': sys_meta}) + instance = db.instance_get_by_uuid(self.context, instance['uuid']) + self.flags(xenapi_torrent_images='some') + self.assertTrue(vm_utils._image_uses_bittorrent(self.context, + instance)) + + def _test_create_image(self, cache_type): + sys_meta = {'image_cache_in_nova': True} + instance = db.instance_create(self.context, + {'system_metadata': sys_meta}) + instance = db.instance_get_by_uuid(self.context, instance['uuid']) + self.flags(cache_images=cache_type) + + was = {'called': None} + + def fake_create_cached_image(*args): + was['called'] = 'some' + return {} + self.stubs.Set(vm_utils, '_create_cached_image', + fake_create_cached_image) + + def fake_fetch_image(*args): + was['called'] = 'none' + return {} + self.stubs.Set(vm_utils, '_fetch_image', + fake_fetch_image) + + vm_utils._create_image(self.context, None, instance, + 'foo', 'bar', 'baz') + + self.assertEqual(was['called'], cache_type) + + def test_create_image_cached(self): + self._test_create_image('some') + + def test_create_image_uncached(self): + self._test_create_image('none') + + +class CreateVBDTestCase(test.TestCase): + def setUp(self): + super(CreateVBDTestCase, self).setUp() + self.session = FakeSession() + self.mock = mox.Mox() + self.mock.StubOutWithMock(self.session, 'call_xenapi') + self.vbd_rec = self._generate_vbd_rec() + + def _generate_vbd_rec(self): + vbd_rec = {} + vbd_rec['VM'] = 'vm_ref' + vbd_rec['VDI'] = 'vdi_ref' + vbd_rec['userdevice'] = '0' + vbd_rec['bootable'] = False + vbd_rec['mode'] = 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + 
        vbd_rec['empty'] = False
+        vbd_rec['other_config'] = {}
+        vbd_rec['qos_algorithm_type'] = ''
+        vbd_rec['qos_algorithm_params'] = {}
+        vbd_rec['qos_supported_algorithms'] = []
+        return vbd_rec
+
+    def test_create_vbd_default_args(self):
+        self.session.call_xenapi('VBD.create',
+                                 self.vbd_rec).AndReturn("vbd_ref")
+        self.mock.ReplayAll()
+
+        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
+        self.assertEquals(result, "vbd_ref")
+        self.mock.VerifyAll()
+
+    def test_create_vbd_osvol(self):
+        self.session.call_xenapi('VBD.create',
+                                 self.vbd_rec).AndReturn("vbd_ref")
+        self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
+                                 "osvol", "True")
+        self.mock.ReplayAll()
+        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
+                                     osvol=True)
+        self.assertEquals(result, "vbd_ref")
+        self.mock.VerifyAll()
+
+    def test_create_vbd_extra_args(self):
+        self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
+        self.vbd_rec['type'] = 'a'
+        self.vbd_rec['mode'] = 'RO'
+        self.vbd_rec['bootable'] = True
+        self.vbd_rec['empty'] = True
+        self.vbd_rec['unpluggable'] = False
+        self.session.call_xenapi('VBD.create',
+                                 self.vbd_rec).AndReturn("vbd_ref")
+        self.mock.ReplayAll()
+
+        result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
+                    vbd_type="a", read_only=True, bootable=True,
+                    empty=True, unpluggable=False)
+        self.assertEquals(result, "vbd_ref")
+        self.mock.VerifyAll()
+
+    def test_attach_cd(self):
+        self.mock.StubOutWithMock(vm_utils, 'create_vbd')
+
+        vm_utils.create_vbd(self.session, "vm_ref", None, 1,
+                            vbd_type='cd', read_only=True, bootable=True,
+                            empty=True, unpluggable=False).AndReturn("vbd_ref")
+        self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
+        self.mock.ReplayAll()
+
+        result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
+        self.assertEquals(result, "vbd_ref")
+        self.mock.VerifyAll()
+
+
+class VDIOtherConfigTestCase(stubs.XenAPITestBase):
+    """Tests to ensure that the code is populating VDI's `other_config`
+    attribute with the correct metadata.
+ """ + + def setUp(self): + super(VDIOtherConfigTestCase, self).setUp() + + class _FakeSession(): + def call_xenapi(self, operation, *args, **kwargs): + # VDI.add_to_other_config -> VDI_add_to_other_config + method = getattr(self, operation.replace('.', '_'), None) + if method: + return method(*args, **kwargs) + + self.operation = operation + self.args = args + self.kwargs = kwargs + + self.session = _FakeSession() + self.context = context.get_admin_context() + self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd', + 'name': 'myinstance'} + + def test_create_vdi(self): + # Some images are registered with XenServer explicitly by calling + # `create_vdi` + vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance, + 'myvdi', 'root', 1024, read_only=True) + + expected = {'nova_disk_type': 'root', + 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'} + + self.assertEqual(expected, self.session.args[0]['other_config']) + + def test_create_image(self): + # Other images are registered implicitly when they are dropped into + # the SR by a dom0 plugin or some other process + self.flags(cache_images='none') + + def fake_fetch_image(*args): + return {'root': {'uuid': 'fake-uuid'}} + + self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) + + other_config = {} + + def VDI_add_to_other_config(ref, key, value): + other_config[key] = value + + def VDI_get_record(ref): + return {'other_config': {}} + + # Stubbing on the session object and not class so we don't pollute + # other tests + self.session.VDI_add_to_other_config = VDI_add_to_other_config + self.session.VDI_get_record = VDI_get_record + + vm_utils._create_image(self.context, self.session, self.fake_instance, + 'myvdi', 'image1', vm_utils.ImageType.DISK_VHD) + + expected = {'nova_disk_type': 'root', + 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'} + + self.assertEqual(expected, other_config) + + def test_move_disks(self): + # Migrated images should preserve the `other_config` + other_config = {} + + def VDI_add_to_other_config(ref, key, value): + other_config[key] = value + + def VDI_get_record(ref): + return {'other_config': {}} + + def call_plugin_serialized(*args, **kwargs): + return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}} + + # Stubbing on the session object and not class so we don't pollute + # other tests + self.session.VDI_add_to_other_config = VDI_add_to_other_config + self.session.VDI_get_record = VDI_get_record + self.session.call_plugin_serialized = call_plugin_serialized + + self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None) + self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None) + + vm_utils.move_disks(self.session, self.fake_instance, {}) + + expected = {'nova_disk_type': 'root', + 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'} + + self.assertEqual(expected, other_config) diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py index 5d4344bb0..1f94d21e3 100644 --- a/nova/tests/virt/xenapi/test_volumeops.py +++ b/nova/tests/virt/xenapi/test_volumeops.py @@ -17,7 +17,7 @@ import collections from nova import test -from nova.tests.xenapi import stubs +from nova.tests.virt.xenapi import stubs from nova.virt.xenapi import volumeops diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py new file mode 100644 index 000000000..af2d97f67 --- /dev/null +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -0,0 +1,3477 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Test suite for XenAPI.""" + +import ast +import base64 +import contextlib +import functools +import os +import re + +from oslo.config import cfg + +from nova.compute import api as compute_api +from nova.compute import flavors +from nova.compute import power_state +from nova.compute import task_states +from nova.compute import vm_states +from nova import context +from nova import db +from nova import exception +from nova.openstack.common import importutils +from nova.openstack.common import jsonutils +from nova.openstack.common import log as logging +from nova import test +from nova.tests.db import fakes as db_fakes +from nova.tests import fake_network +from nova.tests import fake_processutils +import nova.tests.image.fake as fake_image +from nova.tests import matchers +from nova.tests.virt.xenapi import stubs +from nova.virt import fake +from nova.virt.xenapi import agent +from nova.virt.xenapi import driver as xenapi_conn +from nova.virt.xenapi import fake as xenapi_fake +from nova.virt.xenapi import host +from nova.virt.xenapi.imageupload import glance +from nova.virt.xenapi import pool +from nova.virt.xenapi import pool_states +from nova.virt.xenapi import vm_utils +from nova.virt.xenapi import vmops +from nova.virt.xenapi import volume_utils + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('compute_manager', 'nova.service') +CONF.import_opt('network_manager', 'nova.service') +CONF.import_opt('compute_driver', 'nova.virt.driver') +CONF.import_opt('host', 'nova.netconf') +CONF.import_opt('default_availability_zone', 'nova.availability_zones') + +IMAGE_MACHINE = '1' +IMAGE_KERNEL = '2' +IMAGE_RAMDISK = '3' +IMAGE_RAW = '4' +IMAGE_VHD = '5' +IMAGE_ISO = '6' + +IMAGE_FIXTURES = { + IMAGE_MACHINE: { + 'image_meta': {'name': 'fakemachine', 'size': 0, + 'disk_format': 'ami', + 'container_format': 'ami'}, + }, + IMAGE_KERNEL: { + 'image_meta': {'name': 'fakekernel', 'size': 0, + 'disk_format': 'aki', + 'container_format': 'aki'}, + }, + IMAGE_RAMDISK: { + 'image_meta': {'name': 'fakeramdisk', 'size': 0, + 'disk_format': 'ari', + 'container_format': 'ari'}, + }, + IMAGE_RAW: { + 'image_meta': {'name': 'fakeraw', 'size': 0, + 'disk_format': 'raw', + 'container_format': 'bare'}, + }, + IMAGE_VHD: { + 'image_meta': {'name': 'fakevhd', 'size': 0, + 'disk_format': 'vhd', + 'container_format': 'ovf'}, + }, + IMAGE_ISO: { + 'image_meta': {'name': 'fakeiso', 'size': 0, + 'disk_format': 'iso', + 'container_format': 'bare'}, + }, +} + + +def set_image_fixtures(): + image_service = fake_image.FakeImageService() + image_service.images.clear() + for image_id, image_meta in IMAGE_FIXTURES.items(): + image_meta = image_meta['image_meta'] + image_meta['id'] = image_id + image_service.create(None, image_meta) + + +def get_fake_device_info(): + # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid + # can be removed from the dict when LP bug #1087308 is fixed + fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None) + fake_vdi_uuid 
= xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid'] + fake = {'block_device_mapping': + [{'connection_info': {'driver_volume_type': 'iscsi', + 'data': {'sr_uuid': 'falseSR', + 'introduce_sr_keys': ['sr_type'], + 'sr_type': 'iscsi', + 'vdi_uuid': fake_vdi_uuid, + 'target_discovered': False, + 'target_iqn': 'foo_iqn:foo_volid', + 'target_portal': 'localhost:3260', + 'volume_id': 'foo_volid', + 'target_lun': 1, + 'auth_password': 'my-p@55w0rd', + 'auth_username': 'johndoe', + 'auth_method': u'CHAP'}, }, + 'mount_device': 'vda', + 'delete_on_termination': False}, ], + 'root_device_name': '/dev/sda', + 'ephemerals': [], + 'swap': None, } + return fake + + +def stub_vm_utils_with_vdi_attached_here(function, should_return=True): + """ + vm_utils.with_vdi_attached_here needs to be stubbed out because it + calls down to the filesystem to attach a vdi. This provides a + decorator to handle that. + """ + @functools.wraps(function) + def decorated_function(self, *args, **kwargs): + @contextlib.contextmanager + def fake_vdi_attached_here(*args, **kwargs): + fake_dev = 'fakedev' + yield fake_dev + + def fake_image_download(*args, **kwargs): + pass + + def fake_is_vdi_pv(*args, **kwargs): + return should_return + + orig_vdi_attached_here = vm_utils.vdi_attached_here + orig_image_download = fake_image._FakeImageService.download + orig_is_vdi_pv = vm_utils._is_vdi_pv + try: + vm_utils.vdi_attached_here = fake_vdi_attached_here + fake_image._FakeImageService.download = fake_image_download + vm_utils._is_vdi_pv = fake_is_vdi_pv + return function(self, *args, **kwargs) + finally: + vm_utils._is_vdi_pv = orig_is_vdi_pv + fake_image._FakeImageService.download = orig_image_download + vm_utils.vdi_attached_here = orig_vdi_attached_here + + return decorated_function + + +def create_instance_with_system_metadata(context, instance_values): + instance_type = db.instance_type_get(context, + instance_values['instance_type_id']) + sys_meta = flavors.save_instance_type_info({}, + instance_type) + instance_values['system_metadata'] = sys_meta + return db.instance_create(context, instance_values) + + +class XenAPIVolumeTestCase(stubs.XenAPITestBase): + """Unit tests for Volume operations.""" + def setUp(self): + super(XenAPIVolumeTestCase, self).setUp() + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + self.flags(disable_process_locking=True, + firewall_driver='nova.virt.xenapi.firewall.' 
+ 'Dom0IptablesFirewallDriver', + xenapi_connection_url='test_url', + xenapi_connection_password='test_pass') + db_fakes.stub_out_db_instance_api(self.stubs) + self.instance_values = {'id': 1, + 'project_id': self.user_id, + 'user_id': 'fake', + 'image_ref': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'root_gb': 20, + 'instance_type_id': '3', # m1.large + 'os_type': 'linux', + 'architecture': 'x86-64'} + + def _create_volume(self, size=0): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['host'] = 'localhost' + vol['availability_zone'] = CONF.default_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(self.context, vol) + + @staticmethod + def _make_connection_data(): + return { + 'volume_id': 1, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.1:3260,fake', + 'target_lun': None, + 'auth_method': 'CHAP', + 'auth_username': 'username', + 'auth_password': 'password', + } + + @classmethod + def _make_connection_info(cls): + return { + 'driver_volume_type': 'iscsi', + 'data': cls._make_connection_data() + } + + def test_mountpoint_to_number(self): + cases = { + 'sda': 0, + 'sdp': 15, + 'hda': 0, + 'hdp': 15, + 'vda': 0, + 'xvda': 0, + '0': 0, + '10': 10, + 'vdq': -1, + 'sdq': -1, + 'hdq': -1, + 'xvdq': -1, + } + + for (input, expected) in cases.iteritems(): + actual = volume_utils.mountpoint_to_number(input) + self.assertEqual(actual, expected, + '%s yielded %s, not %s' % (input, actual, expected)) + + def test_parse_volume_info_parsing_auth_details(self): + result = volume_utils.parse_volume_info( + self._make_connection_data()) + + self.assertEquals('username', result['chapuser']) + self.assertEquals('password', result['chappassword']) + + def test_get_device_number_raise_exception_on_wrong_mountpoint(self): + self.assertRaises( + volume_utils.StorageError, + volume_utils.get_device_number, + 'dev/sd') + + def test_attach_volume(self): + # This shows how to test Ops classes' methods. + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + instance = db.instance_create(self.context, self.instance_values) + vm = xenapi_fake.create_vm(instance['name'], 'Running') + result = conn.attach_volume(self._make_connection_info(), + instance, '/dev/sdc') + + # check that the VM has a VBD attached to it + # Get XenAPI record for VBD + vbds = xenapi_fake.get_all('VBD') + vbd = xenapi_fake.get_record('VBD', vbds[0]) + vm_ref = vbd['VM'] + self.assertEqual(vm_ref, vm) + + def test_attach_volume_raise_exception(self): + # This shows how to test when exceptions are raised. + stubs.stubout_session(self.stubs, + stubs.FakeSessionForVolumeFailedTests) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + instance = db.instance_create(self.context, self.instance_values) + xenapi_fake.create_vm(instance['name'], 'Running') + self.assertRaises(exception.VolumeDriverNotFound, + conn.attach_volume, + {'driver_volume_type': 'nonexist'}, + instance, + '/dev/sdc') + + +class XenAPIVMTestCase(stubs.XenAPITestBase): + """Unit tests for VM operations.""" + def setUp(self): + super(XenAPIVMTestCase, self).setUp() + self.network = importutils.import_object(CONF.network_manager) + self.flags(disable_process_locking=True, + instance_name_template='%d', + firewall_driver='nova.virt.xenapi.firewall.' 
+ 'Dom0IptablesFirewallDriver', + xenapi_connection_url='test_url', + xenapi_connection_password='test_pass',) + xenapi_fake.create_local_srs() + xenapi_fake.create_local_pifs() + db_fakes.stub_out_db_instance_api(self.stubs) + xenapi_fake.create_network('fake', CONF.flat_network_bridge) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + stubs.stubout_get_this_vm_uuid(self.stubs) + stubs.stubout_is_vdi_pv(self.stubs) + stubs.stub_out_vm_methods(self.stubs) + fake_processutils.stub_out_processutils_execute(self.stubs) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + fake_image.stub_out_image_service(self.stubs) + set_image_fixtures() + stubs.stubout_image_service_download(self.stubs) + stubs.stubout_stream_disk(self.stubs) + + def fake_inject_instance_metadata(self, instance, vm): + pass + self.stubs.Set(vmops.VMOps, 'inject_instance_metadata', + fake_inject_instance_metadata) + + def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): + name_label = "fakenamelabel" + disk_type = "fakedisktype" + virtual_size = 777 + return vm_utils.create_vdi( + session, sr_ref, instance, name_label, disk_type, + virtual_size) + self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi) + + def tearDown(self): + fake_image.FakeImageService_reset() + super(XenAPIVMTestCase, self).tearDown() + + def test_init_host(self): + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + vm = vm_utils._get_this_vm_ref(session) + # Local root disk + vdi0 = xenapi_fake.create_vdi('compute', None) + vbd0 = xenapi_fake.create_vbd(vm, vdi0) + # Instance VDI + vdi1 = xenapi_fake.create_vdi('instance-aaaa', None, + other_config={'nova_instance_uuid': 'aaaa'}) + vbd1 = xenapi_fake.create_vbd(vm, vdi1) + # Only looks like instance VDI + vdi2 = xenapi_fake.create_vdi('instance-bbbb', None) + vbd2 = xenapi_fake.create_vbd(vm, vdi2) + + self.conn.init_host(None) + self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2])) + + def test_list_instances_0(self): + instances = self.conn.list_instances() + self.assertEquals(instances, []) + + def test_list_instance_uuids_0(self): + instance_uuids = self.conn.list_instance_uuids() + self.assertEquals(instance_uuids, []) + + def test_list_instance_uuids(self): + uuids = [] + for x in xrange(1, 4): + instance = self._create_instance(x) + uuids.append(instance['uuid']) + instance_uuids = self.conn.list_instance_uuids() + self.assertEqual(len(uuids), len(instance_uuids)) + self.assertEqual(set(uuids), set(instance_uuids)) + + def test_get_rrd_server(self): + self.flags(xenapi_connection_url='myscheme://myaddress/') + server_info = vm_utils._get_rrd_server() + self.assertEqual(server_info[0], 'myscheme') + self.assertEqual(server_info[1], 'myaddress') + + def test_get_diagnostics(self): + def fake_get_rrd(host, vm_uuid): + path = os.path.dirname(os.path.realpath(__file__)) + with open(os.path.join(path, 'vm_rrd.xml')) as f: + return re.sub(r'\s', '', f.read()) + self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd) + + fake_diagnostics = { + 'vbd_xvdb_write': '0.0', + 'memory_target': '4294967296.0000', + 'memory_internal_free': '1415564.0000', + 'memory': '4294967296.0000', + 'vbd_xvda_write': '0.0', + 'cpu0': '0.0042', + 'vif_0_tx': '287.4134', + 'vbd_xvda_read': '0.0', + 'vif_0_rx': '1816.0144', + 'vif_2_rx': '0.0', + 'vif_2_tx': '0.0', + 'vbd_xvdb_read': '0.0', + 
'last_update': '1328795567', + } + instance = self._create_instance() + expected = self.conn.get_diagnostics(instance) + self.assertThat(fake_diagnostics, matchers.DictMatches(expected)) + + def test_get_vnc_console(self): + instance = self._create_instance() + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vm_ref = vm_utils.lookup(session, instance['name']) + + console = conn.get_vnc_console(instance) + + # Note(sulo): We dont care about session id in test + # they will always differ so strip that out + actual_path = console['internal_access_path'].split('&')[0] + expected_path = "/console?ref=%s" % str(vm_ref) + + self.assertEqual(expected_path, actual_path) + + def test_get_vnc_console_for_rescue(self): + instance = self._create_instance() + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue', + 'Running') + # Set instance state to rescued + instance['vm_state'] = 'rescued' + + console = conn.get_vnc_console(instance) + + # Note(sulo): We dont care about session id in test + # they will always differ so strip that out + actual_path = console['internal_access_path'].split('&')[0] + expected_path = "/console?ref=%s" % str(rescue_vm) + + self.assertEqual(expected_path, actual_path) + + def test_get_vnc_console_instance_not_ready(self): + instance = {} + # set instance name and state + instance['name'] = 'fake-instance' + instance['uuid'] = '00000000-0000-0000-0000-000000000000' + instance['vm_state'] = 'building' + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.assertRaises(exception.InstanceNotFound, + conn.get_vnc_console, instance) + + def test_get_vnc_console_rescue_not_ready(self): + instance = {} + instance['name'] = 'fake-rescue' + instance['uuid'] = '00000000-0000-0000-0000-000000000001' + instance['vm_state'] = 'rescued' + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.assertRaises(exception.InstanceNotReady, + conn.get_vnc_console, instance) + + def test_instance_snapshot_fails_with_no_primary_vdi(self): + + def create_bad_vbd(session, vm_ref, vdi_ref, userdevice, + vbd_type='disk', read_only=False, bootable=False, + osvol=False): + vbd_rec = {'VM': vm_ref, + 'VDI': vdi_ref, + 'userdevice': 'fake', + 'currently_attached': False} + vbd_ref = xenapi_fake._create_object('VBD', vbd_rec) + xenapi_fake.after_VBD_create(vbd_ref, vbd_rec) + return vbd_ref + + self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd) + stubs.stubout_instance_snapshot(self.stubs) + # Stubbing out firewall driver as previous stub sets alters + # xml rpc result parsing + stubs.stubout_firewall_driver(self.stubs, self.conn) + instance = self._create_instance() + + image_id = "my_snapshot_id" + self.assertRaises(exception.NovaException, self.conn.snapshot, + self.context, instance, image_id, + lambda *args, **kwargs: None) + + def test_instance_snapshot(self): + expected_calls = [ + {'args': (), + 'kwargs': + {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, + {'args': (), + 'kwargs': + {'task_state': task_states.IMAGE_UPLOADING, + 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] + func_call_matcher = matchers.FunctionCallMatcher(expected_calls) + image_id = "my_snapshot_id" + + stubs.stubout_instance_snapshot(self.stubs) + stubs.stubout_is_snapshot(self.stubs) + # Stubbing out firewall driver as 
previous stub sets alters + # xml rpc result parsing + stubs.stubout_firewall_driver(self.stubs, self.conn) + + instance = self._create_instance() + + self.fake_upload_called = False + + def fake_image_upload(_self, ctx, session, inst, vdi_uuids, + img_id): + self.fake_upload_called = True + self.assertEqual(ctx, self.context) + self.assertEqual(inst, instance) + self.assertTrue(isinstance(vdi_uuids, list)) + self.assertEqual(img_id, image_id) + + self.stubs.Set(glance.GlanceStore, 'upload_image', + fake_image_upload) + + self.conn.snapshot(self.context, instance, image_id, + func_call_matcher.call) + + # Ensure VM was torn down + vm_labels = [] + for vm_ref in xenapi_fake.get_all('VM'): + vm_rec = xenapi_fake.get_record('VM', vm_ref) + if not vm_rec["is_control_domain"]: + vm_labels.append(vm_rec["name_label"]) + + self.assertEquals(vm_labels, [instance['name']]) + + # Ensure VBDs were torn down + vbd_labels = [] + for vbd_ref in xenapi_fake.get_all('VBD'): + vbd_rec = xenapi_fake.get_record('VBD', vbd_ref) + vbd_labels.append(vbd_rec["vm_name_label"]) + + self.assertEquals(vbd_labels, [instance['name']]) + + # Ensure task states changed in correct order + self.assertIsNone(func_call_matcher.match()) + + # Ensure VDIs were torn down + for vdi_ref in xenapi_fake.get_all('VDI'): + vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) + name_label = vdi_rec["name_label"] + self.assert_(not name_label.endswith('snapshot')) + + self.assertTrue(self.fake_upload_called) + + def create_vm_record(self, conn, os_type, name): + instances = conn.list_instances() + self.assertEquals(instances, [name]) + + # Get Nova record for VM + vm_info = conn.get_info({'name': name}) + # Get XenAPI record for VM + vms = [rec for ref, rec + in xenapi_fake.get_all_records('VM').iteritems() + if not rec['is_control_domain']] + vm = vms[0] + self.vm_info = vm_info + self.vm = vm + + def check_vm_record(self, conn, check_injection=False): + # Check that m1.large above turned into the right thing. + instance_type = db.instance_type_get_by_name(conn, 'm1.large') + mem_kib = long(instance_type['memory_mb']) << 10 + mem_bytes = str(mem_kib << 10) + vcpus = instance_type['vcpus'] + self.assertEquals(self.vm_info['max_mem'], mem_kib) + self.assertEquals(self.vm_info['mem'], mem_kib) + self.assertEquals(self.vm['memory_static_max'], mem_bytes) + self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes) + self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes) + self.assertEquals(self.vm['VCPUs_max'], str(vcpus)) + self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus)) + + # Check that the VM is running according to Nova + self.assertEquals(self.vm_info['state'], power_state.RUNNING) + + # Check that the VM is running according to XenAPI. 
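# For reference, the flavor memory checks above work in binary units:
# memory_mb shifted left by 10 gives KiB, and a further shift gives the
# byte count that XenAPI records as a string.  A worked example assuming
# the stock 8192 MB m1.large flavor (an assumed value, not one read from
# these fixtures):
example_mem_kib = 8192 << 10                    # 8388608 KiB
example_mem_bytes = str(example_mem_kib << 10)  # '8589934592'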
+ self.assertEquals(self.vm['power_state'], 'Running') + + if check_injection: + xenstore_data = self.vm['xenstore_data'] + self.assertEquals(xenstore_data['vm-data/hostname'], 'test') + key = 'vm-data/networking/DEADBEEF0001' + xenstore_value = xenstore_data[key] + tcpip_data = ast.literal_eval(xenstore_value) + self.assertEquals(tcpip_data, + {'broadcast': '192.168.1.255', + 'dns': ['192.168.1.4', '192.168.1.3'], + 'gateway': '192.168.1.1', + 'gateway_v6': 'fe80::def', + 'ip6s': [{'enabled': '1', + 'ip': '2001:db8:0:1::1', + 'netmask': 64, + 'gateway': 'fe80::def'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.1.100', + 'netmask': '255.255.255.0', + 'gateway': '192.168.1.1'}, + {'enabled': '1', + 'ip': '192.168.1.101', + 'netmask': '255.255.255.0', + 'gateway': '192.168.1.1'}], + 'label': 'test1', + 'mac': 'DE:AD:BE:EF:00:01'}) + + def check_vm_params_for_windows(self): + self.assertEquals(self.vm['platform']['nx'], 'true') + self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'}) + self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order') + + # check that these are not set + self.assertEquals(self.vm['PV_args'], '') + self.assertEquals(self.vm['PV_bootloader'], '') + self.assertEquals(self.vm['PV_kernel'], '') + self.assertEquals(self.vm['PV_ramdisk'], '') + + def check_vm_params_for_linux(self): + self.assertEquals(self.vm['platform']['nx'], 'false') + self.assertEquals(self.vm['PV_args'], '') + self.assertEquals(self.vm['PV_bootloader'], 'pygrub') + + # check that these are not set + self.assertEquals(self.vm['PV_kernel'], '') + self.assertEquals(self.vm['PV_ramdisk'], '') + self.assertEquals(self.vm['HVM_boot_params'], {}) + self.assertEquals(self.vm['HVM_boot_policy'], '') + + def check_vm_params_for_linux_with_external_kernel(self): + self.assertEquals(self.vm['platform']['nx'], 'false') + self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1') + self.assertNotEquals(self.vm['PV_kernel'], '') + self.assertNotEquals(self.vm['PV_ramdisk'], '') + + # check that these are not set + self.assertEquals(self.vm['HVM_boot_params'], {}) + self.assertEquals(self.vm['HVM_boot_policy'], '') + + def _list_vdis(self): + url = CONF.xenapi_connection_url + username = CONF.xenapi_connection_username + password = CONF.xenapi_connection_password + session = xenapi_conn.XenAPISession(url, username, password, + fake.FakeVirtAPI()) + return session.call_xenapi('VDI.get_all') + + def _list_vms(self): + url = CONF.xenapi_connection_url + username = CONF.xenapi_connection_username + password = CONF.xenapi_connection_password + session = xenapi_conn.XenAPISession(url, username, password, + fake.FakeVirtAPI()) + return session.call_xenapi('VM.get_all') + + def _check_vdis(self, start_list, end_list): + for vdi_ref in end_list: + if vdi_ref not in start_list: + vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) + # If the cache is turned on then the base disk will be + # there even after the cleanup + if 'other_config' in vdi_rec: + if 'image-id' not in vdi_rec['other_config']: + self.fail('Found unexpected VDI:%s' % vdi_ref) + else: + self.fail('Found unexpected VDI:%s' % vdi_ref) + + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, + instance_type_id="3", os_type="linux", + hostname="test", architecture="x86-64", instance_id=1, + injected_files=None, check_injection=False, + create_record=True, empty_dns=False, + block_device_info=None, + key_data=None): + if injected_files is None: + injected_files = [] + + # Fake out inject_instance_metadata + def fake_inject_instance_metadata(self, 
instance, vm): + pass + self.stubs.Set(vmops.VMOps, 'inject_instance_metadata', + fake_inject_instance_metadata) + + if create_record: + instance_values = {'id': instance_id, + 'project_id': self.project_id, + 'user_id': self.user_id, + 'image_ref': image_ref, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'root_gb': 20, + 'instance_type_id': instance_type_id, + 'os_type': os_type, + 'hostname': hostname, + 'key_data': key_data, + 'architecture': architecture} + instance = create_instance_with_system_metadata(self.context, + instance_values) + else: + instance = db.instance_get(self.context, instance_id) + + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) + if empty_dns: + # NOTE(tr3buchet): this is a terrible way to do this... + network_info[0]['network']['subnets'][0]['dns'] = [] + + image_meta = {} + if image_ref: + image_meta = IMAGE_FIXTURES[image_ref]["image_meta"] + self.conn.spawn(self.context, instance, image_meta, injected_files, + 'herp', network_info, block_device_info) + self.create_vm_record(self.conn, os_type, instance['name']) + self.check_vm_record(self.conn, check_injection) + self.assertTrue(instance['os_type']) + self.assertTrue(instance['architecture']) + + def test_spawn_empty_dns(self): + # Test spawning with an empty dns list. + self._test_spawn(IMAGE_VHD, None, None, + os_type="linux", architecture="x86-64", + empty_dns=True) + self.check_vm_params_for_linux() + + def test_spawn_not_enough_memory(self): + self.assertRaises(exception.InsufficientFreeMemory, + self._test_spawn, + '1', 2, 3, "4") # m1.xlarge + + def test_spawn_fail_cleanup_1(self): + """Simulates an error while downloading an image. + + Verifies that the VM and VDIs created are properly cleaned up. + """ + vdi_recs_start = self._list_vdis() + start_vms = self._list_vms() + stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True) + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, '1', 2, 3) + # No additional VDI should be found. + vdi_recs_end = self._list_vdis() + end_vms = self._list_vms() + self._check_vdis(vdi_recs_start, vdi_recs_end) + # No additional VMs should be found. + self.assertEqual(start_vms, end_vms) + + def test_spawn_fail_cleanup_2(self): + """Simulates an error while creating VM record. + + Verifies that the VM and VDIs created are properly cleaned up. + """ + vdi_recs_start = self._list_vdis() + start_vms = self._list_vms() + stubs.stubout_create_vm(self.stubs) + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, '1', 2, 3) + # No additional VDI should be found. + vdi_recs_end = self._list_vdis() + end_vms = self._list_vms() + self._check_vdis(vdi_recs_start, vdi_recs_end) + # No additional VMs should be found. + self.assertEqual(start_vms, end_vms) + + def test_spawn_fail_cleanup_3(self): + """Simulates an error while attaching disks. + + Verifies that the VM and VDIs created are properly cleaned up. + """ + stubs.stubout_attach_disks(self.stubs) + vdi_recs_start = self._list_vdis() + start_vms = self._list_vms() + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, '1', 2, 3) + # No additional VDI should be found. + vdi_recs_end = self._list_vdis() + end_vms = self._list_vms() + self._check_vdis(vdi_recs_start, vdi_recs_end) + # No additional VMs should be found. 
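# Each of these cleanup tests uses the same bookkeeping: snapshot the
# existing VDI and VM refs, provoke a failure part-way through spawn,
# then verify nothing new was left behind.  The same check could be
# wrapped in a context manager; a sketch under that assumption only,
# since the tests above inline it and assert_no_new_refs is not a nova
# helper:
import contextlib

@contextlib.contextmanager
def assert_no_new_refs(list_refs):
    # list_refs is any callable returning the current refs,
    # e.g. self._list_vdis or self._list_vms.
    before = set(list_refs())
    yield
    leaked = set(list_refs()) - before
    assert not leaked, "unexpected refs left behind: %s" % leaked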
+ self.assertEqual(start_vms, end_vms) + + @stub_vm_utils_with_vdi_attached_here + def test_spawn_raw_glance(self): + self._test_spawn(IMAGE_RAW, None, None) + self.check_vm_params_for_linux() + + def test_spawn_vhd_glance_linux(self): + self._test_spawn(IMAGE_VHD, None, None, + os_type="linux", architecture="x86-64") + self.check_vm_params_for_linux() + + def test_spawn_vhd_glance_windows(self): + self._test_spawn(IMAGE_VHD, None, None, + os_type="windows", architecture="i386") + self.check_vm_params_for_windows() + + def test_spawn_iso_glance(self): + self._test_spawn(IMAGE_ISO, None, None, + os_type="windows", architecture="i386") + self.check_vm_params_for_windows() + + def test_spawn_glance(self): + + def fake_fetch_disk_image(context, session, instance, name_label, + image_id, image_type): + sr_ref = vm_utils.safe_find_sr(session) + image_type_str = vm_utils.ImageType.to_string(image_type) + vdi_ref = vm_utils.create_vdi(session, sr_ref, instance, + name_label, image_type_str, "20") + vdi_role = vm_utils.ImageType.get_role(image_type) + vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) + return {vdi_role: dict(uuid=vdi_uuid, file=None)} + self.stubs.Set(vm_utils, '_fetch_disk_image', + fake_fetch_disk_image) + + self._test_spawn(IMAGE_MACHINE, + IMAGE_KERNEL, + IMAGE_RAMDISK) + self.check_vm_params_for_linux_with_external_kernel() + + def test_spawn_boot_from_volume_no_image_meta(self): + dev_info = get_fake_device_info() + self._test_spawn(None, None, None, + block_device_info=dev_info) + + def test_spawn_boot_from_volume_with_image_meta(self): + dev_info = get_fake_device_info() + self._test_spawn(None, None, None, + block_device_info=dev_info) + + def test_spawn_netinject_file(self): + self.flags(flat_injected=True) + db_fakes.stub_out_db_instance_api(self.stubs, injected=True) + + self._tee_executed = False + + def _tee_handler(cmd, **kwargs): + input = kwargs.get('process_input', None) + self.assertNotEqual(input, None) + config = [line.strip() for line in input.split("\n")] + # Find the start of eth0 configuration and check it + index = config.index('auto eth0') + self.assertEquals(config[index + 1:index + 8], [ + 'iface eth0 inet static', + 'address 192.168.1.100', + 'netmask 255.255.255.0', + 'broadcast 192.168.1.255', + 'gateway 192.168.1.1', + 'dns-nameservers 192.168.1.3 192.168.1.4', + '']) + self._tee_executed = True + return '', '' + + def _readlink_handler(cmd_parts, **kwargs): + return os.path.realpath(cmd_parts[2]), '' + + fake_processutils.fake_execute_set_repliers([ + # Capture the tee .../etc/network/interfaces command + (r'tee.*interfaces', _tee_handler), + (r'readlink -nm.*', _readlink_handler), + ]) + self._test_spawn(IMAGE_MACHINE, + IMAGE_KERNEL, + IMAGE_RAMDISK, + check_injection=True) + self.assertTrue(self._tee_executed) + + def test_spawn_netinject_xenstore(self): + db_fakes.stub_out_db_instance_api(self.stubs, injected=True) + + self._tee_executed = False + + def _mount_handler(cmd, *ignore_args, **ignore_kwargs): + # When mounting, create real files under the mountpoint to simulate + # files in the mounted filesystem + + # mount point will be the last item of the command list + self._tmpdir = cmd[len(cmd) - 1] + LOG.debug(_('Creating files in %s to simulate guest agent'), + self._tmpdir) + os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin')) + # Touch the file using open + open(os.path.join(self._tmpdir, 'usr', 'sbin', + 'xe-update-networking'), 'w').close() + return '', '' + + def _umount_handler(cmd, *ignore_args, **ignore_kwargs): + # Umount 
would normally make files in the mounted filesystem
+            # disappear, so do that here
+            LOG.debug(_('Removing simulated guest agent files in %s'),
+                      self._tmpdir)
+            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
+                                   'xe-update-networking'))
+            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
+            os.rmdir(os.path.join(self._tmpdir, 'usr'))
+            return '', ''
+
+        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
+            self._tee_executed = True
+            return '', ''
+
+        fake_processutils.fake_execute_set_repliers([
+            (r'mount', _mount_handler),
+            (r'umount', _umount_handler),
+            (r'tee.*interfaces', _tee_handler)])
+        self._test_spawn('1', 2, 3, check_injection=True)
+
+        # tee must not run in this case, where an injection-capable
+        # guest agent is detected
+        self.assertFalse(self._tee_executed)
+
+    def test_spawn_vlanmanager(self):
+        self.flags(network_manager='nova.network.manager.VlanManager',
+                   vlan_interface='fake0')
+
+        def dummy(*args, **kwargs):
+            pass
+
+        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
+        # Reset network table
+        xenapi_fake.reset_table('network')
+        # Instance id = 2 will use vlan network (see db/fakes.py)
+        ctxt = self.context.elevated()
+        instance = self._create_instance(2, False)
+        networks = self.network.db.network_get_all(ctxt)
+        for network in networks:
+            self.network.set_network_host(ctxt, network)
+
+        self.network.allocate_for_instance(ctxt,
+                    instance_id=2,
+                    instance_uuid='00000000-0000-0000-0000-000000000002',
+                    host=CONF.host,
+                    vpn=None,
+                    rxtx_factor=3,
+                    project_id=self.project_id,
+                    macs=None)
+        self._test_spawn(IMAGE_MACHINE,
+                         IMAGE_KERNEL,
+                         IMAGE_RAMDISK,
+                         instance_id=2,
+                         create_record=False)
+        # TODO(salvatore-orlando): a complete test here would require
+        # a check for making sure the bridge for the VM's VIF is
+        # consistent with bridge specified in nova db
+
+    def test_spawn_with_network_qos(self):
+        self._create_instance()
+        for vif_ref in xenapi_fake.get_all('VIF'):
+            vif_rec = xenapi_fake.get_record('VIF', vif_ref)
+            self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
+            self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
+                              str(3 * 10 * 1024))
+
+    def test_spawn_ssh_key_injection(self):
+        # Test spawning with key_data on an instance. Should use
+        # agent file injection.
+        actual_injected_files = []
+
+        def fake_inject_file(self, method, args):
+            path = base64.b64decode(args['b64_path'])
+            contents = base64.b64decode(args['b64_contents'])
+            actual_injected_files.append((path, contents))
+            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+
+        def noop(*args, **kwargs):
+            pass
+
+        self.stubs.Set(stubs.FakeSessionForVMTests,
+                       '_plugin_agent_inject_file', fake_inject_file)
+        self.stubs.Set(agent.XenAPIBasedAgent,
+                       'set_admin_password', noop)
+
+        expected_data = ('\n# The following ssh key was injected by '
+                         'Nova\nfake_keydata\n')
+
+        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
+        self._test_spawn(IMAGE_VHD, None, None,
+                         os_type="linux", architecture="x86-64",
+                         key_data='fake_keydata')
+        self.assertEquals(actual_injected_files, injected_files)
+
+    def test_spawn_injected_files(self):
+        # Test spawning with injected_files.
+ actual_injected_files = [] + + def fake_inject_file(self, method, args): + path = base64.b64decode(args['b64_path']) + contents = base64.b64decode(args['b64_contents']) + actual_injected_files.append((path, contents)) + return jsonutils.dumps({'returncode': '0', 'message': 'success'}) + self.stubs.Set(stubs.FakeSessionForVMTests, + '_plugin_agent_inject_file', fake_inject_file) + + injected_files = [('/tmp/foo', 'foobar')] + self._test_spawn(IMAGE_VHD, None, None, + os_type="linux", architecture="x86-64", + injected_files=injected_files) + self.check_vm_params_for_linux() + self.assertEquals(actual_injected_files, injected_files) + + def test_rescue(self): + instance = self._create_instance() + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + vm_ref = vm_utils.lookup(session, instance['name']) + + swap_vdi_ref = xenapi_fake.create_vdi('swap', None) + root_vdi_ref = xenapi_fake.create_vdi('root', None) + + xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1) + xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0) + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + image_meta = {'id': IMAGE_VHD, + 'disk_format': 'vhd'} + conn.rescue(self.context, instance, [], image_meta, '') + + vm = xenapi_fake.get_record('VM', vm_ref) + rescue_name = "%s-rescue" % vm["name_label"] + rescue_ref = vm_utils.lookup(session, rescue_name) + rescue_vm = xenapi_fake.get_record('VM', rescue_ref) + + vdi_uuids = [] + for vbd_uuid in rescue_vm["VBDs"]: + vdi_uuids.append(xenapi_fake.get_record('VBD', vbd_uuid)["VDI"]) + self.assertTrue("swap" not in vdi_uuids) + + def test_unrescue(self): + instance = self._create_instance() + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + # Unrescue expects the original instance to be powered off + conn.power_off(instance) + rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue', + 'Running') + conn.unrescue(instance, None) + + def test_unrescue_not_in_rescue(self): + instance = self._create_instance() + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + # Ensure that it will not unrescue a non-rescued instance. 
+ self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue, + instance, None) + + def test_finish_revert_migration(self): + instance = self._create_instance() + + class VMOpsMock(): + + def __init__(self): + self.finish_revert_migration_called = False + + def finish_revert_migration(self, instance, block_info): + self.finish_revert_migration_called = True + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + conn._vmops = VMOpsMock() + conn.finish_revert_migration(instance, None) + self.assertTrue(conn._vmops.finish_revert_migration_called) + + def test_reboot_hard(self): + instance = self._create_instance() + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + conn.reboot(self.context, instance, None, "HARD") + + def test_reboot_soft(self): + instance = self._create_instance() + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + conn.reboot(self.context, instance, None, "SOFT") + + def test_reboot_halted(self): + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + instance = self._create_instance(spawn=False) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + xenapi_fake.create_vm(instance['name'], 'Halted') + conn.reboot(self.context, instance, None, "SOFT") + vm_ref = vm_utils.lookup(session, instance['name']) + vm = xenapi_fake.get_record('VM', vm_ref) + self.assertEquals(vm['power_state'], 'Running') + + def test_reboot_unknown_state(self): + instance = self._create_instance(spawn=False) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + xenapi_fake.create_vm(instance['name'], 'Unknown') + self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context, + instance, None, "SOFT") + + def test_reboot_rescued(self): + instance = self._create_instance() + instance['vm_state'] = vm_states.RESCUED + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + real_result = vm_utils.lookup(conn._session, instance['name']) + + self.mox.StubOutWithMock(vm_utils, 'lookup') + vm_utils.lookup(conn._session, instance['name'], + True).AndReturn(real_result) + self.mox.ReplayAll() + + conn.reboot(self.context, instance, None, "SOFT") + + def _test_maintenance_mode(self, find_host, find_aggregate): + real_call_xenapi = self.conn._session.call_xenapi + instance = self._create_instance(spawn=True) + api_calls = {} + + # Record all the xenapi calls, and return a fake list of hosts + # for the host.get_all call + def fake_call_xenapi(method, *args): + api_calls[method] = args + if method == 'host.get_all': + return ['foo', 'bar', 'baz'] + return real_call_xenapi(method, *args) + self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi) + + def fake_aggregate_get(context, host, key): + if find_aggregate: + return [{'fake': 'aggregate'}] + else: + return [] + self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host', + fake_aggregate_get) + + def fake_host_find(context, session, src, dst): + if find_host: + return 'bar' + else: + raise exception.NoValidHost("I saw this one coming...") + self.stubs.Set(host, '_host_find', fake_host_find) + + result = self.conn.host_maintenance_mode('bar', 'on_maintenance') + self.assertEqual(result, 'on_maintenance') + + # We expect the VM.pool_migrate call to have been called to + # migrate our instance to the 'bar' host + expected = (instance['uuid'], 'bar', {}) + self.assertTrue(api_calls.get('VM.pool_migrate'), expected) + + instance = db.instance_get_by_uuid(self.context, instance['uuid']) + self.assertTrue(instance['vm_state'], vm_states.ACTIVE) + 
self.assertTrue(instance['task_state'], task_states.MIGRATING) + + def test_maintenance_mode(self): + self._test_maintenance_mode(True, True) + + def test_maintenance_mode_no_host(self): + self.assertRaises(exception.NoValidHost, + self._test_maintenance_mode, False, True) + + def test_maintenance_mode_no_aggregate(self): + self.assertRaises(exception.NotFound, + self._test_maintenance_mode, True, False) + + def test_session_virtapi(self): + was = {'called': False} + + def fake_aggregate_get_by_host(self, *args, **kwargs): + was['called'] = True + raise Exception() + self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host", + fake_aggregate_get_by_host) + + self.stubs.Set(self.conn._session, "is_slave", True) + + try: + self.conn._session._get_host_uuid() + except Exception: + pass + self.assertTrue(was['called']) + + def test_per_instance_usage_running(self): + instance = self._create_instance(spawn=True) + instance_type = flavors.get_instance_type(3) + + expected = {instance['uuid']: {'memory_mb': instance_type['memory_mb'], + 'uuid': instance['uuid']}} + actual = self.conn.get_per_instance_usage() + self.assertEqual(expected, actual) + + # Paused instances still consume resources: + self.conn.pause(instance) + actual = self.conn.get_per_instance_usage() + self.assertEqual(expected, actual) + + def test_per_instance_usage_suspended(self): + # Suspended instances do not consume memory: + instance = self._create_instance(spawn=True) + self.conn.suspend(instance) + actual = self.conn.get_per_instance_usage() + self.assertEqual({}, actual) + + def test_per_instance_usage_halted(self): + instance = self._create_instance(spawn=True) + self.conn.power_off(instance) + actual = self.conn.get_per_instance_usage() + self.assertEqual({}, actual) + + def _create_instance(self, instance_id=1, spawn=True): + """Creates and spawns a test instance.""" + instance_values = { + 'id': instance_id, + 'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id, + 'display_name': 'host-%d' % instance_id, + 'project_id': self.project_id, + 'user_id': self.user_id, + 'image_ref': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'root_gb': 20, + 'instance_type_id': '3', # m1.large + 'os_type': 'linux', + 'vm_mode': 'hvm', + 'architecture': 'x86-64'} + + instance = create_instance_with_system_metadata(self.context, + instance_values) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) + image_meta = {'id': IMAGE_VHD, + 'disk_format': 'vhd'} + if spawn: + self.conn.spawn(self.context, instance, image_meta, [], 'herp', + network_info) + return instance + + +class XenAPIDiffieHellmanTestCase(test.TestCase): + """Unit tests for Diffie-Hellman code.""" + def setUp(self): + super(XenAPIDiffieHellmanTestCase, self).setUp() + self.alice = agent.SimpleDH() + self.bob = agent.SimpleDH() + + def test_shared(self): + alice_pub = self.alice.get_public() + bob_pub = self.bob.get_public() + alice_shared = self.alice.compute_shared(bob_pub) + bob_shared = self.bob.compute_shared(alice_pub) + self.assertEquals(alice_shared, bob_shared) + + def _test_encryption(self, message): + enc = self.alice.encrypt(message) + self.assertFalse(enc.endswith('\n')) + dec = self.bob.decrypt(enc) + self.assertEquals(dec, message) + + def test_encrypt_simple_message(self): + self._test_encryption('This is a simple message.') + + def test_encrypt_message_with_newlines_at_end(self): + self._test_encryption('This message has a newline at the end.\n') + + def test_encrypt_many_newlines_at_end(self): + 
self._test_encryption('Message with lotsa newlines.\n\n\n') + + def test_encrypt_newlines_inside_message(self): + self._test_encryption('Message\nwith\ninterior\nnewlines.') + + def test_encrypt_with_leading_newlines(self): + self._test_encryption('\n\nMessage with leading newlines.') + + def test_encrypt_really_long_message(self): + self._test_encryption(''.join(['abcd' for i in xrange(1024)])) + + +class XenAPIMigrateInstance(stubs.XenAPITestBase): + """Unit test for verifying migration-related actions.""" + + def setUp(self): + super(XenAPIMigrateInstance, self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + firewall_driver='nova.virt.xenapi.firewall.' + 'Dom0IptablesFirewallDriver') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + db_fakes.stub_out_db_instance_api(self.stubs) + xenapi_fake.create_network('fake', CONF.flat_network_bridge) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + self.instance_values = {'id': 1, + 'project_id': self.project_id, + 'user_id': self.user_id, + 'image_ref': 1, + 'kernel_id': None, + 'ramdisk_id': None, + 'root_gb': 5, + 'instance_type_id': '3', # m1.large + 'os_type': 'linux', + 'architecture': 'x86-64'} + + migration_values = { + 'source_compute': 'nova-compute', + 'dest_compute': 'nova-compute', + 'dest_host': '10.127.5.114', + 'status': 'post-migrating', + 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7', + 'old_instance_type_id': 5, + 'new_instance_type_id': 1 + } + self.migration = db.migration_create( + context.get_admin_context(), migration_values) + + fake_processutils.stub_out_processutils_execute(self.stubs) + stubs.stub_out_migration_methods(self.stubs) + stubs.stubout_get_this_vm_uuid(self.stubs) + + def fake_inject_instance_metadata(self, instance, vm): + pass + self.stubs.Set(vmops.VMOps, 'inject_instance_metadata', + fake_inject_instance_metadata) + + def test_resize_xenserver_6(self): + instance = db.instance_create(self.context, self.instance_values) + called = {'resize': False} + + def fake_vdi_resize(*args, **kwargs): + called['resize'] = True + + self.stubs.Set(stubs.FakeSessionForVMTests, + "VDI_resize", fake_vdi_resize) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, + product_version=(6, 0, 0), + product_brand='XenServer') + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vdi_ref = xenapi_fake.create_vdi('hurr', 'fake') + vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid'] + conn._vmops._resize_instance(instance, + {'uuid': vdi_uuid, 'ref': vdi_ref}) + self.assertEqual(called['resize'], True) + + def test_resize_xcp(self): + instance = db.instance_create(self.context, self.instance_values) + called = {'resize': False} + + def fake_vdi_resize(*args, **kwargs): + called['resize'] = True + + self.stubs.Set(stubs.FakeSessionForVMTests, + "VDI_resize", fake_vdi_resize) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, + product_version=(1, 4, 99), + product_brand='XCP') + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vdi_ref = xenapi_fake.create_vdi('hurr', 'fake') + vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid'] + conn._vmops._resize_instance(instance, + {'uuid': vdi_uuid, 'ref': vdi_ref}) + self.assertEqual(called['resize'], True) + + def test_migrate_disk_and_power_off(self): + instance = db.instance_create(self.context, self.instance_values) + xenapi_fake.create_vm(instance['name'], 'Running') + 
instance_type = db.instance_type_get_by_name(self.context, 'm1.large') + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + conn.migrate_disk_and_power_off(self.context, instance, + '127.0.0.1', instance_type, None) + + def test_migrate_disk_and_power_off_passes_exceptions(self): + instance = db.instance_create(self.context, self.instance_values) + xenapi_fake.create_vm(instance['name'], 'Running') + instance_type = db.instance_type_get_by_name(self.context, 'm1.large') + + def fake_raise(*args, **kwargs): + raise exception.MigrationError(reason='test failure') + self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise) + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.assertRaises(exception.MigrationError, + conn.migrate_disk_and_power_off, + self.context, instance, + '127.0.0.1', instance_type, None) + + def test_revert_migrate(self): + instance = create_instance_with_system_metadata(self.context, + self.instance_values) + self.called = False + self.fake_vm_start_called = False + self.fake_finish_revert_migration_called = False + + def fake_vm_start(*args, **kwargs): + self.fake_vm_start_called = True + + def fake_vdi_resize(*args, **kwargs): + self.called = True + + def fake_finish_revert_migration(*args, **kwargs): + self.fake_finish_revert_migration_called = True + + self.stubs.Set(stubs.FakeSessionForVMTests, + "VDI_resize_online", fake_vdi_resize) + self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) + self.stubs.Set(vmops.VMOps, 'finish_revert_migration', + fake_finish_revert_migration) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, + product_version=(4, 0, 0), + product_brand='XenServer') + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) + image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} + base = xenapi_fake.create_vdi('hurr', 'fake') + base_uuid = xenapi_fake.get_record('VDI', base)['uuid'] + cow = xenapi_fake.create_vdi('durr', 'fake') + cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid'] + conn.finish_migration(self.context, self.migration, instance, + dict(base_copy=base_uuid, cow=cow_uuid), + network_info, image_meta, resize_instance=True) + self.assertEqual(self.called, True) + self.assertEqual(self.fake_vm_start_called, True) + + conn.finish_revert_migration(instance, network_info) + self.assertEqual(self.fake_finish_revert_migration_called, True) + + def test_finish_migrate(self): + instance = create_instance_with_system_metadata(self.context, + self.instance_values) + self.called = False + self.fake_vm_start_called = False + + def fake_vm_start(*args, **kwargs): + self.fake_vm_start_called = True + + def fake_vdi_resize(*args, **kwargs): + self.called = True + + self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) + self.stubs.Set(stubs.FakeSessionForVMTests, + "VDI_resize_online", fake_vdi_resize) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests, + product_version=(4, 0, 0), + product_brand='XenServer') + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) + image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} + conn.finish_migration(self.context, self.migration, instance, + dict(base_copy='hurr', cow='durr'), + network_info, image_meta, resize_instance=True) + self.assertEqual(self.called, True) + self.assertEqual(self.fake_vm_start_called, True) + + def 
test_finish_migrate_no_local_storage(self): + tiny_type = flavors.get_instance_type_by_name('m1.tiny') + tiny_type_id = tiny_type['id'] + self.instance_values.update({'instance_type_id': tiny_type_id, + 'root_gb': 0}) + instance = create_instance_with_system_metadata(self.context, + self.instance_values) + + def fake_vdi_resize(*args, **kwargs): + raise Exception("This shouldn't be called") + + self.stubs.Set(stubs.FakeSessionForVMTests, + "VDI_resize_online", fake_vdi_resize) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) + image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} + conn.finish_migration(self.context, self.migration, instance, + dict(base_copy='hurr', cow='durr'), + network_info, image_meta, resize_instance=True) + + def test_finish_migrate_no_resize_vdi(self): + instance = create_instance_with_system_metadata(self.context, + self.instance_values) + + def fake_vdi_resize(*args, **kwargs): + raise Exception("This shouldn't be called") + + self.stubs.Set(stubs.FakeSessionForVMTests, + "VDI_resize_online", fake_vdi_resize) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, + spectacular=True) + # Resize instance would be determined by the compute call + image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'} + conn.finish_migration(self.context, self.migration, instance, + dict(base_copy='hurr', cow='durr'), + network_info, image_meta, resize_instance=False) + + def test_migrate_no_auto_disk_config_no_resize_down(self): + # Resize down should fail when auto_disk_config not set. + instance_values = self.instance_values + instance_values['root_gb'] = 40 + instance_values['auto_disk_config'] = False + instance = db.instance_create(self.context, instance_values) + xenapi_fake.create_vm(instance['name'], 'Running') + instance_type = db.instance_type_get_by_name(self.context, 'm1.small') + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.assertRaises(exception.ResizeError, + conn.migrate_disk_and_power_off, + self.context, instance, + '127.0.0.1', instance_type, None) + + def test_migrate_rollback_when_resize_down_fs_fails(self): + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vmops = conn._vmops + virtapi = vmops._virtapi + + self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown') + self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label') + self.mox.StubOutWithMock(vm_utils, 'resize_disk') + self.mox.StubOutWithMock(vmops, '_migrate_vhd') + self.mox.StubOutWithMock(vm_utils, 'destroy_vdi') + self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely') + self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan') + self.mox.StubOutWithMock(virtapi, 'instance_update') + + instance = {'auto_disk_config': True, 'uuid': 'uuid'} + vm_ref = "vm_ref" + dest = "dest" + instance_type = "type" + sr_path = "sr_path" + + virtapi.instance_update(self.context, 'uuid', {'progress': 20.0}) + vmops._resize_ensure_vm_is_shutdown(instance, vm_ref) + vmops._apply_orig_vm_name_label(instance, vm_ref) + old_vdi_ref = "old_ref" + vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn( + (old_vdi_ref, None)) + virtapi.instance_update(self.context, 'uuid', {'progress': 40.0}) + new_vdi_ref = "new_ref" + new_vdi_uuid = "new_uuid" + vm_utils.resize_disk(vmops._session, instance, old_vdi_ref, + instance_type).AndReturn((new_vdi_ref, new_vdi_uuid)) + 
virtapi.instance_update(self.context, 'uuid', {'progress': 60.0}) + vmops._migrate_vhd(instance, new_vdi_uuid, dest, + sr_path, 0).AndRaise( + exception.ResizeError(reason="asdf")) + + vm_utils.destroy_vdi(vmops._session, new_vdi_ref) + vmops._restore_orig_vm_and_cleanup_orphan(instance, None) + + self.mox.ReplayAll() + + self.assertRaises(exception.InstanceFaultRollback, + vmops._migrate_disk_resizing_down, self.context, + instance, dest, instance_type, vm_ref, sr_path) + + def test_resize_ensure_vm_is_shutdown_cleanly(self): + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vmops = conn._vmops + fake_instance = {'uuid': 'uuid'} + + self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') + self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') + self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') + + vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) + vm_utils.clean_shutdown_vm(vmops._session, fake_instance, + "ref").AndReturn(True) + + self.mox.ReplayAll() + + vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") + + def test_resize_ensure_vm_is_shutdown_forced(self): + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vmops = conn._vmops + fake_instance = {'uuid': 'uuid'} + + self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') + self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') + self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') + + vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) + vm_utils.clean_shutdown_vm(vmops._session, fake_instance, + "ref").AndReturn(False) + vm_utils.hard_shutdown_vm(vmops._session, fake_instance, + "ref").AndReturn(True) + + self.mox.ReplayAll() + + vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") + + def test_resize_ensure_vm_is_shutdown_fails(self): + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vmops = conn._vmops + fake_instance = {'uuid': 'uuid'} + + self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') + self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') + self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') + + vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False) + vm_utils.clean_shutdown_vm(vmops._session, fake_instance, + "ref").AndReturn(False) + vm_utils.hard_shutdown_vm(vmops._session, fake_instance, + "ref").AndReturn(False) + + self.mox.ReplayAll() + + self.assertRaises(exception.ResizeError, + vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref") + + def test_resize_ensure_vm_is_shutdown_already_shutdown(self): + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + vmops = conn._vmops + fake_instance = {'uuid': 'uuid'} + + self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown') + self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm') + self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm') + + vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True) + + self.mox.ReplayAll() + + vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") + + +class XenAPIImageTypeTestCase(test.TestCase): + """Test ImageType class.""" + + def test_to_string(self): + # Can convert from type id to type string. 
+ self.assertEquals( + vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL), + vm_utils.ImageType.KERNEL_STR) + + def _assert_role(self, expected_role, image_type_id): + self.assertEquals( + expected_role, + vm_utils.ImageType.get_role(image_type_id)) + + def test_get_image_role_kernel(self): + self._assert_role('kernel', vm_utils.ImageType.KERNEL) + + def test_get_image_role_ramdisk(self): + self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK) + + def test_get_image_role_disk(self): + self._assert_role('root', vm_utils.ImageType.DISK) + + def test_get_image_role_disk_raw(self): + self._assert_role('root', vm_utils.ImageType.DISK_RAW) + + def test_get_image_role_disk_vhd(self): + self._assert_role('root', vm_utils.ImageType.DISK_VHD) + + +class XenAPIDetermineDiskImageTestCase(test.TestCase): + """Unit tests for code that detects the ImageType.""" + def assert_disk_type(self, image_meta, expected_disk_type): + actual = vm_utils.determine_disk_image_type(image_meta) + self.assertEqual(expected_disk_type, actual) + + def test_machine(self): + image_meta = {'id': 'a', 'disk_format': 'ami'} + self.assert_disk_type(image_meta, vm_utils.ImageType.DISK) + + def test_raw(self): + image_meta = {'id': 'a', 'disk_format': 'raw'} + self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW) + + def test_vhd(self): + image_meta = {'id': 'a', 'disk_format': 'vhd'} + self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD) + + def test_none(self): + image_meta = None + self.assert_disk_type(image_meta, None) + + +class XenAPIDetermineIsPVTestCase(test.TestCase): + """Unit tests for code that detects the PV status based on ImageType.""" + def assert_pv_status(self, disk_image_type, os_type, expected_pv_status): + session = None + vdi_ref = None + actual = vm_utils.determine_is_pv(session, vdi_ref, + disk_image_type, os_type) + self.assertEqual(expected_pv_status, actual) + + def test_windows_vhd(self): + self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'windows', False) + + def test_linux_vhd(self): + self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'linux', True) + + @stub_vm_utils_with_vdi_attached_here + def test_raw(self): + self.assert_pv_status(vm_utils.ImageType.DISK_RAW, 'linux', True) + + def test_disk(self): + self.assert_pv_status(vm_utils.ImageType.DISK, None, True) + + def test_iso(self): + self.assert_pv_status(vm_utils.ImageType.DISK_ISO, None, False) + + @stub_vm_utils_with_vdi_attached_here + def test_none(self): + self.assert_pv_status(None, None, True) + + +class CompareVersionTestCase(test.TestCase): + def test_less_than(self): + # Test that cmp_version compares a as less than b. + self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0) + + def test_greater_than(self): + # Test that cmp_version compares a as greater than b. + self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0) + + def test_equal(self): + # Test that cmp_version compares a as equal to b. + self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0) + + def test_non_lexical(self): + # Test that cmp_version compares non-lexically. + self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0) + + def test_length(self): + # Test that cmp_version compares by length as last resort. 
+ self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0) + + +class XenAPIHostTestCase(stubs.XenAPITestBase): + """Tests HostState, which holds metrics from XenServer that get + reported back to the Schedulers.""" + + def setUp(self): + super(XenAPIHostTestCase, self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + xenapi_fake.create_local_srs() + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + def test_host_state(self): + stats = self.conn.get_host_stats() + self.assertEquals(stats['disk_total'], 40000) + self.assertEquals(stats['disk_used'], 20000) + self.assertEquals(stats['host_memory_total'], 10) + self.assertEquals(stats['host_memory_overhead'], 20) + self.assertEquals(stats['host_memory_free'], 30) + self.assertEquals(stats['host_memory_free_computed'], 40) + self.assertEquals(stats['hypervisor_hostname'], 'fake-xenhost') + + def test_host_state_missing_sr(self): + def fake_safe_find_sr(session): + raise exception.StorageRepositoryNotFound('not there') + + self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr) + self.assertRaises(exception.StorageRepositoryNotFound, + self.conn.get_host_stats) + + def _test_host_action(self, method, action, expected=None): + result = method('host', action) + if not expected: + expected = action + self.assertEqual(result, expected) + + def test_host_reboot(self): + self._test_host_action(self.conn.host_power_action, 'reboot') + + def test_host_shutdown(self): + self._test_host_action(self.conn.host_power_action, 'shutdown') + + def test_host_startup(self): + self.assertRaises(NotImplementedError, + self.conn.host_power_action, 'host', 'startup') + + def test_host_maintenance_on(self): + self._test_host_action(self.conn.host_maintenance_mode, + True, 'on_maintenance') + + def test_host_maintenance_off(self): + self._test_host_action(self.conn.host_maintenance_mode, + False, 'off_maintenance') + + def test_set_enable_host_enable(self): + self._test_host_action(self.conn.set_host_enabled, True, 'enabled') + + def test_set_enable_host_disable(self): + self._test_host_action(self.conn.set_host_enabled, False, 'disabled') + + def test_get_host_uptime(self): + result = self.conn.get_host_uptime('host') + self.assertEqual(result, 'fake uptime') + + def test_supported_instances_is_included_in_host_state(self): + stats = self.conn.get_host_stats() + self.assertTrue('supported_instances' in stats) + + def test_supported_instances_is_calculated_by_to_supported_instances(self): + + def to_supported_instances(somedata): + self.assertEquals(None, somedata) + return "SOMERETURNVALUE" + self.stubs.Set(host, 'to_supported_instances', to_supported_instances) + + stats = self.conn.get_host_stats() + self.assertEquals("SOMERETURNVALUE", stats['supported_instances']) + + +class ToSupportedInstancesTestCase(test.TestCase): + def test_default_return_value(self): + self.assertEquals([], + host.to_supported_instances(None)) + + def test_return_value(self): + self.assertEquals([('x86_64', 'xapi', 'xen')], + host.to_supported_instances([u'xen-3.0-x86_64'])) + + def test_invalid_values_do_not_break(self): + self.assertEquals([('x86_64', 'xapi', 'xen')], + host.to_supported_instances([u'xen-3.0-x86_64', 'spam'])) + + def test_multiple_values(self): + self.assertEquals( + [ + ('x86_64', 'xapi', 'xen'), + ('x86_32', 'xapi', 'hvm') + ], + host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32']) + ) + + +class 
XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase): + def setUp(self): + super(XenAPIAutoDiskConfigTestCase, self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + firewall_driver='nova.virt.xenapi.firewall.' + 'Dom0IptablesFirewallDriver') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self.user_id = 'fake' + self.project_id = 'fake' + + self.instance_values = {'id': 1, + 'project_id': self.project_id, + 'user_id': self.user_id, + 'image_ref': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'root_gb': 20, + 'instance_type_id': '3', # m1.large + 'os_type': 'linux', + 'architecture': 'x86-64'} + + self.context = context.RequestContext(self.user_id, self.project_id) + + def fake_create_vbd(session, vm_ref, vdi_ref, userdevice, + vbd_type='disk', read_only=False, bootable=True, + osvol=False): + pass + + self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd) + + def assertIsPartitionCalled(self, called): + marker = {"partition_called": False} + + def fake_resize_part_and_fs(dev, start, old, new): + marker["partition_called"] = True + self.stubs.Set(vm_utils, "_resize_part_and_fs", + fake_resize_part_and_fs) + + ctx = context.RequestContext(self.user_id, self.project_id) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + + disk_image_type = vm_utils.ImageType.DISK_VHD + instance = create_instance_with_system_metadata(self.context, + self.instance_values) + vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted') + vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake') + + vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid'] + vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}} + + self.conn._vmops._attach_disks(instance, vm_ref, instance['name'], + vdis, disk_image_type) + + self.assertEqual(marker["partition_called"], called) + + def test_instance_not_auto_disk_config(self): + """Should not partition unless instance is marked as + auto_disk_config. + """ + self.instance_values['auto_disk_config'] = False + self.assertIsPartitionCalled(False) + + @stub_vm_utils_with_vdi_attached_here + def test_instance_auto_disk_config_doesnt_pass_fail_safes(self): + # Should not partition unless fail safes pass. + self.instance_values['auto_disk_config'] = True + + def fake_get_partitions(dev): + return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')] + self.stubs.Set(vm_utils, "_get_partitions", + fake_get_partitions) + + self.assertIsPartitionCalled(False) + + @stub_vm_utils_with_vdi_attached_here + def test_instance_auto_disk_config_passes_fail_safes(self): + """Should partition if instance is marked as auto_disk_config=True and + virt-layer specific fail-safe checks pass. + """ + self.instance_values['auto_disk_config'] = True + + def fake_get_partitions(dev): + return [(1, 0, 100, 'ext4')] + self.stubs.Set(vm_utils, "_get_partitions", + fake_get_partitions) + + self.assertIsPartitionCalled(True) + + +class XenAPIGenerateLocal(stubs.XenAPITestBase): + """Test generating of local disks, like swap and ephemeral.""" + def setUp(self): + super(XenAPIGenerateLocal, self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + firewall_driver='nova.virt.xenapi.firewall.' 
+ 'Dom0IptablesFirewallDriver') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + db_fakes.stub_out_db_instance_api(self.stubs) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self.user_id = 'fake' + self.project_id = 'fake' + + self.instance_values = {'id': 1, + 'project_id': self.project_id, + 'user_id': self.user_id, + 'image_ref': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'root_gb': 20, + 'instance_type_id': '3', # m1.large + 'os_type': 'linux', + 'architecture': 'x86-64'} + + self.context = context.RequestContext(self.user_id, self.project_id) + + def fake_create_vbd(session, vm_ref, vdi_ref, userdevice, + vbd_type='disk', read_only=False, bootable=True, + osvol=False, empty=False, unpluggable=True): + return session.call_xenapi('VBD.create', {'VM': vm_ref, + 'VDI': vdi_ref}) + + self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd) + + def assertCalled(self, instance, + disk_image_type=vm_utils.ImageType.DISK_VHD): + ctx = context.RequestContext(self.user_id, self.project_id) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + + vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted') + vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake') + + vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid'] + + vdi_key = 'root' + if disk_image_type == vm_utils.ImageType.DISK_ISO: + vdi_key = 'iso' + vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}} + + self.called = False + self.conn._vmops._attach_disks(instance, vm_ref, instance['name'], + vdis, disk_image_type) + self.assertTrue(self.called) + + def test_generate_swap(self): + # Test swap disk generation. + instance_values = dict(self.instance_values, instance_type_id=5) + instance = create_instance_with_system_metadata(self.context, + instance_values) + + def fake_generate_swap(*args, **kwargs): + self.called = True + self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap) + + self.assertCalled(instance) + + def test_generate_ephemeral(self): + # Test ephemeral disk generation. 
+ instance_values = dict(self.instance_values, instance_type_id=4) + instance = create_instance_with_system_metadata(self.context, + instance_values) + + def fake_generate_ephemeral(*args): + self.called = True + self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) + + self.assertCalled(instance) + + def test_generate_iso_blank_root_disk(self): + instance_values = dict(self.instance_values, instance_type_id=4) + instance_values.pop('kernel_id') + instance_values.pop('ramdisk_id') + instance = create_instance_with_system_metadata(self.context, + instance_values) + + def fake_generate_ephemeral(*args): + pass + self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) + + def fake_generate_iso(*args): + self.called = True + self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk', + fake_generate_iso) + + self.assertCalled(instance, vm_utils.ImageType.DISK_ISO) + + +class XenAPIBWCountersTestCase(stubs.XenAPITestBase): + FAKE_VMS = {'test1:ref': dict(name_label='test1', + other_config=dict(nova_uuid='hash'), + domid='12', + _vifmap={'0': "a:b:c:d...", + '1': "e:f:12:q..."}), + 'test2:ref': dict(name_label='test2', + other_config=dict(nova_uuid='hash'), + domid='42', + _vifmap={'0': "a:3:c:d...", + '1': "e:f:42:q..."}), + } + + def setUp(self): + super(XenAPIBWCountersTestCase, self).setUp() + self.stubs.Set(vm_utils, 'list_vms', + XenAPIBWCountersTestCase._fake_list_vms) + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + firewall_driver='nova.virt.xenapi.firewall.' + 'Dom0IptablesFirewallDriver') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + def _fake_get_vif_device_map(vm_rec): + return vm_rec['_vifmap'] + + self.stubs.Set(self.conn._vmops, "_get_vif_device_map", + _fake_get_vif_device_map) + + @classmethod + def _fake_list_vms(cls, session): + return cls.FAKE_VMS.iteritems() + + @classmethod + def _fake_fetch_bandwidth_mt(cls, session): + return {} + + @classmethod + def _fake_fetch_bandwidth(cls, session): + return {'42': + {'0': {'bw_in': 21024, 'bw_out': 22048}, + '1': {'bw_in': 231337, 'bw_out': 221212121}}, + '12': + {'0': {'bw_in': 1024, 'bw_out': 2048}, + '1': {'bw_in': 31337, 'bw_out': 21212121}}, + } + + def test_get_all_bw_counters(self): + instances = [dict(name='test1', uuid='1-2-3'), + dict(name='test2', uuid='4-5-6')] + + self.stubs.Set(vm_utils, 'fetch_bandwidth', + XenAPIBWCountersTestCase._fake_fetch_bandwidth) + result = self.conn.get_all_bw_counters(instances) + self.assertEqual(len(result), 4) + self.assertIn(dict(uuid='1-2-3', + mac_address="a:b:c:d...", + bw_in=1024, + bw_out=2048), result) + self.assertIn(dict(uuid='1-2-3', + mac_address="e:f:12:q...", + bw_in=31337, + bw_out=21212121), result) + + self.assertIn(dict(uuid='4-5-6', + mac_address="a:3:c:d...", + bw_in=21024, + bw_out=22048), result) + self.assertIn(dict(uuid='4-5-6', + mac_address="e:f:42:q...", + bw_in=231337, + bw_out=221212121), result) + + def test_get_all_bw_counters_in_failure_case(self): + """Test that get_all_bw_conters returns an empty list when + no data returned from Xenserver. c.f. bug #910045. 
+ """ + instances = [dict(name='instance-0001', uuid='1-2-3-4-5')] + + self.stubs.Set(vm_utils, 'fetch_bandwidth', + XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt) + result = self.conn.get_all_bw_counters(instances) + self.assertEqual(result, []) + + +# TODO(salvatore-orlando): this class and +# nova.tests.virt.test_libvirt.IPTablesFirewallDriverTestCase share a lot of +# code. Consider abstracting common code in a base class for firewall driver +# testing. +class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): + + _in_rules = [ + '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', + '*nat', + ':PREROUTING ACCEPT [1170:189210]', + ':INPUT ACCEPT [844:71028]', + ':OUTPUT ACCEPT [5149:405186]', + ':POSTROUTING ACCEPT [5063:386098]', + '# Completed on Mon Dec 6 11:54:13 2010', + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*mangle', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '[0:0] -A FORWARD -o virbr0 -j REJECT ' + '--reject-with icmp-port-unreachable ', + '[0:0] -A FORWARD -i virbr0 -j REJECT ' + '--reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010', + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*filter', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '[0:0] -A FORWARD -o virbr0 -j REJECT ' + '--reject-with icmp-port-unreachable ', + '[0:0] -A FORWARD -i virbr0 -j REJECT ' + '--reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010', + ] + + _in6_filter_rules = [ + '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', + '*filter', + ':INPUT ACCEPT [349155:75810423]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [349256:75777230]', + 'COMMIT', + '# Completed on Tue Jan 18 23:47:56 2011', + ] + + def setUp(self): + super(XenAPIDom0IptablesFirewallTestCase, self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + instance_name_template='%d', + firewall_driver='nova.virt.xenapi.firewall.' 
+ 'Dom0IptablesFirewallDriver') + xenapi_fake.create_local_srs() + xenapi_fake.create_local_pifs() + self.user_id = 'mappin' + self.project_id = 'fake' + stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests, + test_case=self) + self.context = context.RequestContext(self.user_id, self.project_id) + self.network = importutils.import_object(CONF.network_manager) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.fw = self.conn._vmops.firewall_driver + + def _create_instance_ref(self): + return db.instance_create(self.context, + {'user_id': self.user_id, + 'project_id': self.project_id, + 'instance_type_id': 1}) + + def _create_test_security_group(self): + admin_ctxt = context.get_admin_context() + secgroup = db.security_group_create(admin_ctxt, + {'user_id': self.user_id, + 'project_id': self.project_id, + 'name': 'testgroup', + 'description': 'test group'}) + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'icmp', + 'from_port': -1, + 'to_port': -1, + 'cidr': '192.168.11.0/24'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'icmp', + 'from_port': 8, + 'to_port': -1, + 'cidr': '192.168.11.0/24'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'cidr': '192.168.10.0/24'}) + return secgroup + + def _validate_security_group(self): + in_rules = filter(lambda l: not l.startswith('#'), + self._in_rules) + for rule in in_rules: + if 'nova' not in rule: + self.assertTrue(rule in self._out_rules, + 'Rule went missing: %s' % rule) + + instance_chain = None + for rule in self._out_rules: + # This is pretty crude, but it'll do for now + # last two octets change + if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule): + instance_chain = rule.split(' ')[-1] + break + self.assertTrue(instance_chain, "The instance chain wasn't added") + security_group_chain = None + for rule in self._out_rules: + # This is pretty crude, but it'll do for now + if '-A %s -j' % instance_chain in rule: + security_group_chain = rule.split(' ')[-1] + break + self.assertTrue(security_group_chain, + "The security group chain wasn't added") + + regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp' + ' -s 192.168.11.0/24') + self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, + "ICMP acceptance rule wasn't added") + + regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp' + ' --icmp-type 8 -s 192.168.11.0/24') + self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, + "ICMP Echo Request acceptance rule wasn't added") + + regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81' + ' -s 192.168.10.0/24') + self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, + "TCP port 80/81 acceptance rule wasn't added") + + def test_static_filters(self): + instance_ref = self._create_instance_ref() + src_instance_ref = self._create_instance_ref() + admin_ctxt = context.get_admin_context() + secgroup = self._create_test_security_group() + + src_secgroup = db.security_group_create(admin_ctxt, + {'user_id': self.user_id, + 'project_id': self.project_id, + 'name': 'testsourcegroup', + 'description': 'src group'}) + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'group_id': src_secgroup['id']}) + + db.instance_add_security_group(admin_ctxt, instance_ref['uuid'], + secgroup['id']) + 
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'], + src_secgroup['id']) + instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id']) + + network_model = fake_network.fake_get_instance_nw_info(self.stubs, + 1, spectacular=True) + + fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs, + lambda *a, **kw: network_model) + + network_info = network_model.legacy() + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) + + self._validate_security_group() + # Extra test for TCP acceptance rules + for ip in network_model.fixed_ips(): + if ip['version'] != 4: + continue + regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp' + ' --dport 80:81 -s %s' % ip['address']) + self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, + "TCP port 80/81 acceptance rule wasn't added") + + db.instance_destroy(admin_ctxt, instance_ref['uuid']) + + def test_filters_for_instance_with_ip_v6(self): + self.flags(use_ipv6=True) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1) + rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) + self.assertEquals(len(rulesv4), 2) + self.assertEquals(len(rulesv6), 1) + + def test_filters_for_instance_without_ip_v6(self): + self.flags(use_ipv6=False) + network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1) + rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) + self.assertEquals(len(rulesv4), 2) + self.assertEquals(len(rulesv6), 0) + + def test_multinic_iptables(self): + ipv4_rules_per_addr = 1 + ipv4_addr_per_network = 2 + ipv6_rules_per_addr = 1 + ipv6_addr_per_network = 1 + networks_count = 5 + instance_ref = self._create_instance_ref() + _get_instance_nw_info = fake_network.fake_get_instance_nw_info + network_info = _get_instance_nw_info(self.stubs, + networks_count, + ipv4_addr_per_network) + ipv4_len = len(self.fw.iptables.ipv4['filter'].rules) + ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) + inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, + network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) + ipv4 = self.fw.iptables.ipv4['filter'].rules + ipv6 = self.fw.iptables.ipv6['filter'].rules + ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len + ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len + # Extra rules are for the DHCP request + rules = (ipv4_rules_per_addr * ipv4_addr_per_network * + networks_count) + 2 + self.assertEquals(ipv4_network_rules, rules) + self.assertEquals(ipv6_network_rules, + ipv6_rules_per_addr * ipv6_addr_per_network * networks_count) + + def test_do_refresh_security_group_rules(self): + admin_ctxt = context.get_admin_context() + instance_ref = self._create_instance_ref() + network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1) + secgroup = self._create_test_security_group() + db.instance_add_security_group(admin_ctxt, instance_ref['uuid'], + secgroup['id']) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.instances[instance_ref['id']] = instance_ref + self._validate_security_group() + # add a rule to the security group + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'udp', + 'from_port': 200, + 'to_port': 299, + 'cidr': '192.168.99.0/24'}) + #validate the extra rule + self.fw.refresh_security_group_rules(secgroup) + regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp 
--dport 200:299' + ' -s 192.168.99.0/24') + self.assertTrue(len(filter(regex.match, self._out_rules)) > 0, + "Rules were not updated properly." + "The rule for UDP acceptance is missing") + + def test_provider_firewall_rules(self): + # setup basic instance data + instance_ref = self._create_instance_ref() + # FRAGILE: as in libvirt tests + # peeks at how the firewall names chains + chain_name = 'inst-%s' % instance_ref['id'] + + network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(0, len(rules)) + + admin_ctxt = context.get_admin_context() + # add a rule and send the update message, check for 1 rule + provider_fw0 = db.provider_fw_rule_create(admin_ctxt, + {'protocol': 'tcp', + 'cidr': '10.99.99.99/32', + 'from_port': 1, + 'to_port': 65535}) + self.fw.refresh_provider_fw_rules() + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(1, len(rules)) + + # Add another, refresh, and make sure number of rules goes to two + provider_fw1 = db.provider_fw_rule_create(admin_ctxt, + {'protocol': 'udp', + 'cidr': '10.99.99.99/32', + 'from_port': 1, + 'to_port': 65535}) + self.fw.refresh_provider_fw_rules() + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(2, len(rules)) + + # create the instance filter and make sure it has a jump rule + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) + inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == chain_name] + jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] + provjump_rules = [] + # IptablesTable doesn't make rules unique internally + for rule in jump_rules: + if 'provider' in rule.rule and rule not in provjump_rules: + provjump_rules.append(rule) + self.assertEqual(1, len(provjump_rules)) + + # remove a rule from the db, cast to compute to refresh rule + db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id']) + self.fw.refresh_provider_fw_rules() + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(1, len(rules)) + + +class XenAPISRSelectionTestCase(stubs.XenAPITestBase): + """Unit tests for testing we find the right SR.""" + def test_safe_find_sr_raise_exception(self): + # Ensure StorageRepositoryNotFound is raise when wrong filter. + self.flags(sr_matching_filter='yadayadayada') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + self.assertRaises(exception.StorageRepositoryNotFound, + vm_utils.safe_find_sr, session) + + def test_safe_find_sr_local_storage(self): + # Ensure the default local-storage is found. 
+ self.flags(sr_matching_filter='other-config:i18n-key=local-storage') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + host_ref = xenapi_fake.get_all('host')[0] + local_sr = xenapi_fake.create_sr( + name_label='Fake Storage', + type='lvm', + other_config={'i18n-original-value-name_label': + 'Local storage', + 'i18n-key': 'local-storage'}, + host_ref=host_ref) + expected = vm_utils.safe_find_sr(session) + self.assertEqual(local_sr, expected) + + def test_safe_find_sr_by_other_criteria(self): + # Ensure the SR is found when using a different filter. + self.flags(sr_matching_filter='other-config:my_fake_sr=true') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + host_ref = xenapi_fake.get_all('host')[0] + local_sr = xenapi_fake.create_sr(name_label='Fake Storage', + type='lvm', + other_config={'my_fake_sr': 'true'}, + host_ref=host_ref) + expected = vm_utils.safe_find_sr(session) + self.assertEqual(local_sr, expected) + + def test_safe_find_sr_default(self): + # Ensure the default SR is found regardless of other-config. + self.flags(sr_matching_filter='default-sr:true') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass', + fake.FakeVirtAPI()) + pool_ref = xenapi_fake.create_pool('') + expected = vm_utils.safe_find_sr(session) + self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref), + expected) + + +def _create_service_entries(context, values={'avail_zone1': ['fake_host1', + 'fake_host2'], + 'avail_zone2': ['fake_host3'], }): + for avail_zone, hosts in values.iteritems(): + for host in hosts: + db.service_create(context, + {'host': host, + 'binary': 'nova-compute', + 'topic': 'compute', + 'report_count': 0}) + return values + + +class XenAPIAggregateTestCase(stubs.XenAPITestBase): + """Unit tests for aggregate operations.""" + def setUp(self): + super(XenAPIAggregateTestCase, self).setUp() + self.flags(xenapi_connection_url='http://test_url', + xenapi_connection_username='test_user', + xenapi_connection_password='test_pass', + instance_name_template='%d', + firewall_driver='nova.virt.xenapi.firewall.' 
+ 'Dom0IptablesFirewallDriver', + host='host', + compute_driver='xenapi.XenAPIDriver', + default_availability_zone='avail_zone1') + self.flags(use_local=True, group='conductor') + host_ref = xenapi_fake.get_all('host')[0] + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.context = context.get_admin_context() + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.compute = importutils.import_object(CONF.compute_manager) + self.api = compute_api.AggregateAPI() + values = {'name': 'test_aggr', + 'metadata': {'availability_zone': 'test_zone', + pool_states.POOL_FLAG: 'XenAPI'}} + self.aggr = db.aggregate_create(self.context, values) + self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', + 'master_compute': 'host', + 'availability_zone': 'fake_zone', + pool_states.KEY: pool_states.ACTIVE, + 'host': xenapi_fake.get_record('host', + host_ref)['uuid']} + + def test_pool_add_to_aggregate_called_by_driver(self): + + calls = [] + + def pool_add_to_aggregate(context, aggregate, host, slave_info=None): + self.assertEquals("CONTEXT", context) + self.assertEquals("AGGREGATE", aggregate) + self.assertEquals("HOST", host) + self.assertEquals("SLAVEINFO", slave_info) + calls.append(pool_add_to_aggregate) + self.stubs.Set(self.conn._pool, + "add_to_aggregate", + pool_add_to_aggregate) + + self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", + slave_info="SLAVEINFO") + + self.assertTrue(pool_add_to_aggregate in calls) + + def test_pool_remove_from_aggregate_called_by_driver(self): + + calls = [] + + def pool_remove_from_aggregate(context, aggregate, host, + slave_info=None): + self.assertEquals("CONTEXT", context) + self.assertEquals("AGGREGATE", aggregate) + self.assertEquals("HOST", host) + self.assertEquals("SLAVEINFO", slave_info) + calls.append(pool_remove_from_aggregate) + self.stubs.Set(self.conn._pool, + "remove_from_aggregate", + pool_remove_from_aggregate) + + self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", + slave_info="SLAVEINFO") + + self.assertTrue(pool_remove_from_aggregate in calls) + + def test_add_to_aggregate_for_first_host_sets_metadata(self): + def fake_init_pool(id, name): + fake_init_pool.called = True + self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool) + + aggregate = self._aggregate_setup() + self.conn._pool.add_to_aggregate(self.context, aggregate, "host") + result = db.aggregate_get(self.context, aggregate['id']) + self.assertTrue(fake_init_pool.called) + self.assertThat(self.fake_metadata, + matchers.DictMatches(result['metadetails'])) + + def test_join_slave(self): + # Ensure join_slave gets called when the request gets to master. 
+ def fake_join_slave(id, compute_uuid, host, url, user, password): + fake_join_slave.called = True + self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave) + + aggregate = self._aggregate_setup(hosts=['host', 'host2'], + metadata=self.fake_metadata) + self.conn._pool.add_to_aggregate(self.context, aggregate, "host2", + dict(compute_uuid='fake_uuid', + url='fake_url', + user='fake_user', + passwd='fake_pass', + xenhost_uuid='fake_uuid')) + self.assertTrue(fake_join_slave.called) + + def test_add_to_aggregate_first_host(self): + def fake_pool_set_name_label(self, session, pool_ref, name): + fake_pool_set_name_label.called = True + self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label", + fake_pool_set_name_label) + self.conn._session.call_xenapi("pool.create", {"name": "asdf"}) + + values = {"name": 'fake_aggregate', + 'metadata': {'availability_zone': 'fake_zone'}} + result = db.aggregate_create(self.context, values) + metadata = {'availability_zone': 'fake_zone', + pool_states.POOL_FLAG: "XenAPI", + pool_states.KEY: pool_states.CREATED} + db.aggregate_metadata_add(self.context, result['id'], metadata) + + db.aggregate_host_add(self.context, result['id'], "host") + aggregate = db.aggregate_get(self.context, result['id']) + self.assertEqual(["host"], aggregate['hosts']) + self.assertEqual(metadata, aggregate['metadetails']) + + self.conn._pool.add_to_aggregate(self.context, aggregate, "host") + self.assertTrue(fake_pool_set_name_label.called) + + def test_remove_from_aggregate_called(self): + def fake_remove_from_aggregate(context, aggregate, host): + fake_remove_from_aggregate.called = True + self.stubs.Set(self.conn._pool, + "remove_from_aggregate", + fake_remove_from_aggregate) + + self.conn.remove_from_aggregate(None, None, None) + self.assertTrue(fake_remove_from_aggregate.called) + + def test_remove_from_empty_aggregate(self): + result = self._aggregate_setup() + self.assertRaises(exception.InvalidAggregateAction, + self.conn._pool.remove_from_aggregate, + self.context, result, "test_host") + + def test_remove_slave(self): + # Ensure eject slave gets called. + def fake_eject_slave(id, compute_uuid, host_uuid): + fake_eject_slave.called = True + self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave) + + self.fake_metadata['host2'] = 'fake_host2_uuid' + aggregate = self._aggregate_setup(hosts=['host', 'host2'], + metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) + self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") + self.assertTrue(fake_eject_slave.called) + + def test_remove_master_solo(self): + # Ensure metadata are cleared after removal. + def fake_clear_pool(id): + fake_clear_pool.called = True + self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool) + + aggregate = self._aggregate_setup(metadata=self.fake_metadata) + self.conn._pool.remove_from_aggregate(self.context, aggregate, "host") + result = db.aggregate_get(self.context, aggregate['id']) + self.assertTrue(fake_clear_pool.called) + self.assertThat({'availability_zone': 'fake_zone', + pool_states.POOL_FLAG: 'XenAPI', + pool_states.KEY: pool_states.ACTIVE}, + matchers.DictMatches(result['metadetails'])) + + def test_remote_master_non_empty_pool(self): + # Ensure AggregateError is raised if removing the master. 
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'], + metadata=self.fake_metadata) + + self.assertRaises(exception.InvalidAggregateAction, + self.conn._pool.remove_from_aggregate, + self.context, aggregate, "host") + + def _aggregate_setup(self, aggr_name='fake_aggregate', + aggr_zone='fake_zone', + aggr_state=pool_states.CREATED, + hosts=['host'], metadata=None): + values = {"name": aggr_name} + result = db.aggregate_create(self.context, values, + metadata={'availability_zone': aggr_zone}) + pool_flag = {pool_states.POOL_FLAG: "XenAPI", + pool_states.KEY: aggr_state} + db.aggregate_metadata_add(self.context, result['id'], pool_flag) + + for host in hosts: + db.aggregate_host_add(self.context, result['id'], host) + if metadata: + db.aggregate_metadata_add(self.context, result['id'], metadata) + return db.aggregate_get(self.context, result['id']) + + def test_add_host_to_aggregate_invalid_changing_status(self): + """Ensure InvalidAggregateAction is raised when adding host while + aggregate is not ready.""" + aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) + self.assertRaises(exception.InvalidAggregateAction, + self.conn.add_to_aggregate, self.context, + aggregate, 'host') + + def test_add_host_to_aggregate_invalid_dismissed_status(self): + """Ensure InvalidAggregateAction is raised when aggregate is + deleted.""" + aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) + self.assertRaises(exception.InvalidAggregateAction, + self.conn.add_to_aggregate, self.context, + aggregate, 'fake_host') + + def test_add_host_to_aggregate_invalid_error_status(self): + """Ensure InvalidAggregateAction is raised when aggregate is + in error.""" + aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR) + self.assertRaises(exception.InvalidAggregateAction, + self.conn.add_to_aggregate, self.context, + aggregate, 'fake_host') + + def test_remove_host_from_aggregate_error(self): + # Ensure we can remove a host from an aggregate even if in error. + values = _create_service_entries(self.context) + fake_zone = values.keys()[0] + aggr = self.api.create_aggregate(self.context, + 'fake_aggregate', fake_zone) + # let's mock the fact that the aggregate is ready! + metadata = {pool_states.POOL_FLAG: "XenAPI", + pool_states.KEY: pool_states.ACTIVE} + db.aggregate_metadata_add(self.context, aggr['id'], metadata) + for host in values[fake_zone]: + aggr = self.api.add_host_to_aggregate(self.context, + aggr['id'], host) + # let's mock the fact that the aggregate is in error! 
+ status = {'operational_state': pool_states.ERROR} + expected = self.api.remove_host_from_aggregate(self.context, + aggr['id'], + values[fake_zone][0]) + self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts'])) + self.assertEqual(expected['metadata'][pool_states.KEY], + pool_states.ACTIVE) + + def test_remove_host_from_aggregate_invalid_dismissed_status(self): + """Ensure InvalidAggregateAction is raised when aggregate is + deleted.""" + aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) + self.assertRaises(exception.InvalidAggregateAction, + self.conn.remove_from_aggregate, self.context, + aggregate, 'fake_host') + + def test_remove_host_from_aggregate_invalid_changing_status(self): + """Ensure InvalidAggregateAction is raised when aggregate is + changing.""" + aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) + self.assertRaises(exception.InvalidAggregateAction, + self.conn.remove_from_aggregate, self.context, + aggregate, 'fake_host') + + def test_add_aggregate_host_raise_err(self): + # Ensure the undo operation works correctly on add. + def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): + raise exception.AggregateError( + aggregate_id='', action='', reason='') + self.stubs.Set(self.compute.driver, "add_to_aggregate", + fake_driver_add_to_aggregate) + metadata = {pool_states.POOL_FLAG: "XenAPI", + pool_states.KEY: pool_states.ACTIVE} + db.aggregate_metadata_add(self.context, self.aggr['id'], metadata) + db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host') + + self.assertRaises(exception.AggregateError, + self.compute.add_aggregate_host, + self.context, "fake_host", + aggregate=jsonutils.to_primitive(self.aggr)) + excepted = db.aggregate_get(self.context, self.aggr['id']) + self.assertEqual(excepted['metadetails'][pool_states.KEY], + pool_states.ERROR) + self.assertEqual(excepted['hosts'], []) + + +class MockComputeAPI(object): + def __init__(self): + self._mock_calls = [] + + def add_aggregate_host(self, ctxt, aggregate, + host_param, host, slave_info): + self._mock_calls.append(( + self.add_aggregate_host, ctxt, aggregate, + host_param, host, slave_info)) + + def remove_aggregate_host(self, ctxt, aggregate_id, host_param, + host, slave_info): + self._mock_calls.append(( + self.remove_aggregate_host, ctxt, aggregate_id, + host_param, host, slave_info)) + + +class StubDependencies(object): + """Stub dependencies for ResourcePool.""" + + def __init__(self): + self.compute_rpcapi = MockComputeAPI() + + def _is_hv_pool(self, *_ignore): + return True + + def _get_metadata(self, *_ignore): + return { + pool_states.KEY: {}, + 'master_compute': 'master' + } + + def _create_slave_info(self, *ignore): + return "SLAVE_INFO" + + +class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool): + """A ResourcePool, use stub dependencies.""" + + +class HypervisorPoolTestCase(test.TestCase): + + fake_aggregate = { + 'id': 98, + 'hosts': [], + 'metadetails': { + 'master_compute': 'master', + pool_states.POOL_FLAG: {}, + pool_states.KEY: {} + } + } + + def test_slave_asks_master_to_add_slave_to_pool(self): + slave = ResourcePoolWithStubs() + + slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") + + self.assertIn( + (slave.compute_rpcapi.add_aggregate_host, + "CONTEXT", jsonutils.to_primitive(self.fake_aggregate), + "slave", "master", "SLAVE_INFO"), + slave.compute_rpcapi._mock_calls) + + def test_slave_asks_master_to_remove_slave_from_pool(self): + slave = ResourcePoolWithStubs() + + 
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") + + self.assertIn( + (slave.compute_rpcapi.remove_aggregate_host, + "CONTEXT", 98, "slave", "master", "SLAVE_INFO"), + slave.compute_rpcapi._mock_calls) + + +class SwapXapiHostTestCase(test.TestCase): + + def test_swapping(self): + self.assertEquals( + "http://otherserver:8765/somepath", + pool.swap_xapi_host( + "http://someserver:8765/somepath", 'otherserver')) + + def test_no_port(self): + self.assertEquals( + "http://otherserver/somepath", + pool.swap_xapi_host( + "http://someserver/somepath", 'otherserver')) + + def test_no_path(self): + self.assertEquals( + "http://otherserver", + pool.swap_xapi_host( + "http://someserver", 'otherserver')) + + +class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): + """Unit tests for live_migration.""" + def setUp(self): + super(XenAPILiveMigrateTestCase, self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + firewall_driver='nova.virt.xenapi.firewall.' + 'Dom0IptablesFirewallDriver', + host='host') + db_fakes.stub_out_db_instance_api(self.stubs) + self.context = context.get_admin_context() + xenapi_fake.create_local_pifs() + + def test_live_migration_calls_vmops(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + def fake_live_migrate(context, instance_ref, dest, post_method, + recover_method, block_migration, migrate_data): + fake_live_migrate.called = True + + self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate) + + self.conn.live_migration(None, None, None, None, None) + self.assertTrue(fake_live_migrate.called) + + def test_pre_live_migration(self): + # ensure method is present + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.conn.pre_live_migration(None, None, None, None) + + def test_post_live_migration_at_destination(self): + # ensure method is present + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.conn.post_live_migration_at_destination(None, None, None, None) + + def test_check_can_live_migrate_destination_with_block_migration(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf") + + expected = {'block_migration': True, + 'migrate_data': { + 'migrate_send_data': "fake_migrate_data", + 'destination_sr_ref': 'asdf' + } + } + result = self.conn.check_can_live_migrate_destination(self.context, + {'host': 'host'}, + {}, {}, + True, False) + self.assertEqual(expected, result) + + def test_check_can_live_migrate_destination_block_migration_fails(self): + stubs.stubout_session(self.stubs, + stubs.FakeSessionForFailedMigrateTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + self.assertRaises(exception.MigrationError, + self.conn.check_can_live_migrate_destination, + self.context, {'host': 'host'}, + {}, {}, + True, False) + + def _add_default_live_migrate_stubs(self, conn): + def fake_generate_vdi_map(destination_sr_ref, _vm_ref): + pass + + def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): + return [] + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + + self.stubs.Set(conn._vmops, "_generate_vdi_map", + fake_generate_vdi_map) + self.stubs.Set(conn._vmops, 
"_get_iscsi_srs", + fake_get_iscsi_srs) + self.stubs.Set(conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def test_check_can_live_migrate_source_with_block_migrate(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + dest_check_data = {'block_migration': True, + 'migrate_data': { + 'destination_sr_ref': None, + 'migrate_send_data': None + }} + result = self.conn.check_can_live_migrate_source(self.context, + {'host': 'host'}, + dest_check_data) + self.assertEqual(dest_check_data, result) + + def test_check_can_live_migrate_source_with_block_migrate_iscsi(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): + return ['sr_ref'] + self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", + fake_get_iscsi_srs) + + def fake_make_plugin_call(plugin, method, **args): + return "true" + self.stubs.Set(self.conn._vmops, "_make_plugin_call", + fake_make_plugin_call) + + dest_check_data = {'block_migration': True, + 'migrate_data': { + 'destination_sr_ref': None, + 'migrate_send_data': None + }} + result = self.conn.check_can_live_migrate_source(self.context, + {'host': 'host'}, + dest_check_data) + self.assertEqual(dest_check_data, result) + + def test_check_can_live_migrate_source_with_block_iscsi_fails(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + def fake_get_iscsi_srs(destination_sr_ref, _vm_ref): + return ['sr_ref'] + self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", + fake_get_iscsi_srs) + + def fake_make_plugin_call(plugin, method, **args): + return {'returncode': 'error', 'message': 'Plugin not found'} + self.stubs.Set(self.conn._vmops, "_make_plugin_call", + fake_make_plugin_call) + + dest_check_data = {'block_migration': True, + 'migrate_data': { + 'destination_sr_ref': None, + 'migrate_send_data': None + }} + + self.assertRaises(exception.MigrationError, + self.conn.check_can_live_migrate_source, + self.context, {'host': 'host'}, + {}) + + def test_check_can_live_migrate_source_with_block_migrate_fails(self): + stubs.stubout_session(self.stubs, + stubs.FakeSessionForFailedMigrateTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + dest_check_data = {'block_migration': True, + 'migrate_data': { + 'destination_sr_ref': None, + 'migrate_send_data': None + }} + self.assertRaises(exception.MigrationError, + self.conn.check_can_live_migrate_source, + self.context, + {'host': 'host'}, + dest_check_data) + + def test_check_can_live_migrate_works(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + class fake_aggregate: + def __init__(self): + self.metadetails = {"host": "test_host_uuid"} + + def fake_aggregate_get_by_host(context, host, key=None): + self.assertEqual(CONF.host, host) + return [fake_aggregate()] + + self.stubs.Set(db, "aggregate_get_by_host", + fake_aggregate_get_by_host) + self.conn.check_can_live_migrate_destination(self.context, + {'host': 'host'}, False, False) + + def test_check_can_live_migrate_fails(self): + 
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + class fake_aggregate: + def __init__(self): + self.metadetails = {"dest_other": "test_host_uuid"} + + def fake_aggregate_get_by_host(context, host, key=None): + self.assertEqual(CONF.host, host) + return [fake_aggregate()] + + self.stubs.Set(db, "aggregate_get_by_host", + fake_aggregate_get_by_host) + self.assertRaises(exception.MigrationError, + self.conn.check_can_live_migrate_destination, + self.context, {'host': 'host'}, None, None) + + def test_live_migration(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def fake_get_host_opaque_ref(context, destination_hostname): + return "fake_host" + self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref", + fake_get_host_opaque_ref) + + def post_method(context, instance, destination_hostname, + block_migration): + post_method.called = True + + self.conn.live_migration(self.conn, None, None, post_method, None) + + self.assertTrue(post_method.called, "post_method.called") + + def test_live_migration_on_failure(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def fake_get_host_opaque_ref(context, destination_hostname): + return "fake_host" + self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref", + fake_get_host_opaque_ref) + + def fake_call_xenapi(*args): + raise NotImplementedError() + self.stubs.Set(self.conn._vmops._session, "call_xenapi", + fake_call_xenapi) + + def recover_method(context, instance, destination_hostname, + block_migration): + recover_method.called = True + + self.assertRaises(NotImplementedError, self.conn.live_migration, + self.conn, None, None, None, recover_method) + self.assertTrue(recover_method.called, "recover_method.called") + + def test_live_migration_calls_post_migration(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + def post_method(context, instance, destination_hostname, + block_migration): + post_method.called = True + + # pass block_migration = True and migrate data + migrate_data = {"destination_sr_ref": "foo", + "migrate_send_data": "bar"} + self.conn.live_migration(self.conn, None, None, post_method, None, + True, migrate_data) + self.assertTrue(post_method.called, "post_method.called") + + def test_live_migration_block_cleans_srs(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + def fake_get_iscsi_srs(context, instance): + return ['sr_ref'] + self.stubs.Set(self.conn._vmops, "_get_iscsi_srs", + fake_get_iscsi_srs) + + def fake_forget_sr(context, instance): + fake_forget_sr.called = True + self.stubs.Set(volume_utils, "forget_sr", + fake_forget_sr) + + def post_method(context, instance, destination_hostname, + block_migration): + post_method.called = True + + migrate_data = {"destination_sr_ref": "foo", + "migrate_send_data": 
"bar"} + self.conn.live_migration(self.conn, None, None, post_method, None, + True, migrate_data) + + self.assertTrue(post_method.called, "post_method.called") + self.assertTrue(fake_forget_sr.called, "forget_sr.called") + + def test_live_migration_with_block_migration_raises_invalid_param(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def recover_method(context, instance, destination_hostname, + block_migration): + recover_method.called = True + # pass block_migration = True and no migrate data + self.assertRaises(exception.InvalidParameterValue, + self.conn.live_migration, self.conn, + None, None, None, recover_method, True, None) + self.assertTrue(recover_method.called, "recover_method.called") + + def test_live_migration_with_block_migration_fails_migrate_send(self): + stubs.stubout_session(self.stubs, + stubs.FakeSessionForFailedMigrateTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(self.conn) + + def recover_method(context, instance, destination_hostname, + block_migration): + recover_method.called = True + # pass block_migration = True and migrate data + migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar') + self.assertRaises(exception.MigrationError, + self.conn.live_migration, self.conn, + None, None, None, recover_method, True, migrate_data) + self.assertTrue(recover_method.called, "recover_method.called") + + def test_live_migrate_block_migration_xapi_call_parameters(self): + + fake_vdi_map = object() + + class Session(xenapi_fake.SessionBase): + def VM_migrate_send(self_, session, vmref, migrate_data, islive, + vdi_map, vif_map, options): + self.assertEquals('SOMEDATA', migrate_data) + self.assertEquals(fake_vdi_map, vdi_map) + + stubs.stubout_session(self.stubs, Session) + + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self._add_default_live_migrate_stubs(conn) + + def fake_generate_vdi_map(destination_sr_ref, _vm_ref): + return fake_vdi_map + + self.stubs.Set(conn._vmops, "_generate_vdi_map", + fake_generate_vdi_map) + + def dummy_callback(*args, **kwargs): + pass + + conn.live_migration( + self.context, instance_ref=dict(name='ignore'), dest=None, + post_method=dummy_callback, recover_method=dummy_callback, + block_migration="SOMEDATA", + migrate_data=dict(migrate_send_data='SOMEDATA', + destination_sr_ref="TARGET_SR_OPAQUE_REF")) + + def test_generate_vdi_map(self): + stubs.stubout_session(self.stubs, xenapi_fake.SessionBase) + conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + vm_ref = "fake_vm_ref" + + def fake_find_sr(_session): + self.assertEquals(conn._session, _session) + return "source_sr_ref" + self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr) + + def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref): + self.assertEquals(conn._session, _session) + self.assertEquals(vm_ref, _vm_ref) + self.assertEquals("source_sr_ref", _sr_ref) + return ["vdi0", "vdi1"] + + self.stubs.Set(vm_utils, "get_instance_vdis_for_sr", + fake_get_instance_vdis_for_sr) + + result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref) + + self.assertEquals({"vdi0": "dest_sr_ref", + "vdi1": "dest_sr_ref"}, result) + + +class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase): + def setUp(self): + super(XenAPIInjectMetadataTestCase, 
self).setUp() + self.flags(xenapi_connection_url='test_url', + xenapi_connection_password='test_pass', + firewall_driver='nova.virt.xenapi.firewall.' + 'Dom0IptablesFirewallDriver') + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) + + self.xenstore = dict(persist={}, ephem={}) + + def fake_get_vm_opaque_ref(inst, instance): + self.assertEqual(instance, 'instance') + return 'vm_ref' + + def fake_add_to_param_xenstore(inst, vm_ref, key, val): + self.assertEqual(vm_ref, 'vm_ref') + self.xenstore['persist'][key] = val + + def fake_remove_from_param_xenstore(inst, vm_ref, key): + self.assertEqual(vm_ref, 'vm_ref') + if key in self.xenstore['persist']: + del self.xenstore['persist'][key] + + def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None): + self.assertEqual(instance, 'instance') + self.assertEqual(vm_ref, 'vm_ref') + self.xenstore['ephem'][path] = jsonutils.dumps(value) + + def fake_delete_from_xenstore(inst, instance, path, vm_ref=None): + self.assertEqual(instance, 'instance') + self.assertEqual(vm_ref, 'vm_ref') + if path in self.xenstore['ephem']: + del self.xenstore['ephem'][path] + + self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref', + fake_get_vm_opaque_ref) + self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore', + fake_add_to_param_xenstore) + self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore', + fake_remove_from_param_xenstore) + self.stubs.Set(vmops.VMOps, '_write_to_xenstore', + fake_write_to_xenstore) + self.stubs.Set(vmops.VMOps, '_delete_from_xenstore', + fake_delete_from_xenstore) + + def test_inject_instance_metadata(self): + + # Add some system_metadata to ensure it doesn't get added + # to xenstore + instance = dict(metadata=[{'key': 'a', 'value': 1}, + {'key': 'b', 'value': 2}, + {'key': 'c', 'value': 3}, + # Check xenstore key sanitizing + {'key': 'hi.there', 'value': 4}, + {'key': 'hi!t.e/e', 'value': 5}], + # Check xenstore key sanitizing + system_metadata=[{'key': 'sys_a', 'value': 1}, + {'key': 'sys_b', 'value': 2}, + {'key': 'sys_c', 'value': 3}]) + self.conn._vmops.inject_instance_metadata(instance, 'vm_ref') + + self.assertEqual(self.xenstore, { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + 'vm-data/user-metadata/hi_there': '4', + 'vm-data/user-metadata/hi_t_e_e': '5', + }, + 'ephem': {}, + }) + + def test_change_instance_metadata_add(self): + # Test XenStore key sanitizing here, too. 
+ diff = {'test.key': ['+', 4]} + self.xenstore = { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + }, + 'ephem': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + }, + } + + self.conn._vmops.change_instance_metadata('instance', diff) + + self.assertEqual(self.xenstore, { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + 'vm-data/user-metadata/test_key': '4', + }, + 'ephem': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + 'vm-data/user-metadata/test_key': '4', + }, + }) + + def test_change_instance_metadata_update(self): + diff = dict(b=['+', 4]) + self.xenstore = { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + }, + 'ephem': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + }, + } + + self.conn._vmops.change_instance_metadata('instance', diff) + + self.assertEqual(self.xenstore, { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '4', + 'vm-data/user-metadata/c': '3', + }, + 'ephem': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '4', + 'vm-data/user-metadata/c': '3', + }, + }) + + def test_change_instance_metadata_delete(self): + diff = dict(b=['-']) + self.xenstore = { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + }, + 'ephem': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/b': '2', + 'vm-data/user-metadata/c': '3', + }, + } + + self.conn._vmops.change_instance_metadata('instance', diff) + + self.assertEqual(self.xenstore, { + 'persist': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/c': '3', + }, + 'ephem': { + 'vm-data/user-metadata/a': '1', + 'vm-data/user-metadata/c': '3', + }, + }) + + +class XenAPISessionTestCase(test.TestCase): + def _get_mock_xapisession(self, software_version): + class XcpXapiSession(xenapi_conn.XenAPISession): + def __init__(_ignore): + "Skip the superclass's dirty init" + + def _get_software_version(_ignore): + return software_version + + return XcpXapiSession() + + def test_get_product_version_product_brand_does_not_fail(self): + session = self._get_mock_xapisession({ + 'build_number': '0', + 'date': '2012-08-03', + 'hostname': 'komainu', + 'linux': '3.2.0-27-generic', + 'network_backend': 'bridge', + 'platform_name': 'XCP_Kronos', + 'platform_version': '1.6.0', + 'xapi': '1.3', + 'xen': '4.1.2', + 'xencenter_max': '1.10', + 'xencenter_min': '1.10' + }) + + self.assertEquals( + (None, None), + session._get_product_version_and_brand() + ) + + def test_get_product_version_product_brand_xs_6(self): + session = self._get_mock_xapisession({ + 'product_brand': 'XenServer', + 'product_version': '6.0.50' + }) + + self.assertEquals( + ((6, 0, 50), 'XenServer'), + session._get_product_version_and_brand() + ) diff --git a/nova/tests/virt/xenapi/vm_rrd.xml b/nova/tests/virt/xenapi/vm_rrd.xml new file mode 100644 index 000000000..f9a7c8083 --- /dev/null +++ b/nova/tests/virt/xenapi/vm_rrd.xml @@ -0,0 +1,1101 @@ + + 0003 + 5 + 1328795567 + + cpu0 + DERIVE + 300.0000 + 0.0 + 1.0000 + 5102.8417 + 0.0110 + 0 + + + memory + GAUGE + 300.0000 + 0.0 + Infinity + 4294967296 + 10961792000.0000 + 0 + + + memory_target + GAUGE + 300.0000 + 0.0 + Infinity + 
4294967296 + 10961792000.0000 + 0 + + + vif_0_tx + DERIVE + 300.0000 + -Infinity + Infinity + 1079132206 + 752.4007 + 0 + + + vif_0_rx + DERIVE + 300.0000 + -Infinity + Infinity + 1093250983 + 4837.8805 + 0 + + + vbd_xvda_write + DERIVE + 300.0000 + -Infinity + Infinity + 4552440832 + 0.0 + 0 + + + vbd_xvda_read + DERIVE + 300.0000 + -Infinity + Infinity + 1371223040 + 0.0 + 0 + + + memory_internal_free + GAUGE + 300.0000 + -Infinity + Infinity + 1415564 + 3612860.6020 + 0 + + + vbd_xvdb_write + DERIVE + 300.0000 + -Infinity + Infinity + 0.0 + 0.0 + 2 + + + vbd_xvdb_read + DERIVE + 300.0000 + -Infinity + Infinity + 0.0 + 0.0 + 2 + + + vif_2_tx + DERIVE + 300.0000 + -Infinity + Infinity + 0.0 + 0.0 + 2 + + + vif_2_rx + DERIVE + 300.0000 + -Infinity + Infinity + 0.0 + 0.0 + 2 + + + AVERAGE + 1 + + 0.5000 + + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + + + 0.0259 + 4294967296.0000 + 4294967296.0000 + 270.6642 + 1968.1381 + 0.0 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0042 + 4294967296.0000 + 4294967296.0000 + 258.6530 + 1890.5522 + 565.3453 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0043 + 4294967296.0000 + 4294967296.0000 + 249.1120 + 1778.2501 + 817.5985 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0039 + 4294967296.0000 + 4294967296.0000 + 270.5131 + 1806.3336 + 9811.4443 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0041 + 4294967296.0000 + 4294967296.0000 + 264.3683 + 1952.4054 + 4370.4121 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0034 + 4294967296.0000 + 4294967296.0000 + 251.6331 + 1958.8002 + 0.0 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0042 + 4294967296.0000 + 4294967296.0000 + 274.5222 + 2067.5947 + 0.0 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0046 + 4294967296.0000 + 4294967296.0000 + 260.9790 + 2042.7045 + 1671.6940 + 0.0 + 1433552.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0163 + 4294967296.0000 + 4294967296.0000 + 249.0992 + 1845.3728 + 4119.4312 + 0.0 + 1431698.1250 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0098 + 4294967296.0000 + 4294967296.0000 + 273.9898 + 1879.1331 + 5459.4102 + 0.0 + 1430824.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0043 + 4294967296.0000 + 4294967296.0000 + 261.3513 + 2335.3000 + 6837.4907 + 0.0 + 1430824.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0793 + 4294967296.0000 + 4294967296.0000 + 249.2620 + 2092.4504 + 2391.9744 + 0.0 + 1430824.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0406 + 4294967296.0000 + 4294967296.0000 + 270.0746 + 1859.9802 + 0.0 + 0.0 + 1430824.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0043 + 4294967296.0000 + 4294967296.0000 + 263.4259 + 2010.8950 + 550.1484 + 0.0 + 1430824.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0565 + 4294967296.0000 + 4294967296.0000 + 29891.2227 + 26210.6699 + 3213.4324 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0645 + 4294967296.0000 + 4294967296.0000 + 31501.1562 + 29642.1641 + 400.9566 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0381 + 4294967296.0000 + 4294967296.0000 + 17350.7676 + 20748.6133 + 1247.4755 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0212 + 4294967296.0000 + 4294967296.0000 + 11981.0918 + 12866.9775 + 5774.9497 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0045 + 4294967296.0000 + 4294967296.0000 + 249.0901 + 1898.6758 + 4446.3750 + 0.0 + 1415564.0000 + 0.0 + 0.0 
+ 0.0 + 0.0 + + + 0.0614 + 4294967296.0000 + 4294967296.0000 + 249.0959 + 2255.1912 + 0.0 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0609 + 4294967296.0000 + 4294967296.0000 + 253.1091 + 2099.0601 + 1230.0925 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0047 + 4294967296.0000 + 4294967296.0000 + 268.6620 + 1759.5667 + 2861.2107 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0100 + 4294967296.0000 + 4294967296.0000 + 292.2647 + 1828.5435 + 3270.3474 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0093 + 4294967296.0000 + 4294967296.0000 + 303.5810 + 1932.1176 + 4485.4355 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0038 + 4294967296.0000 + 4294967296.0000 + 291.6633 + 1842.4425 + 2898.5137 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0042 + 4294967296.0000 + 4294967296.0000 + 287.4134 + 1816.0144 + 0.0 + 0.0 + 1415564.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + + + AVERAGE + 12 + + 0.5000 + + + + 0.0 + 0.0 + 0.0150 + 0 + + + 0.0 + 0.0 + 3221225472.0000 + 0 + + + 0.0 + 0.0 + 3221225472.0000 + 0 + + + 0.0 + 0.0 + 1181.3309 + 0 + + + 0.0 + 0.0 + 2358.2158 + 0 + + + 0.0 + 0.0 + 2080.5770 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 1061673.0000 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + + + 0.0130 + 4294967296.0000 + 4294967296.0000 + 261.6000 + 1990.6442 + 1432.2385 + 0.0 + 1441908.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0172 + 4294967296.0000 + 4294967296.0000 + 318.8885 + 1979.7030 + 1724.9528 + 0.0 + 1441912.7500 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0483 + 4294967296.0000 + 4294967296.0000 + 3108.1233 + 4815.9639 + 4962.0503 + 68.2667 + 1441916.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0229 + 4294967296.0000 + 4294967296.0000 + 1944.2039 + 3757.9177 + 10861.6670 + 0.0 + 1439546.7500 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0639 + 4294967296.0000 + 4294967296.0000 + 44504.8789 + 34745.1523 + 9571.1455 + 0.0 + 1437892.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.2945 + 4294967296.0000 + 4294967296.0000 + 79219.1641 + 102827.0781 + 438999.3438 + 0.0 + 1415337.7500 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.1219 + 4294967296.0000 + 4294967296.0000 + 61093.7109 + 49836.3164 + 8734.3730 + 0.0 + 1399324.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0151 + 4294967296.0000 + 4294967296.0000 + 48.3914 + 1922.5935 + 2251.4346 + 0.0 + 1421237.1250 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.3162 + 4294967296.0000 + 4294967296.0000 + 80667.4922 + 53950.0430 + 416858.5000 + 0.0 + 1437032.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + + + AVERAGE + 720 + + 0.5000 + + + + 0.0 + 0.0 + 0.0848 + 0 + + + 0.0 + 0.0 + 3775992081.0667 + 0 + + + 0.0 + 0.0 + 3775992081.0667 + 0 + + + 0.0 + 0.0 + 16179.3166 + 0 + + + 0.0 + 0.0 + 13379.7997 + 0 + + + 0.0 + 0.0 + 109091.4636 + 0 + + + 0.0 + 0.0 + 323.1289 + 0 + + + 0.0 + 0.0 + 1259057.5294 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + + + 0.1458 + 4294967296.0000 + 4294967296.0000 + 6454.3096 + 5327.6709 + 116520.9609 + 738.4178 + 2653538.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0971 + 4294967296.0000 + 4294967296.0000 + 10180.4941 + 10825.1777 + 98749.3438 + 523.3778 + 2381725.7500 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0683 + 4294967296.0000 + 4294967296.0000 + 23183.2695 + 19607.6523 + 93946.5703 + 807.8222 + 2143269.2500 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0352 + 4294967296.0000 + 4294967296.0000 + 7552.5708 + 7320.5391 + 30907.9453 + 150384.6406 + 1583336.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + + + AVERAGE + 17280 + + 0.5000 + + + + 0.0 + 0.0 + 0.0187 + 0 + + + 0.0 + 0.0 + 
2483773622.0445 + 0 + + + 0.0 + 0.0 + 2483773622.0445 + 0 + + + 0.0 + 0.0 + 2648.2715 + 0 + + + 0.0 + 0.0 + 3002.4238 + 0 + + + 0.0 + 0.0 + 19129.3156 + 0 + + + 0.0 + 0.0 + 6365.7244 + 0 + + + 0.0 + 0.0 + 1468863.7753 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + 0.0 + 0.0 + 0.0 + 0 + + + + + 0.0579 + 4294967296.0000 + 4294967296.0000 + 6291.0151 + 7489.2583 + 70915.3750 + 50.1570 + 613674.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0541 + 4294967296.0000 + 4294967296.0000 + 10406.3682 + 10638.9365 + 32972.1250 + 7.6800 + 647683.5625 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0189 + 4294967296.0000 + 4294967296.0000 + 207.0768 + 2145.3167 + 1685.8905 + 0.0 + 599934.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0202 + 4294967296.0000 + 4294967296.0000 + 71.0270 + 2046.6521 + 6703.9795 + 182.0444 + 595963.8750 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0661 + 4294967296.0000 + 4294967296.0000 + 8520.3213 + 8488.0664 + 52978.7930 + 7.3956 + 727540.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0219 + 4294967296.0000 + 4294967296.0000 + 40443.0117 + 20702.5996 + -1377536.8750 + 36990.5898 + 1823778.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + 0.0265 + 4294971904.0000 + 4294754304.0000 + 6384.6367 + 6513.4951 + 22415.6348 + 2486.9690 + 3072170.0000 + 0.0 + 0.0 + 0.0 + 0.0 + + + + diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py deleted file mode 100644 index 1dd02bdc1..000000000 --- a/nova/tests/xenapi/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`xenapi` -- Stubs for XenAPI -================================= -""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py deleted file mode 100644 index fa214b23e..000000000 --- a/nova/tests/xenapi/stubs.py +++ /dev/null @@ -1,353 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Stubouts, mocks and fixtures for the test suite.""" - -import pickle -import random - -from nova.openstack.common import jsonutils -from nova import test -import nova.tests.image.fake -from nova.virt.xenapi import driver as xenapi_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import vmops - - -def stubout_firewall_driver(stubs, conn): - - def fake_none(self, *args): - return - - _vmops = conn._vmops - stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none) - stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none) - - -def stubout_instance_snapshot(stubs): - def fake_fetch_image(context, session, instance, name_label, image, type): - return {'root': dict(uuid=_make_fake_vdi(), file=None), - 'kernel': dict(uuid=_make_fake_vdi(), file=None), - 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)} - - stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) - - def fake_wait_for_vhd_coalesce(*args): - #TODO(sirp): Should we actually fake out the data here - return "fakeparent", "fakebase" - - stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) - - -def stubout_session(stubs, cls, product_version=(5, 6, 2), - product_brand='XenServer', **opt_args): - """Stubs out methods from XenAPISession.""" - stubs.Set(xenapi_conn.XenAPISession, '_create_session', - lambda s, url: cls(url, **opt_args)) - stubs.Set(xenapi_conn.XenAPISession, '_get_product_version_and_brand', - lambda s: (product_version, product_brand)) - - -def stubout_get_this_vm_uuid(stubs): - def f(): - vms = [rec['uuid'] for ref, rec - in fake.get_all_records('VM').iteritems() - if rec['is_control_domain']] - return vms[0] - stubs.Set(vm_utils, 'get_this_vm_uuid', f) - - -def stubout_image_service_download(stubs): - def fake_download(*args, **kwargs): - pass - stubs.Set(nova.tests.image.fake._FakeImageService, - 'download', fake_download) - - -def stubout_stream_disk(stubs): - def fake_stream_disk(*args, **kwargs): - pass - stubs.Set(vm_utils, '_stream_disk', fake_stream_disk) - - -def stubout_is_vdi_pv(stubs): - def f(_1): - return False - stubs.Set(vm_utils, '_is_vdi_pv', f) - - -def stubout_determine_is_pv_objectstore(stubs): - """Assumes VMs stu have PV kernels.""" - - def f(*args): - return False - stubs.Set(vm_utils, '_determine_is_pv_objectstore', f) - - -def stubout_is_snapshot(stubs): - """Always returns true - xenapi fake driver does not create vmrefs for snapshots """ - - def f(*args): - return True - stubs.Set(vm_utils, 'is_snapshot', f) - - -def stubout_lookup_image(stubs): - """Simulates a failure in lookup image.""" - def f(_1, _2, _3, _4): - raise Exception("Test Exception raised by fake lookup_image") - stubs.Set(vm_utils, 'lookup_image', f) - - -def stubout_fetch_disk_image(stubs, raise_failure=False): - """Simulates a failure in fetch image_glance_disk.""" - - def _fake_fetch_disk_image(context, session, instance, name_label, image, - image_type): - if raise_failure: - raise fake.Failure("Test Exception raised by " - "fake fetch_image_glance_disk") - elif image_type == vm_utils.ImageType.KERNEL: - filename = "kernel" - elif image_type == vm_utils.ImageType.RAMDISK: - filename = "ramdisk" - else: - filename = "unknown" - - vdi_type = vm_utils.ImageType.to_string(image_type) - return {vdi_type: dict(uuid=None, file=filename)} - - stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image) - - -def stubout_create_vm(stubs): - """Simulates a failure in create_vm.""" - - def f(*args): - raise fake.Failure("Test 
Exception raised by fake create_vm") - stubs.Set(vm_utils, 'create_vm', f) - - -def stubout_attach_disks(stubs): - """Simulates a failure in _attach_disks.""" - - def f(*args): - raise fake.Failure("Test Exception raised by fake _attach_disks") - stubs.Set(vmops.VMOps, '_attach_disks', f) - - -def _make_fake_vdi(): - sr_ref = fake.get_all('SR')[0] - vdi_ref = fake.create_vdi('', sr_ref) - vdi_rec = fake.get_record('VDI', vdi_ref) - return vdi_rec['uuid'] - - -class FakeSessionForVMTests(fake.SessionBase): - """Stubs out a XenAPISession for VM tests.""" - - _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on " - "Sun Nov 6 22:49:02 2011\n" - "*filter\n" - ":INPUT ACCEPT [0:0]\n" - ":FORWARD ACCEPT [0:0]\n" - ":OUTPUT ACCEPT [0:0]\n" - "COMMIT\n" - "# Completed on Sun Nov 6 22:49:02 2011\n") - - def host_call_plugin(self, _1, _2, plugin, method, _5): - if (plugin, method) == ('glance', 'download_vhd'): - root_uuid = _make_fake_vdi() - return pickle.dumps(dict(root=dict(uuid=root_uuid))) - elif (plugin, method) == ("xenhost", "iptables_config"): - return fake.as_json(out=self._fake_iptables_save_output, - err='') - else: - return (super(FakeSessionForVMTests, self). - host_call_plugin(_1, _2, plugin, method, _5)) - - def VM_start(self, _1, ref, _2, _3): - vm = fake.get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - vm['is_a_template'] = False - vm['is_control_domain'] = False - vm['domid'] = random.randrange(1, 1 << 16) - return vm - - def VM_start_on(self, _1, vm_ref, host_ref, _2, _3): - vm_rec = self.VM_start(_1, vm_ref, _2, _3) - vm_rec['resident_on'] = host_ref - - def VDI_snapshot(self, session_ref, vm_ref, _1): - sr_ref = "fakesr" - return fake.create_vdi('fakelabel', sr_ref, read_only=True) - - def SR_scan(self, session_ref, sr_ref): - pass - - -class FakeSessionForFirewallTests(FakeSessionForVMTests): - """Stubs out a XenApi Session for doing IPTable Firewall tests.""" - - def __init__(self, uri, test_case=None): - super(FakeSessionForFirewallTests, self).__init__(uri) - if hasattr(test_case, '_in_rules'): - self._in_rules = test_case._in_rules - if hasattr(test_case, '_in6_filter_rules'): - self._in6_filter_rules = test_case._in6_filter_rules - self._test_case = test_case - - def host_call_plugin(self, _1, _2, plugin, method, args): - """Mock method four host_call_plugin to be used in unit tests - for the dom0 iptables Firewall drivers for XenAPI - - """ - if plugin == "xenhost" and method == "iptables_config": - # The command to execute is a json-encoded list - cmd_args = args.get('cmd_args', None) - cmd = jsonutils.loads(cmd_args) - if not cmd: - ret_str = '' - else: - output = '' - process_input = args.get('process_input', None) - if cmd == ['ip6tables-save', '-c']: - output = '\n'.join(self._in6_filter_rules) - if cmd == ['iptables-save', '-c']: - output = '\n'.join(self._in_rules) - if cmd == ['iptables-restore', '-c', ]: - lines = process_input.split('\n') - if '*filter' in lines: - if self._test_case is not None: - self._test_case._out_rules = lines - output = '\n'.join(lines) - if cmd == ['ip6tables-restore', '-c', ]: - lines = process_input.split('\n') - if '*filter' in lines: - output = '\n'.join(lines) - ret_str = fake.as_json(out=output, err='') - return ret_str - - -def stub_out_vm_methods(stubs): - def fake_acquire_bootlock(self, vm): - pass - - def fake_release_bootlock(self, vm): - pass - - def fake_generate_ephemeral(*args): - 
pass - - def fake_wait_for_device(dev): - pass - - stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock) - stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock) - stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) - stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device) - - -class FakeSessionForVolumeTests(fake.SessionBase): - """Stubs out a XenAPISession for Volume tests.""" - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - valid_vdi = False - refs = fake.get_all('VDI') - for ref in refs: - rec = fake.get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - -class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): - """Stubs out a XenAPISession for Volume tests: it injects failures.""" - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - # This is for testing failure - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - def PBD_unplug(self, _1, ref): - rec = fake.get_record('PBD', ref) - rec['currently-attached'] = False - - def SR_forget(self, _1, ref): - pass - - -def stub_out_migration_methods(stubs): - fakesr = fake.create_sr() - - def fake_move_disks(self, instance, disk_info): - vdi_ref = fake.create_vdi(instance['name'], fakesr) - vdi_rec = fake.get_record('VDI', vdi_ref) - vdi_rec['other_config']['nova_disk_type'] = 'root' - return {'uuid': vdi_rec['uuid'], 'ref': vdi_ref} - - def fake_get_vdi(session, vm_ref): - vdi_ref_parent = fake.create_vdi('derp-parent', fakesr) - vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent) - vdi_ref = fake.create_vdi('derp', fakesr, - sm_config={'vhd-parent': vdi_rec_parent['uuid']}) - vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) - return vdi_ref, vdi_rec - - def fake_sr(session, *args): - return fakesr - - def fake_get_sr_path(*args): - return "fake" - - def fake_destroy(*args, **kwargs): - pass - - def fake_generate_ephemeral(*args): - pass - - stubs.Set(vmops.VMOps, '_destroy', fake_destroy) - stubs.Set(vm_utils, 'move_disks', fake_move_disks) - stubs.Set(vm_utils, 'scan_default_sr', fake_sr) - stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi) - stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path) - stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) - - -class FakeSessionForFailedMigrateTests(FakeSessionForVMTests): - def VM_assert_can_migrate(self, session, vmref, migrate_data, - live, vdi_map, vif_map, options): - raise fake.Failure("XenAPI VM.assert_can_migrate failed") - - def host_migrate_receive(self, session, hostref, networkref, options): - raise fake.Failure("XenAPI host.migrate_receive failed") - - def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map, - vif_map, options): - raise fake.Failure("XenAPI VM.migrate_send failed") - - -class XenAPITestBase(test.TestCase): - def setUp(self): - super(XenAPITestBase, self).setUp() - - self.useFixture(test.ReplaceModule('XenAPI', fake)) - - fake.reset() diff --git a/nova/tests/xenapi/test_vm_utils.py b/nova/tests/xenapi/test_vm_utils.py deleted file mode 100644 index 694be1713..000000000 --- a/nova/tests/xenapi/test_vm_utils.py +++ /dev/null @@ -1,377 +0,0 @@ -import mox -from nova import context -from nova import db -from nova import exception -from nova import test -from nova.tests.xenapi import stubs -from nova.virt.xenapi import driver as xenapi_conn -from nova.virt.xenapi import fake -from 
nova.virt.xenapi import vm_utils
-from nova.virt.xenapi import volume_utils
-
-
-XENSM_TYPE = 'xensm'
-ISCSI_TYPE = 'iscsi'
-
-
-class FakeSession():
-    def call_xenapi(self, operation, *args, **kwargs):
-        # VDI.add_to_other_config -> VDI_add_to_other_config
-        method = getattr(self, operation.replace('.', '_'), None)
-        if method:
-            return method(*args, **kwargs)
-
-        self.operation = operation
-        self.args = args
-        self.kwargs = kwargs
-
-
-def get_fake_connection_data(sr_type):
-    fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
-                          'name_label': 'fake_storage',
-                          'name_description': 'test purposes',
-                          'server': 'myserver',
-                          'serverpath': '/local/scratch/myname',
-                          'sr_type': 'nfs',
-                          'introduce_sr_keys': ['server',
-                                                'serverpath',
-                                                'sr_type'],
-                          'vdi_uuid': 'falseVDI'},
-             ISCSI_TYPE: {'volume_id': 'fake_volume_id',
-                          'target_lun': 1,
-                          'target_iqn': 'fake_iqn:volume-fake_volume_id',
-                          'target_portal': u'localhost:3260',
-                          'target_discovered': False}, }
-    return fakes[sr_type]
-
-
-class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
-    def setUp(self):
-        super(GetInstanceForVdisForSrTestCase, self).setUp()
-        self.flags(disable_process_locking=True,
-                   instance_name_template='%d',
-                   firewall_driver='nova.virt.xenapi.firewall.'
-                                   'Dom0IptablesFirewallDriver',
-                   xenapi_connection_url='test_url',
-                   xenapi_connection_password='test_pass',)
-
-    def test_get_instance_vdis_for_sr(self):
-        vm_ref = fake.create_vm("foo", "Running")
-        sr_ref = fake.create_sr()
-
-        vdi_1 = fake.create_vdi('vdiname1', sr_ref)
-        vdi_2 = fake.create_vdi('vdiname2', sr_ref)
-
-        for vdi_ref in [vdi_1, vdi_2]:
-            fake.create_vbd(vm_ref, vdi_ref)
-
-        stubs.stubout_session(self.stubs, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-
-        result = list(vm_utils.get_instance_vdis_for_sr(
-            driver._session, vm_ref, sr_ref))
-
-        self.assertEquals([vdi_1, vdi_2], result)
-
-    def test_get_instance_vdis_for_sr_no_vbd(self):
-        vm_ref = fake.create_vm("foo", "Running")
-        sr_ref = fake.create_sr()
-
-        stubs.stubout_session(self.stubs, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-
-        result = list(vm_utils.get_instance_vdis_for_sr(
-            driver._session, vm_ref, sr_ref))
-
-        self.assertEquals([], result)
-
-    def test_get_vdi_uuid_for_volume_with_sr_uuid(self):
-        connection_data = get_fake_connection_data(XENSM_TYPE)
-        stubs.stubout_session(self.stubs, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-
-        vdi_uuid = vm_utils.get_vdi_uuid_for_volume(
-            driver._session, connection_data)
-        self.assertEquals(vdi_uuid, 'falseVDI')
-
-    def test_get_vdi_uuid_for_volume_failure(self):
-        stubs.stubout_session(self.stubs, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-
-        def bad_introduce_sr(session, sr_uuid, label, sr_params):
-            return None
-
-        self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr)
-        connection_data = get_fake_connection_data(XENSM_TYPE)
-        self.assertRaises(exception.NovaException,
-                          vm_utils.get_vdi_uuid_for_volume,
-                          driver._session, connection_data)
-
-    def test_get_vdi_uuid_for_volume_from_iscsi_vol_missing_sr_uuid(self):
-        connection_data = get_fake_connection_data(ISCSI_TYPE)
-        stubs.stubout_session(self.stubs, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-
-        vdi_uuid = vm_utils.get_vdi_uuid_for_volume(
-            driver._session, connection_data)
-        self.assertNotEquals(vdi_uuid, None)
-
-
-class VMRefOrRaiseVMFoundTestCase(test.TestCase):
-
-    def test_lookup_call(self):
-        mock = mox.Mox()
-        mock.StubOutWithMock(vm_utils, 'lookup')
-
-        vm_utils.lookup('session', 'somename').AndReturn('ignored')
-
-        mock.ReplayAll()
-        vm_utils.vm_ref_or_raise('session', 'somename')
-        mock.VerifyAll()
-
-    def test_return_value(self):
-        mock = mox.Mox()
-        mock.StubOutWithMock(vm_utils, 'lookup')
-
-        vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
-
-        mock.ReplayAll()
-        self.assertEquals(
-            'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
-        mock.VerifyAll()
-
-
-class VMRefOrRaiseVMNotFoundTestCase(test.TestCase):
-
-    def test_exception_raised(self):
-        mock = mox.Mox()
-        mock.StubOutWithMock(vm_utils, 'lookup')
-
-        vm_utils.lookup('session', 'somename').AndReturn(None)
-
-        mock.ReplayAll()
-        self.assertRaises(
-            exception.InstanceNotFound,
-            lambda: vm_utils.vm_ref_or_raise('session', 'somename')
-        )
-        mock.VerifyAll()
-
-    def test_exception_msg_contains_vm_name(self):
-        mock = mox.Mox()
-        mock.StubOutWithMock(vm_utils, 'lookup')
-
-        vm_utils.lookup('session', 'somename').AndReturn(None)
-
-        mock.ReplayAll()
-        try:
-            vm_utils.vm_ref_or_raise('session', 'somename')
-        except exception.InstanceNotFound as e:
-            self.assertTrue(
-                'somename' in str(e))
-        mock.VerifyAll()
-
-
-class BittorrentTestCase(stubs.XenAPITestBase):
-    def setUp(self):
-        super(BittorrentTestCase, self).setUp()
-        self.context = context.get_admin_context()
-
-    def test_image_uses_bittorrent(self):
-        sys_meta = {'image_bittorrent': True}
-        instance = db.instance_create(self.context,
-                                      {'system_metadata': sys_meta})
-        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
-        self.flags(xenapi_torrent_images='some')
-        self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
-                                                        instance))
-
-    def _test_create_image(self, cache_type):
-        sys_meta = {'image_cache_in_nova': True}
-        instance = db.instance_create(self.context,
-                                      {'system_metadata': sys_meta})
-        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
-        self.flags(cache_images=cache_type)
-
-        was = {'called': None}
-
-        def fake_create_cached_image(*args):
-            was['called'] = 'some'
-            return {}
-        self.stubs.Set(vm_utils, '_create_cached_image',
-                       fake_create_cached_image)
-
-        def fake_fetch_image(*args):
-            was['called'] = 'none'
-            return {}
-        self.stubs.Set(vm_utils, '_fetch_image',
-                       fake_fetch_image)
-
-        vm_utils._create_image(self.context, None, instance,
-                               'foo', 'bar', 'baz')
-
-        self.assertEqual(was['called'], cache_type)
-
-    def test_create_image_cached(self):
-        self._test_create_image('some')
-
-    def test_create_image_uncached(self):
-        self._test_create_image('none')
-
-
-class CreateVBDTestCase(test.TestCase):
-    def setUp(self):
-        super(CreateVBDTestCase, self).setUp()
-        self.session = FakeSession()
-        self.mock = mox.Mox()
-        self.mock.StubOutWithMock(self.session, 'call_xenapi')
-        self.vbd_rec = self._generate_vbd_rec()
-
-    def _generate_vbd_rec(self):
-        vbd_rec = {}
-        vbd_rec['VM'] = 'vm_ref'
-        vbd_rec['VDI'] = 'vdi_ref'
-        vbd_rec['userdevice'] = '0'
-        vbd_rec['bootable'] = False
-        vbd_rec['mode'] = 'RW'
-        vbd_rec['type'] = 'disk'
-        vbd_rec['unpluggable'] = True
-        vbd_rec['empty'] = False
-        vbd_rec['other_config'] = {}
-        vbd_rec['qos_algorithm_type'] = ''
-        vbd_rec['qos_algorithm_params'] = {}
-        vbd_rec['qos_supported_algorithms'] = []
-        return vbd_rec
-
-    def test_create_vbd_default_args(self):
-        self.session.call_xenapi('VBD.create',
-                self.vbd_rec).AndReturn("vbd_ref")
-        self.mock.ReplayAll()
-
-        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
-        self.assertEquals(result, "vbd_ref")
-        self.mock.VerifyAll()
-
-    def test_create_vbd_osvol(self):
-        self.session.call_xenapi('VBD.create',
-                self.vbd_rec).AndReturn("vbd_ref")
-        self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
-                                 "osvol", "True")
-        self.mock.ReplayAll()
-        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
-                                     osvol=True)
-        self.assertEquals(result, "vbd_ref")
-        self.mock.VerifyAll()
-
-    def test_create_vbd_extra_args(self):
-        self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
-        self.vbd_rec['type'] = 'a'
-        self.vbd_rec['mode'] = 'RO'
-        self.vbd_rec['bootable'] = True
-        self.vbd_rec['empty'] = True
-        self.vbd_rec['unpluggable'] = False
-        self.session.call_xenapi('VBD.create',
-                self.vbd_rec).AndReturn("vbd_ref")
-        self.mock.ReplayAll()
-
-        result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
-                vbd_type="a", read_only=True, bootable=True,
-                empty=True, unpluggable=False)
-        self.assertEquals(result, "vbd_ref")
-        self.mock.VerifyAll()
-
-    def test_attach_cd(self):
-        self.mock.StubOutWithMock(vm_utils, 'create_vbd')
-
-        vm_utils.create_vbd(self.session, "vm_ref", None, 1,
-                vbd_type='cd', read_only=True, bootable=True,
-                empty=True, unpluggable=False).AndReturn("vbd_ref")
-        self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
-        self.mock.ReplayAll()
-
-        result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
-        self.assertEquals(result, "vbd_ref")
-        self.mock.VerifyAll()
-
-
-class VDIOtherConfigTestCase(stubs.XenAPITestBase):
-    """Tests to ensure that the code is populating VDI's `other_config`
-    attribute with the correct metadata.
-    """
-
-    def setUp(self):
-        super(VDIOtherConfigTestCase, self).setUp()
-        self.session = FakeSession()
-        self.context = context.get_admin_context()
-        self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
-                              'name': 'myinstance'}
-
-    def test_create_vdi(self):
-        # Some images are registered with XenServer explicitly by calling
-        # `create_vdi`
-        vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
-                            'myvdi', 'root', 1024, read_only=True)
-
-        expected = {'nova_disk_type': 'root',
-                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
-
-        self.assertEqual(expected, self.session.args[0]['other_config'])
-
-    def test_create_image(self):
-        # Other images are registered implicitly when they are dropped into
-        # the SR by a dom0 plugin or some other process
-        self.flags(cache_images='none')
-
-        def fake_fetch_image(*args):
-            return {'root': {'uuid': 'fake-uuid'}}
-
-        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
-
-        other_config = {}
-
-        def VDI_add_to_other_config(ref, key, value):
-            other_config[key] = value
-
-        def VDI_get_record(ref):
-            return {'other_config': {}}
-
-        # Stubbing on the session object and not class so we don't pollute
-        # other tests
-        self.session.VDI_add_to_other_config = VDI_add_to_other_config
-        self.session.VDI_get_record = VDI_get_record
-
-        vm_utils._create_image(self.context, self.session, self.fake_instance,
-                               'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
-
-        expected = {'nova_disk_type': 'root',
-                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
-
-        self.assertEqual(expected, other_config)
-
-    def test_move_disks(self):
-        # Migrated images should preserve the `other_config`
-        other_config = {}
-
-        def VDI_add_to_other_config(ref, key, value):
-            other_config[key] = value
-
-        def VDI_get_record(ref):
-            return {'other_config': {}}
-
-        def call_plugin_serialized(*args, **kwargs):
-            return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
-
-        # Stubbing on the session object and not class so we don't pollute
-        # other tests
-        self.session.VDI_add_to_other_config = VDI_add_to_other_config
-        self.session.VDI_get_record = VDI_get_record
-        self.session.call_plugin_serialized = call_plugin_serialized
-
-        self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
-        self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)
-
-        vm_utils.move_disks(self.session, self.fake_instance, {})
-
-        expected = {'nova_disk_type': 'root',
-                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
-
-        self.assertEqual(expected, other_config)
diff --git a/nova/tests/xenapi/vm_rrd.xml b/nova/tests/xenapi/vm_rrd.xml
deleted file mode 100644
index f9a7c8083..000000000
--- a/nova/tests/xenapi/vm_rrd.xml
+++ /dev/null
@@ -1,1101 +0,0 @@
[The 1,101 deleted lines are an rrdtool XML dump used as a test fixture: an RRD with data sources cpu0, memory, memory_target, vif_0_tx/rx, vif_2_tx/rx, vbd_xvda_read/write, vbd_xvdb_read/write and memory_internal_free, plus several AVERAGE archives. The XML markup was stripped in this copy of the patch, so the remaining raw sample values are omitted here.]