summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorVishvananda Ishaya <vishvananda@gmail.com>2011-09-23 09:22:32 -0700
committerVishvananda Ishaya <vishvananda@gmail.com>2011-10-11 14:25:04 -0700
commiteb03d47fecd3bfc24243da29ee01679b334a08fe (patch)
tree23243973d2656fecadab6811e0dca6ceb246a7ae /nova/tests
parente164f3f703026db30937dbbddc63818cef8bd939 (diff)
Remove AoE, Clean up volume code
* Removes ATA over Ethernet * Adds drivers to libvirt for volumes * Adds initialize_connection and terminate_connection to volume api * Passes connection info back through volume api Change-Id: I1b1626f40bebe8466ab410fb174683293c7c474f
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/ec2/test_cloud.py21
-rw-r--r--nova/tests/fake_flags.py4
-rw-r--r--nova/tests/integrated/test_volumes.py11
-rw-r--r--nova/tests/scheduler/test_scheduler.py5
-rw-r--r--nova/tests/test_compute.py301
-rw-r--r--nova/tests/test_libvirt.py136
-rw-r--r--nova/tests/test_virt_drivers.py8
-rw-r--r--nova/tests/test_volume.py82
-rw-r--r--nova/tests/test_xenapi.py24
9 files changed, 258 insertions, 334 deletions
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 0f22df751..b00e26a75 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1277,7 +1277,7 @@ class CloudTestCase(test.TestCase):
LOG.debug(info)
if predicate(info):
break
- greenthread.sleep(1)
+ greenthread.sleep(0.5)
def _wait_for_running(self, instance_id):
def is_running(info):
@@ -1296,6 +1296,16 @@ class CloudTestCase(test.TestCase):
def _wait_for_terminate(self, instance_id):
def is_deleted(info):
return info['deleted']
+ id = ec2utils.ec2_id_to_id(instance_id)
+ # NOTE(vish): Wait for InstanceNotFound, then verify that
+ # the instance is actually deleted.
+ while True:
+ try:
+ self.cloud.compute_api.get(self.context, instance_id=id)
+ except exception.InstanceNotFound:
+ break
+ greenthread.sleep(0.1)
+
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
@@ -1311,26 +1321,21 @@ class CloudTestCase(test.TestCase):
# a running instance can't be started. It is just ignored.
result = self.cloud.start_instances(self.context, [instance_id])
- greenthread.sleep(0.3)
self.assertTrue(result)
result = self.cloud.stop_instances(self.context, [instance_id])
- greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.start_instances(self.context, [instance_id])
- greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_running(instance_id)
result = self.cloud.stop_instances(self.context, [instance_id])
- greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.terminate_instances(self.context, [instance_id])
- greenthread.sleep(0.3)
self.assertTrue(result)
self._restart_compute_service()
@@ -1542,24 +1547,20 @@ class CloudTestCase(test.TestCase):
self.assertTrue(vol2_id)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
- greenthread.sleep(0.3)
self._wait_for_terminate(ec2_instance_id)
- greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=False)
vol = db.volume_get(admin_ctxt, vol1_id)
self._assert_volume_detached(vol)
self.assertFalse(vol['deleted'])
db.volume_destroy(self.context, vol1_id)
- greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=True)
vol = db.volume_get(admin_ctxt, vol2_id)
self.assertTrue(vol['deleted'])
for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
self.cloud.delete_snapshot(self.context, snapshot_id)
- greenthread.sleep(0.3)
db.volume_destroy(self.context, vol['id'])
def test_create_image(self):
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 073216495..6c4de8481 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -33,11 +33,7 @@ FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True)
FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
-flags.DECLARE('num_shelves', 'nova.volume.driver')
-flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
-FLAGS['num_shelves'].SetDefault(2)
-FLAGS['blades_per_shelf'].SetDefault(4)
FLAGS['iscsi_num_targets'].SetDefault(8)
FLAGS['verbose'].SetDefault(True)
FLAGS['sqlite_db'].SetDefault("tests.sqlite")
diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py
index 36c0592eb..7d803736f 100644
--- a/nova/tests/integrated/test_volumes.py
+++ b/nova/tests/integrated/test_volumes.py
@@ -263,22 +263,23 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
- # Discover_volume and undiscover_volume are called from compute
+ # prepare_attach and prepare_detach are called from compute
# on attach/detach
disco_moves = driver.LoggingVolumeDriver.logs_like(
- 'discover_volume',
+ 'initialize_connection',
id=volume_id)
- LOG.debug("discover_volume actions: %s" % disco_moves)
+ LOG.debug("initialize_connection actions: %s" % disco_moves)
self.assertEquals(1, len(disco_moves))
disco_move = disco_moves[0]
self.assertEquals(disco_move['id'], volume_id)
last_days_of_disco_moves = driver.LoggingVolumeDriver.logs_like(
- 'undiscover_volume',
+ 'terminate_connection',
id=volume_id)
- LOG.debug("undiscover_volume actions: %s" % last_days_of_disco_moves)
+ LOG.debug("terminate_connection actions: %s" %
+ last_days_of_disco_moves)
self.assertEquals(1, len(last_days_of_disco_moves))
undisco_move = last_days_of_disco_moves[0]
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 77db3520b..6f708691b 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -1080,7 +1080,8 @@ class SimpleDriverTestCase(test.TestCase):
rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
{"method": 'compare_cpu',
"args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
- AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
+ AndRaise(rpc.RemoteError(exception.InvalidCPUInfo,
+ exception.InvalidCPUInfo(reason='fake')))
self.mox.ReplayAll()
try:
@@ -1089,7 +1090,7 @@ class SimpleDriverTestCase(test.TestCase):
dest,
False)
except rpc.RemoteError, e:
- c = (e.message.find(_("doesn't have compatibility to")) >= 0)
+ c = (e.exc_type == exception.InvalidCPUInfo)
self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 786ddd0ee..6ac8ca7d4 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -21,6 +21,7 @@ Tests For Compute
"""
from copy import copy
+import mox
from nova import compute
from nova import context
@@ -159,21 +160,6 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project_id}
return db.security_group_create(self.context, values)
- def _get_dummy_instance(self):
- """Get mock-return-value instance object
- Use this when any testcase executed later than test_run_terminate
- """
- vol1 = models.Volume()
- vol1['id'] = 1
- vol2 = models.Volume()
- vol2['id'] = 2
- instance_ref = models.Instance()
- instance_ref['id'] = 1
- instance_ref['volumes'] = [vol1, vol2]
- instance_ref['hostname'] = 'hostname-1'
- instance_ref['host'] = 'dummy'
- return instance_ref
-
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -726,235 +712,124 @@ class ComputeTestCase(test.TestCase):
def test_pre_live_migration_instance_has_no_fixed_ip(self):
"""Confirm raising exception if instance doesn't have fixed_ip."""
- instance_ref = self._get_dummy_instance()
+ # creating instance testdata
+ instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context()
- i_id = instance_ref['id']
+ inst_ref = db.instance_get(c, instance_id)
+ topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
- dbmock = self.mox.CreateMock(db)
- dbmock.instance_get(c, i_id).AndReturn(instance_ref)
-
- self.compute.db = dbmock
- self.mox.ReplayAll()
- self.assertRaises(exception.NotFound,
+ # start test
+ self.assertRaises(exception.FixedIpNotFoundForInstance,
self.compute.pre_live_migration,
- c, instance_ref['id'], time=FakeTime())
+ c, inst_ref['id'], time=FakeTime())
+ # cleanup
+ db.instance_destroy(c, instance_id)
- def test_pre_live_migration_instance_has_volume(self):
+ def test_pre_live_migration_works_correctly(self):
"""Confirm setup_compute_volume is called when volume is mounted."""
- def fake_nw_info(*args, **kwargs):
- return [(0, {'ips':['dummy']})]
-
- i_ref = self._get_dummy_instance()
- c = context.get_admin_context()
-
- self._setup_other_managers()
- dbmock = self.mox.CreateMock(db)
- volmock = self.mox.CreateMock(self.volume_manager)
- drivermock = self.mox.CreateMock(self.compute_driver)
-
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- for i in range(len(i_ref['volumes'])):
- vid = i_ref['volumes'][i]['id']
- volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
- drivermock.plug_vifs(i_ref, fake_nw_info())
- drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info())
-
- self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
- self.compute.db = dbmock
- self.compute.volume_manager = volmock
- self.compute.driver = drivermock
-
- self.mox.ReplayAll()
- ret = self.compute.pre_live_migration(c, i_ref['id'])
- self.assertEqual(ret, None)
-
- def test_pre_live_migration_instance_has_no_volume(self):
- """Confirm log meg when instance doesn't mount any volumes."""
- def fake_nw_info(*args, **kwargs):
- return [(0, {'ips':['dummy']})]
-
- i_ref = self._get_dummy_instance()
- i_ref['volumes'] = []
+ # creating instance testdata
+ instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context()
-
- self._setup_other_managers()
- dbmock = self.mox.CreateMock(db)
- drivermock = self.mox.CreateMock(self.compute_driver)
-
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- self.mox.StubOutWithMock(compute_manager.LOG, 'info')
- compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
- drivermock.plug_vifs(i_ref, fake_nw_info())
- drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info())
-
- self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
- self.compute.db = dbmock
- self.compute.driver = drivermock
-
+ inst_ref = db.instance_get(c, instance_id)
+ topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
+
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
+ self.compute.driver.pre_live_migration({'block_device_mapping': []})
+ dummy_nw_info = [[None, {'ips':'1.1.1.1'}]]
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.compute._get_instance_nw_info(c, mox.IsA(inst_ref)
+ ).AndReturn(dummy_nw_info)
+ self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+ self.compute.driver.plug_vifs(mox.IsA(inst_ref), dummy_nw_info)
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'ensure_filtering_rules_for_instance')
+ self.compute.driver.ensure_filtering_rules_for_instance(
+ mox.IsA(inst_ref), dummy_nw_info)
+
+ # start test
self.mox.ReplayAll()
- ret = self.compute.pre_live_migration(c, i_ref['id'], time=FakeTime())
+ ret = self.compute.pre_live_migration(c, inst_ref['id'])
self.assertEqual(ret, None)
- def test_pre_live_migration_setup_compute_node_fail(self):
- """Confirm operation setup_compute_network() fails.
-
- It retries and raise exception when timeout exceeded.
-
- """
- def fake_nw_info(*args, **kwargs):
- return [(0, {'ips':['dummy']})]
-
- i_ref = self._get_dummy_instance()
- c = context.get_admin_context()
-
- self._setup_other_managers()
- dbmock = self.mox.CreateMock(db)
- netmock = self.mox.CreateMock(self.network_manager)
- volmock = self.mox.CreateMock(self.volume_manager)
- drivermock = self.mox.CreateMock(self.compute_driver)
-
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- for i in range(len(i_ref['volumes'])):
- volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
- for i in range(FLAGS.live_migration_retry_count):
- drivermock.plug_vifs(i_ref, fake_nw_info()).\
- AndRaise(exception.ProcessExecutionError())
-
- self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
- self.compute.db = dbmock
- self.compute.network_manager = netmock
- self.compute.volume_manager = volmock
- self.compute.driver = drivermock
-
- self.mox.ReplayAll()
- self.assertRaises(exception.ProcessExecutionError,
- self.compute.pre_live_migration,
- c, i_ref['id'], time=FakeTime())
-
- def test_live_migration_works_correctly_with_volume(self):
- """Confirm check_for_export to confirm volume health check."""
- i_ref = self._get_dummy_instance()
- c = context.get_admin_context()
- topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
-
- dbmock = self.mox.CreateMock(db)
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
- "args": {'instance_id': i_ref['id']}})
- dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
- AndReturn(topic)
- rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id'],
- 'block_migration': False,
- 'disk': None}})
-
- self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
- self.compute.driver.live_migration(c, i_ref, i_ref['host'],
- self.compute.post_live_migration,
- self.compute.rollback_live_migration,
- False)
-
- self.compute.db = dbmock
- self.mox.ReplayAll()
- ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
- self.assertEqual(ret, None)
+ # cleanup
+ db.instance_destroy(c, instance_id)
def test_live_migration_dest_raises_exception(self):
"""Confirm exception when pre_live_migration fails."""
- i_ref = self._get_dummy_instance()
+ # creating instance testdata
+ instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context()
- topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
-
- dbmock = self.mox.CreateMock(db)
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ inst_ref = db.instance_get(c, instance_id)
+ topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
+ # creating volume testdata
+ volume_id = 1
+ db.volume_create(c, {'id': volume_id})
+ values = {'instance_id': instance_id, 'device_name': '/dev/vdc',
+ 'delete_on_termination': False, 'volume_id': volume_id}
+ db.block_device_mapping_create(c, values)
+
+ # creating mocks
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
- "args": {'instance_id': i_ref['id']}})
- dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
- AndReturn(topic)
+ "args": {'instance_id': instance_id}})
rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id'],
- 'block_migration': False,
+ "args": {'instance_id': instance_id,
+ 'block_migration': True,
'disk': None}}).\
- AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'host': i_ref['host']})
- for v in i_ref['volumes']:
- dbmock.volume_update(c, v['id'], {'status': 'in-use'})
- # mock for volume_api.remove_from_compute
- rpc.call(c, topic, {"method": "remove_volume",
- "args": {'volume_id': v['id']}})
-
- self.compute.db = dbmock
+ AndRaise(rpc.common.RemoteError('', '', ''))
+ # mocks for rollback
+ rpc.call(c, topic, {"method": "remove_volume_connection",
+ "args": {'instance_id': instance_id,
+ 'volume_id': volume_id}})
+ rpc.cast(c, topic, {"method": "rollback_live_migration_at_destination",
+ "args": {'instance_id': inst_ref['id']}})
+
+ # start test
self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
- c, i_ref['id'], i_ref['host'])
-
- def test_live_migration_dest_raises_exception_no_volume(self):
- """Same as above test(input pattern is different) """
- i_ref = self._get_dummy_instance()
- i_ref['volumes'] = []
- c = context.get_admin_context()
- topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+ c, instance_id, inst_ref['host'], True)
- dbmock = self.mox.CreateMock(db)
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
- AndReturn(topic)
- self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id'],
- 'block_migration': False,
- 'disk': None}}).\
- AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'host': i_ref['host']})
-
- self.compute.db = dbmock
- self.mox.ReplayAll()
- self.assertRaises(rpc.RemoteError,
- self.compute.live_migration,
- c, i_ref['id'], i_ref['host'])
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(c,
+ instance_id):
+ db.block_device_mapping_destroy(c, bdms['id'])
+ db.volume_destroy(c, volume_id)
+ db.instance_destroy(c, instance_id)
- def test_live_migration_works_correctly_no_volume(self):
+ def test_live_migration_works_correctly(self):
"""Confirm live_migration() works as expected correctly."""
- i_ref = self._get_dummy_instance()
- i_ref['volumes'] = []
+ # creating instance testdata
+ instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context()
- topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+ inst_ref = db.instance_get(c, instance_id)
+ topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
- dbmock = self.mox.CreateMock(db)
- dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ # create
self.mox.StubOutWithMock(rpc, 'call')
- dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
- AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id'],
+ "args": {'instance_id': instance_id,
'block_migration': False,
'disk': None}})
- self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
- self.compute.driver.live_migration(c, i_ref, i_ref['host'],
- self.compute.post_live_migration,
- self.compute.rollback_live_migration,
- False)
- self.compute.db = dbmock
+ # start test
self.mox.ReplayAll()
- ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ ret = self.compute.live_migration(c, inst_ref['id'], inst_ref['host'])
self.assertEqual(ret, None)
+ # cleanup
+ db.instance_destroy(c, instance_id)
+
def test_post_live_migration_working_correctly(self):
"""Confirm post_live_migration() works as expected correctly."""
dest = 'desthost'
flo_addr = '1.2.1.2'
- # Preparing datas
+ # creating testdata
c = context.get_admin_context()
- instance_id = self._create_instance()
+ instance_id = self._create_instance({'state_description': 'migrating',
+ 'state': power_state.PAUSED})
i_ref = db.instance_get(c, instance_id)
db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING,
'power_state': power_state.PAUSED})
@@ -964,14 +839,8 @@ class ComputeTestCase(test.TestCase):
fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
flo_ref = db.floating_ip_create(c, {'address': flo_addr,
'fixed_ip_id': fix_ref['id']})
- # reload is necessary before setting mocks
- i_ref = db.instance_get(c, instance_id)
- # Preparing mocks
- self.mox.StubOutWithMock(self.compute.volume_manager,
- 'remove_compute_volume')
- for v in i_ref['volumes']:
- self.compute.volume_manager.remove_compute_volume(c, v['id'])
+ # creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref, [])
self.mox.StubOutWithMock(rpc, 'call')
@@ -979,18 +848,18 @@ class ComputeTestCase(test.TestCase):
{"method": "post_live_migration_at_destination",
"args": {'instance_id': i_ref['id'], 'block_migration': False}})
- # executing
+ # start test
self.mox.ReplayAll()
ret = self.compute.post_live_migration(c, i_ref, dest)
- # make sure every data is rewritten to dest
+ # make sure every data is rewritten to destinatioin hostname.
i_ref = db.instance_get(c, i_ref['id'])
c1 = (i_ref['host'] == dest)
flo_refs = db.floating_ip_get_all_by_host(c, dest)
c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
-
- # post operaton
self.assertTrue(c1 and c2)
+
+ # cleanup
db.instance_destroy(c, instance_id)
db.volume_destroy(c, v_ref['id'])
db.floating_ip_destroy(c, flo_addr)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 8b6f2080d..d59fb8f56 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -30,6 +30,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
from nova.api.ec2 import cloud
@@ -38,10 +39,13 @@ from nova.compute import vm_states
from nova.virt import driver
from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall
+from nova.virt.libvirt import volume
+from nova.volume import driver as volume_driver
from nova.tests import fake_network
libvirt = None
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.test_libvirt')
_fake_network_info = fake_network.fake_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
@@ -87,6 +91,71 @@ class FakeVirtDomain(object):
return self._fake_dom_xml
+class LibvirtVolumeTestCase(test.TestCase):
+
+ @staticmethod
+ def fake_execute(*cmd, **kwargs):
+ LOG.debug("FAKE EXECUTE: %s" % ' '.join(cmd))
+ return None, None
+
+ def setUp(self):
+ super(LibvirtVolumeTestCase, self).setUp()
+ self.stubs.Set(utils, 'execute', self.fake_execute)
+
+ def test_libvirt_iscsi_driver(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ vol_driver = volume_driver.ISCSIDriver()
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver('fake')
+ name = 'volume-00000001'
+ vol = {'id': 1,
+ 'name': name,
+ 'provider_auth': None,
+ 'provider_location': '10.0.2.15:3260,fake '
+ 'iqn.2010-10.org.openstack:volume-00000001'}
+ address = '127.0.0.1'
+ connection_info = vol_driver.initialize_connection(vol, address)
+ mount_device = "vde"
+ xml = libvirt_driver.connect_volume(connection_info, mount_device)
+ tree = xml_to_tree(xml)
+ dev_str = '/dev/disk/by-path/ip-10.0.2.15:3260-iscsi-iqn.' \
+ '2010-10.org.openstack:%s-lun-0' % name
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+
+ def test_libvirt_sheepdog_driver(self):
+ vol_driver = volume_driver.SheepdogDriver()
+ libvirt_driver = volume.LibvirtNetVolumeDriver('fake')
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ address = '127.0.0.1'
+ connection_info = vol_driver.initialize_connection(vol, address)
+ mount_device = "vde"
+ xml = libvirt_driver.connect_volume(connection_info, mount_device)
+ tree = xml_to_tree(xml)
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
+ self.assertEqual(tree.find('./source').get('name'), name)
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+
+ def test_libvirt_rbd_driver(self):
+ vol_driver = volume_driver.RBDDriver()
+ libvirt_driver = volume.LibvirtNetVolumeDriver('fake')
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ address = '127.0.0.1'
+ connection_info = vol_driver.initialize_connection(vol, address)
+ mount_device = "vde"
+ xml = libvirt_driver.connect_volume(connection_info, mount_device)
+ tree = xml_to_tree(xml)
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+
+
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
@@ -145,6 +214,20 @@ class CacheConcurrencyTestCase(test.TestCase):
eventlet.sleep(0)
+class FakeVolumeDriver(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def attach_volume(self, *args):
+ pass
+
+ def detach_volume(self, *args):
+ pass
+
+ def get_xml(self, *args):
+ return ""
+
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
@@ -192,14 +275,14 @@ class LibvirtConnTestCase(test.TestCase):
return FakeVirtDomain()
# Creating mocks
+ volume_driver = 'iscsi=nova.tests.test_libvirt.FakeVolumeDriver'
+ self.flags(libvirt_volume_drivers=[volume_driver])
fake = FakeLibvirtConnection()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
self.flags(image_service='nova.image.fake.FakeImageService')
- fw_driver = "nova.tests.fake_network.FakeIptablesFirewallDriver"
- self.flags(firewall_driver=fw_driver)
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
@@ -382,14 +465,16 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
- def test_attach_invalid_device(self):
+ def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
- self.assertRaises(exception.InvalidDevicePath,
+ self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
- "fake", "bad/device/path", "/dev/fake")
+ {"driver_volume_type": "badtype"},
+ "fake",
+ "/dev/fake")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
@@ -640,9 +725,15 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
try:
conn = connection.LibvirtConnection(False)
- conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
- conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
- conn.firewall_driver.setattr('instance_filter_exists', fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'setup_basic_filtering',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'prepare_instance_filter',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'instance_filter_exists',
+ fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref,
network_info,
time=fake_timer)
@@ -708,6 +799,27 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
+ def test_pre_live_migration_works_correctly(self):
+ """Confirms pre_block_migration works correctly."""
+ # Creating testdata
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+ conn = connection.LibvirtConnection(False)
+
+ # Creating mocks
+ self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
+ driver.block_device_info_get_mapping(vol
+ ).AndReturn(vol['block_device_mapping'])
+ self.mox.StubOutWithMock(conn, "volume_driver_method")
+ for v in vol['block_device_mapping']:
+ conn.volume_driver_method('connect_volume',
+ v['connection_info'], v['mount_device'])
+
+ # Starting test
+ self.mox.ReplayAll()
+ self.assertEqual(conn.pre_live_migration(vol), None)
+
def test_pre_block_migration_works_correctly(self):
"""Confirms pre_block_migration works correctly."""
@@ -822,8 +934,12 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
- conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
- conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'setup_basic_filtering',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'prepare_instance_filter',
+ fake_none)
try:
conn.spawn(self.context, instance, network_info)
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index d4180b6f7..fed89a2ec 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -254,9 +254,11 @@ class _VirtDriverTestCase(test.TestCase):
network_info = test_utils.get_test_network_info()
instance_ref = test_utils.get_test_instance()
self.connection.spawn(self.ctxt, instance_ref, network_info)
- self.connection.attach_volume(instance_ref['name'],
- '/dev/null', '/mnt/nova/something')
- self.connection.detach_volume(instance_ref['name'],
+ self.connection.attach_volume({'driver_volume_type': 'fake'},
+ instance_ref['name'],
+ '/mnt/nova/something')
+ self.connection.detach_volume({'driver_volume_type': 'fake'},
+ instance_ref['name'],
'/mnt/nova/something')
@catch_notimplementederror
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index 7888b6b0b..88a73f550 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -257,7 +257,7 @@ class VolumeTestCase(test.TestCase):
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
- driver_name = "nova.volume.driver.FakeAOEDriver"
+ driver_name = "nova.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
@@ -295,83 +295,6 @@ class DriverTestCase(test.TestCase):
self.volume.delete_volume(self.context, volume_id)
-class AOETestCase(DriverTestCase):
- """Test Case for AOEDriver"""
- driver_name = "nova.volume.driver.AOEDriver"
-
- def setUp(self):
- super(AOETestCase, self).setUp()
-
- def tearDown(self):
- super(AOETestCase, self).tearDown()
-
- def _attach_volume(self):
- """Attach volumes to an instance. This function also sets
- a fake log message."""
- volume_id_list = []
- for index in xrange(3):
- vol = {}
- vol['size'] = 0
- volume_id = db.volume_create(self.context,
- vol)['id']
- self.volume.create_volume(self.context, volume_id)
-
- # each volume has a different mountpoint
- mountpoint = "/dev/sd" + chr((ord('b') + index))
- db.volume_attached(self.context, volume_id, self.instance_id,
- mountpoint)
-
- (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
- volume_id)
- self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
- % (shelf_id, blade_id)
-
- volume_id_list.append(volume_id)
-
- return volume_id_list
-
- def test_check_for_export_with_no_volume(self):
- """No log message when no volume is attached to an instance."""
- self.stream.truncate(0)
- self.volume.check_for_export(self.context, self.instance_id)
- self.assertEqual(self.stream.getvalue(), '')
-
- def test_check_for_export_with_all_vblade_processes(self):
- """No log message when all the vblade processes are running."""
- volume_id_list = self._attach_volume()
-
- self.stream.truncate(0)
- self.volume.check_for_export(self.context, self.instance_id)
- self.assertEqual(self.stream.getvalue(), '')
-
- self._detach_volume(volume_id_list)
-
- def test_check_for_export_with_vblade_process_missing(self):
- """Output a warning message when some vblade processes aren't
- running."""
- volume_id_list = self._attach_volume()
-
- # the first vblade process isn't running
- self.output = self.output.replace("run", "down", 1)
- (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
- volume_id_list[0])
-
- msg_is_match = False
- self.stream.truncate(0)
- try:
- self.volume.check_for_export(self.context, self.instance_id)
- except exception.ProcessExecutionError, e:
- volume_id = volume_id_list[0]
- msg = _("Cannot confirm exported volume id:%(volume_id)s. "
- "vblade process for e%(shelf_id)s.%(blade_id)s "
- "isn't running.") % locals()
-
- msg_is_match = (0 <= e.message.find(msg))
-
- self.assertTrue(msg_is_match)
- self._detach_volume(volume_id_list)
-
-
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "nova.volume.driver.ISCSIDriver"
@@ -408,7 +331,7 @@ class ISCSITestCase(DriverTestCase):
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
- """No log message when all the vblade processes are running."""
+ """No log message when all the processes are running."""
volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver, '_execute')
@@ -431,7 +354,6 @@ class ISCSITestCase(DriverTestCase):
by ietd."""
volume_id_list = self._attach_volume()
- # the first vblade process isn't running
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver, '_execute')
self.volume.driver._execute("ietadm", "--op", "show",
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 225d51aba..47488ddda 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -99,6 +99,20 @@ class XenAPIVolumeTestCase(test.TestCase):
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
+ @staticmethod
+ def _make_info():
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': 1,
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'auth_method': 'CHAP',
+ 'auth_method': 'fake',
+ 'auth_method': 'fake',
+ }
+ }
+
def test_create_iscsi_storage(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
@@ -106,7 +120,7 @@ class XenAPIVolumeTestCase(test.TestCase):
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
- info = helper.parse_volume_info(vol['id'], '/dev/sdc')
+ info = helper.parse_volume_info(self._make_info(), '/dev/sdc')
label = 'SR-%s' % vol['id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
@@ -124,8 +138,9 @@ class XenAPIVolumeTestCase(test.TestCase):
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
- vol['id'],
- '/dev/sd')
+ self._make_info(),
+ 'dev/sd'
+ )
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
@@ -135,7 +150,8 @@ class XenAPIVolumeTestCase(test.TestCase):
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
- result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
+ result = conn.attach_volume(self._make_info(),
+ instance.name, '/dev/sdc')
def check():
# check that the VM has a VBD attached to it