author    Salvatore Orlando <salvatore.orlando@eu.citrix.com>    2011-03-14 21:23:27 +0000
committer Salvatore Orlando <salvatore.orlando@eu.citrix.com>    2011-03-14 21:23:27 +0000
commit    915fb5049258cf74a2fb3ae27789987f0757bc91 (patch)
tree      d222ced284ca177445a3e0908f7855fff30f8a65 /nova/tests
parent    3a4f5de30362bb32846db69b98cda16eae88ee99 (diff)
parent    8c2a4a565e718e594a2f42ff84eb4b9017ef15a7 (diff)
merge trunk
Diffstat (limited to 'nova/tests')
-rw-r--r--  nova/tests/test_compute.py    294
-rw-r--r--  nova/tests/test_scheduler.py  623
-rw-r--r--  nova/tests/test_service.py     41
-rw-r--r--  nova/tests/test_virt.py       228
-rw-r--r--  nova/tests/test_volume.py     195
5 files changed, 1376 insertions(+), 5 deletions(-)
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 643b2e93a..e486050be 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
+import mox
from nova import compute
from nova import context
@@ -27,15 +28,20 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
class ComputeTestCase(test.TestCase):
@@ -83,6 +89,41 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)
+ def _get_dummy_instance(self):
+ """Get mock-return-value instance object
+ Use this when any testcase executed later than test_run_terminate
+ """
+ vol1 = models.Volume()
+ vol1['id'] = 1
+ vol2 = models.Volume()
+ vol2['id'] = 2
+ instance_ref = models.Instance()
+ instance_ref['id'] = 1
+ instance_ref['volumes'] = [vol1, vol2]
+ instance_ref['hostname'] = 'i-00000001'
+ instance_ref['host'] = 'dummy'
+ return instance_ref
+
+    def test_create_instance_associates_security_groups(self):
+        """Make sure create() associates security groups."""
+        group = self._create_group()
+        ref = self.compute_api.create(self.context,
+                FLAGS.default_instance_type, None,
+                security_group=[group['name']])
+        try:
+            self.assertEqual(len(db.security_group_get_by_instance(
+                             self.context, ref[0]['id'])), 1)
+        finally:
+            db.security_group_destroy(self.context, group['id'])
+            db.instance_destroy(self.context, ref[0]['id'])
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -301,3 +342,256 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
type = instance_types.get_by_flavor_id("1")
self.assertEqual(type, 'm1.tiny')
+
+ def _setup_other_managers(self):
+ self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ """Confirm raising exception if instance doesn't have fixed_ip."""
+ instance_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+ self.compute.db = dbmock
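+        # Switch mox from record mode to replay mode; calls that do not
+        # match the expectations recorded above will raise.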
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NotFound,
+ self.compute.pre_live_migration,
+ c, instance_ref['id'])
+
+ def test_pre_live_migration_instance_has_volume(self):
+ """Confirm setup_compute_volume is called when volume is mounted."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ volmock = self.mox.CreateMock(self.volume_manager)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
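+        # InAnyOrder('g1') places these expectations in group 'g1', so the
+        # per-volume setup calls may be replayed in any order.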
+ for i in range(len(i_ref['volumes'])):
+ vid = i_ref['volumes'][i]['id']
+ volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.volume_manager = volmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """Confirm log meg when instance doesn't mount any volumes."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
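+        # Stub the manager's logger so the expected "no volume" message
+        # itself becomes part of the mock expectations.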
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
+ self.assertEqual(ret, None)
+
+    def test_pre_live_migration_setup_compute_network_fail(self):
+        """Confirm behavior when setup_compute_network() fails.
+
+        The operation is retried, and an exception is raised once the
+        retry count is exceeded.
+ """
+
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ volmock = self.mox.CreateMock(self.volume_manager)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
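+        # Expect one failing setup_compute_network() call per retry; the
+        # manager should give up once live_migration_retry_count is hit.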
+ for i in range(FLAGS.live_migration_retry_count):
+ netmock.setup_compute_network(c, i_ref['id']).\
+ AndRaise(exception.ProcessExecutionError())
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.volume_manager = volmock
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.compute.pre_live_migration,
+ c, i_ref['id'])
+
+ def test_live_migration_works_correctly_with_volume(self):
+ """Confirm check_for_export to confirm volume health check."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_live_migration_dest_raises_exception(self):
+ """Confirm exception when pre_live_migration fails."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_dest_raises_exception_no_volume(self):
+ """Same as above test(input pattern is different) """
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_works_correctly_no_volume(self):
+ """Confirm live_migration() works as expected correctly."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_post_live_migration_working_correctly(self):
+ """Confirm post_live_migration() works as expected correctly."""
+ dest = 'desthost'
+ flo_addr = '1.2.1.2'
+
+        # Preparing data
+ c = context.get_admin_context()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(c, instance_id)
+ db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+ v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
+ fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
+ 'instance_id': instance_id})
+ fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
+ flo_ref = db.floating_ip_create(c, {'address': flo_addr,
+ 'fixed_ip_id': fix_ref['id']})
+ # reload is necessary before setting mocks
+ i_ref = db.instance_get(c, instance_id)
+
+ # Preparing mocks
+ self.mox.StubOutWithMock(self.compute.volume_manager,
+ 'remove_compute_volume')
+ for v in i_ref['volumes']:
+ self.compute.volume_manager.remove_compute_volume(c, v['id'])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
+
+ # executing
+ self.mox.ReplayAll()
+ ret = self.compute.post_live_migration(c, i_ref, dest)
+
+        # make sure all data has been rewritten to dest
+ i_ref = db.instance_get(c, i_ref['id'])
+ c1 = (i_ref['host'] == dest)
+ flo_refs = db.floating_ip_get_all_by_host(c, dest)
+ c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
+
+        # post operation
+ self.assertTrue(c1 and c2)
+ db.instance_destroy(c, instance_id)
+ db.volume_destroy(c, v_ref['id'])
+ db.floating_ip_destroy(c, flo_addr)
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index bb279ac4b..244e43bd9 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,10 +20,12 @@ Tests For Scheduler
"""
import datetime
+import mox
from mox import IgnoreArg
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import service
from nova import test
@@ -32,11 +34,14 @@ from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('instances_path', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -54,6 +59,34 @@ class SchedulerTestCase(test.TestCase):
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
+ def _create_compute_service(self):
+ """Create compute-manager(ComputeNode and Service record)."""
+ ctxt = context.get_admin_context()
+ dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ s_ref = db.service_create(ctxt, dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ db.compute_node_create(ctxt, dic)
+
+ return db.service_get(ctxt, s_ref['id'])
+
+ def _create_instance(self, **kwargs):
+ """Create a test instance"""
+ ctxt = context.get_admin_context()
+ inst = {}
+ inst['user_id'] = 'admin'
+ inst['project_id'] = kwargs.get('project_id', 'fake')
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['vcpus'] = kwargs.get('vcpus', 1)
+ inst['memory_mb'] = kwargs.get('memory_mb', 10)
+ inst['local_gb'] = kwargs.get('local_gb', 20)
+ return db.instance_create(ctxt, inst)
+
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
@@ -76,6 +109,73 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
+    def test_show_host_resources_host_not_exist(self):
+        """The host given as an argument does not exist."""
+
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+
+ try:
+ scheduler.show_host_resources(ctxt, dest)
+ except exception.NotFound, e:
+ c1 = (e.message.find(_("does not exist or is not a "
+ "compute node.")) >= 0)
+ self.assertTrue(c1)
+
+ def _dic_is_equal(self, dic1, dic2, keys=None):
+ """Compares 2 dictionary contents(Helper method)"""
+ if not keys:
+ keys = ['vcpus', 'memory_mb', 'local_gb',
+ 'vcpus_used', 'memory_mb_used', 'local_gb_used']
+
+ for key in keys:
+ if not (dic1[key] == dic2[key]):
+ return False
+ return True
+
+ def test_show_host_resources_no_project(self):
+ """No instance are running on the given host."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ # result checking
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'] == {}
+ self.assertTrue(c1 and c2 and c3)
+ db.service_destroy(ctxt, s_ref['id'])
+
+ def test_show_host_resources_works_correctly(self):
+ """Show_host_resources() works correctly as expected."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+ i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
+ i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
+ host=s_ref['host'])
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'].keys() == ['p-01', 'p-02']
+ keys = ['vcpus', 'memory_mb', 'local_gb']
+ c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
+ c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys)
+ self.assertTrue(c1 and c2 and c3 and c4 and c5)
+
+ db.service_destroy(ctxt, s_ref['id'])
+ db.instance_destroy(ctxt, i_ref1['id'])
+ db.instance_destroy(ctxt, i_ref2['id'])
+
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -161,9 +261,15 @@ class SimpleDriverTestCase(test.TestCase):
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
+ inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
- inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['memory_mb'] = kwargs.get('memory_mb', 20)
+ inst['local_gb'] = kwargs.get('local_gb', 30)
+        inst['launched_on'] = kwargs.get('launched_on', 'dummy')
+ inst['state_description'] = kwargs.get('state_description', 'running')
+ inst['state'] = kwargs.get('state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -173,6 +279,211 @@ class SimpleDriverTestCase(test.TestCase):
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
+ def _create_compute_service(self, **kwargs):
+ """Create a compute service."""
+
+ dic = {'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ dic['host'] = kwargs.get('host', 'dummy')
+ s_ref = db.service_create(self.context, dic)
+ if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
+ t = datetime.datetime.utcnow() - datetime.timedelta(0)
+ dic['created_at'] = kwargs.get('created_at', t)
+ dic['updated_at'] = kwargs.get('updated_at', t)
+ db.service_update(self.context, s_ref['id'], dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
+ dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
+ dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
+ db.compute_node_create(self.context, dic)
+ return db.service_get(self.context, s_ref['id'])
+
+ def test_doesnt_report_disabled_hosts_as_up(self):
+ """Ensures driver doesn't find hosts before they are enabled"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ db.service_update(self.context, s2['id'], {'disabled': True})
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(0, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_reports_enabled_hosts_as_up(self):
+ """Ensures driver can find the hosts that are up"""
+ # NOTE(vish): constructing service without create method
+ # because we are going to use it without queue
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(2, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_instance(self):
+ """Ensures the host with less cores gets the next one"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance()
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual(host, 'host2')
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_specific_host_gets_instance(self):
+ """Ensures if you set availability_zone it launches on that zone"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+    def test_wont_schedule_if_specified_host_is_down(self):
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ now = datetime.datetime.utcnow()
+ delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+ past = now - delta
+ db.service_update(self.context, s1['id'], {'updated_at': past})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ self.assertRaises(driver.WillNotSchedule,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id2)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_will_schedule_on_disabled_host_if_specified(self):
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_too_many_cores(self):
+ """Ensures we don't go over max cores"""
+ compute1 = service.Service('host1',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute1.start()
+ compute2 = service.Service('host2',
+ 'nova-compute',
+ 'compute',
+ FLAGS.compute_manager)
+ compute2.start()
+ instance_ids1 = []
+ instance_ids2 = []
+ for index in xrange(FLAGS.max_cores):
+ instance_id = self._create_instance()
+ compute1.run_instance(self.context, instance_id)
+ instance_ids1.append(instance_id)
+ instance_id = self._create_instance()
+ compute2.run_instance(self.context, instance_id)
+ instance_ids2.append(instance_id)
+ instance_id = self._create_instance()
+ self.assertRaises(driver.NoValidHost,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id)
+ for instance_id in instance_ids1:
+ compute1.terminate_instance(self.context, instance_id)
+ for instance_id in instance_ids2:
+ compute2.terminate_instance(self.context, instance_id)
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_volume(self):
+ """Ensures the host with less gigabytes gets the next one"""
+ volume1 = service.Service('host1',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume1.start()
+ volume2 = service.Service('host2',
+ 'nova-volume',
+ 'volume',
+ FLAGS.volume_manager)
+ volume2.start()
+ volume_id1 = self._create_volume()
+ volume1.create_volume(self.context, volume_id1)
+ volume_id2 = self._create_volume()
+ host = self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(host, 'host2')
+ volume1.delete_volume(self.context, volume_id1)
+ db.volume_destroy(self.context, volume_id2)
+        volume1.kill()
+        volume2.kill()
@@ -316,3 +627,313 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
+
+ def test_scheduler_live_migration_with_volume(self):
+ """scheduler_live_migration() works correctly as expected.
+
+ Also, checks instance state is changed from 'running' -> 'migrating'.
+
+ """
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ dic = {'instance_id': instance_id, 'size': 1}
+ v_ref = db.volume_create(self.context, dic)
+
+        # Cannot check the 2nd argument because the address of the
+        # instance object differs between calls.
+ driver_i = self.scheduler.driver
+ nocare = mox.IgnoreArg()
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_src_check(nocare, nocare)
+ driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
+ driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+ rpc.cast(self.context,
+ db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
+ {"method": 'live_migration', "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(self.context, FLAGS.compute_topic,
+ instance_id=instance_id,
+ dest=i_ref['host'])
+
+ i_ref = db.instance_get(self.context, instance_id)
+ self.assertTrue(i_ref['state_description'] == 'migrating')
+ db.instance_destroy(self.context, instance_id)
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_instance_not_running(self):
+ """The instance given by instance_id is not running."""
+
+ instance_id = self._create_instance(state_description='migrating')
+ i_ref = db.instance_get(self.context, instance_id)
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not running') > 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+
+ def test_live_migration_src_check_volume_node_not_alive(self):
+ """Raise exception when volume node is not alive."""
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ dic = {'instance_id': instance_id, 'size': 1}
+ v_ref = db.volume_create(self.context, {'instance_id': instance_id,
+ 'size': 1})
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
+ dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
+ 'topic': 'volume', 'report_count': 0}
+ s_ref = db.service_create(self.context, dic)
+
+ try:
+ self.scheduler.driver.schedule_live_migration(self.context,
+ instance_id,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('volume node is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_compute_node_not_alive(self):
+ """Confirms src-compute node is alive."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_src_check_works_correctly(self):
+ """Confirms this method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ ret = self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+
+ self.assertTrue(ret == None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_not_alive(self):
+ """Confirms exception raises in case dest host does not exist."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_same_host(self):
+ """Confirms exceptioin raises in case dest and src is same host."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('choose other host') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_lack_memory(self):
+ """Confirms exception raises when dest doesn't have enough memory."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=12)
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ except exception.NotEmpty, e:
+ c = (e.message.find('Unable to migrate') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_works_correctly(self):
+ """Confirms method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=5)
+
+ ret = self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ self.assertTrue(ret == None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_orig_not_exists(self):
+ """Destination host does not exist."""
+
+ dest = 'dummydest'
+ # mocks for live_migration_common_check()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
+ host=dest)
+
+ # mocks for mounted_on_same_shared_storage()
+ fpath = '/test/20110127120000'
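+        # Shared-storage check: create a test file on dest, verify it is
+        # visible from the source host, then clean it up on dest again.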
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ topic = FLAGS.compute_topic
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(self.context, topic, dest),
+ {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': fpath}})
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, dest),
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': fpath}})
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find('does not exist') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_different_hypervisor(self):
+ """Original host and dest host has different hypervisor type."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for the original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for the destination
+        s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Different hypervisor type')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_service_different_version(self):
+ """Original host and dest host has different hypervisor version."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for the original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for the destination
+        s_ref2 = self._create_compute_service(host=dest,
+ hypervisor_version=12002)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Older hypervisor version')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_checking_cpuinfo_fail(self):
+ """Raise excetion when original host doen't have compatible cpu."""
+
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+        # compute service for the original host
+        s_ref = self._create_compute_service(host=i_ref['host'])
+        # compute service for the destination
+        s_ref2 = self._create_compute_service(host=dest)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
+ rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
+ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except rpc.RemoteError, e:
+ c = (e.message.find(_("doesn't have compatibility to")) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 45d9afa6c..393f9d20b 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
@@ -251,3 +252,43 @@ class ServiceTestCase(test.TestCase):
serv.report_state()
self.assert_(not serv.model_disconnected)
+
+ def test_compute_can_update_available_resource(self):
+ """Confirm compute updates their record of compute-service table."""
+ host = 'foo'
+ binary = 'nova-compute'
+ topic = 'compute'
+
+        # Mocks do not work without UnsetStubs() here.
+ self.mox.UnsetStubs()
+ ctxt = context.get_admin_context()
+ service_ref = db.service_create(ctxt, {'host': host,
+ 'binary': binary,
+ 'topic': topic})
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.compute.manager.ComputeManager')
+
+        # This test case exercises update_available_resource().
+        # Periodic tasks are not needed, so the intervals below are set to 0.
+ serv.report_interval = 0
+ serv.periodic_interval = 0
+
+ # Creating mocks
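+        # Two Connection.instance() expectations: the service presumably
+        # opens one connection per consumer (topic and host-qualified topic).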
+ self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ self.mox.StubOutWithMock(serv.manager.driver,
+ 'update_available_resource')
+ serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
+
+        # Just do start()/stop() without confirming that a new db record is
+        # created, because update_available_resource() only works in a
+        # libvirt environment. This test case confirms that
+        # update_available_resource() is called; otherwise, mox complains.
+ self.mox.ReplayAll()
+ serv.start()
+ serv.stop()
+
+ db.service_destroy(ctxt, service_ref['id'])
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 27893043a..b214f5ce7 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,22 +14,29 @@
# License for the specific language governing permissions and limitations
# under the License.
-import re
+import eventlet
+import mox
import os
+import re
+import sys
-import eventlet
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
+libvirt = None
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
@@ -104,11 +111,28 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
+
+ try:
+ pjs = self.manager.get_projects()
+ pjs = [p for p in pjs if p.name == 'fake']
+ if 0 != len(pjs):
+ self.manager.delete_project(pjs[0])
+
+ users = self.manager.get_users()
+ users = [u for u in users if u.name == 'fake']
+ if 0 != len(users):
+ self.manager.delete_user(users[0])
+        except Exception:
+            pass
+
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.get_admin_context()
FLAGS.instances_path = ''
+ self.call_libvirt_dependant_setup = False
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
@@ -120,6 +144,58 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
+ def lazy_load_library_exists(self):
+ """check if libvirt is available."""
+        # Try to import libvirt; if this fails, callers skip the test.
+ try:
+ import libvirt
+ import libxml2
+ except ImportError:
+ return False
+ global libvirt
+ libvirt = __import__('libvirt')
+ libvirt_conn.libvirt = __import__('libvirt')
+ libvirt_conn.libxml2 = __import__('libxml2')
+ return True
+
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtConnection(libvirt is not used)."""
+
+ # A fake libvirt.virConnect
+ class FakeLibvirtConnection(object):
+ pass
+
+ # A fake libvirt_conn.IptablesFirewallDriver
+ class FakeIptablesFirewallDriver(object):
+
+ def __init__(self, **kwargs):
+ pass
+
+ def setattr(self, key, val):
+ self.__setattr__(key, val)
+
+ # Creating mocks
+ fake = FakeLibvirtConnection()
+ fakeip = FakeIptablesFirewallDriver
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ # Inevitable mocks for libvirt_conn.LibvirtConnection
+ self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
+ libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
+ libvirt_conn.LibvirtConnection._conn = fake
+
+ def create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': 'zone'}
+
+ return db.service_create(context.get_admin_context(), service_ref)
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -259,8 +335,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically
- # set uri
+ # This test is supposed to make sure we don't
+ # override a specifically set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -274,6 +350,150 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
+ def test_update_available_resource_works_correctly(self):
+ """Confirm compute_node table is updated successfully."""
+        org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+
+ # Prepare mocks
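+        # Fake only the libvirt calls update_available_resource() needs:
+        # hypervisor version, hypervisor type and the running domain list.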
+ def getVersion():
+ return 12003
+
+ def getType():
+ return 'qemu'
+
+ def listDomainsID():
+ return []
+
+ service_ref = self.create_service(host='dummy')
+ self.create_fake_libvirt_mock(getVersion=getVersion,
+ getType=getType,
+ listDomainsID=listDomainsID)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ 'get_cpu_info')
+ libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.update_available_resource(self.context, 'dummy')
+ service_ref = db.service_get(self.context, service_ref['id'])
+ compute_node = service_ref['compute_node'][0]
+
+ if sys.platform.upper() == 'LINUX2':
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] > 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] > 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+ else:
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] == 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] == 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+
+ db.service_destroy(self.context, service_ref['id'])
+ FLAGS.instances_path = org_path
+
+ def test_update_resource_info_no_compute_record_found(self):
+ """Raise exception if no recorde found on services table."""
+        org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+ self.create_fake_libvirt_mock()
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid,
+ conn.update_available_resource,
+ self.context, 'dummy')
+
+ FLAGS.instances_path = org_path
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing mocks
+ def fake_none(self):
+ return
+
+ def fake_raise(self):
+ raise libvirt.libvirtError('ERR')
+
+ self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ try:
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+ conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+ conn.ensure_filtering_rules_for_instance(instance_ref)
+ except exception.Error, e:
+ c1 = (0 <= e.message.find('Timeout migrating for'))
+ self.assertTrue(c1)
+
+ db.instance_destroy(self.context, instance_ref['id'])
+
+ def test_live_migration_raises_exception(self):
+ """Confirms recover method is called when exceptions are raised."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing data
+ self.compute = utils.import_object(FLAGS.compute_manager)
+ instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
+ 'state_description': 'running'}
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ instance_ref = db.instance_update(self.context, instance_ref['id'],
+ instance_dict)
+ vol_dict = {'status': 'migrating', 'size': 1}
+ volume_ref = db.volume_create(self.context, vol_dict)
+ db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
+ '/dev/fake')
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+ vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndRaise(libvirt.libvirtError('ERR'))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', '',
+ self.compute.recover_live_migration)
+
+ instance_ref = db.instance_get(self.context, instance_ref['id'])
+ self.assertTrue(instance_ref['state_description'] == 'running')
+ self.assertTrue(instance_ref['state'] == power_state.RUNNING)
+ volume_ref = db.volume_get(self.context, volume_ref['id'])
+ self.assertTrue(volume_ref['status'] == 'in-use')
+
+ db.volume_destroy(self.context, volume_ref['id'])
+ db.instance_destroy(self.context, instance_ref['id'])
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index f698c85b5..1b1d72092 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -20,6 +20,8 @@ Tests for Volume Code.
"""
+import cStringIO
+
from nova import context
from nova import exception
from nova import db
@@ -173,3 +175,196 @@ class VolumeTestCase(test.TestCase):
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
+
+
+class DriverTestCase(test.TestCase):
+ """Base Test class for Drivers."""
+ driver_name = "nova.volume.driver.FakeAOEDriver"
+
+ def setUp(self):
+ super(DriverTestCase, self).setUp()
+ self.flags(volume_driver=self.driver_name,
+ logging_default_format_string="%(message)s")
+ self.volume = utils.import_object(FLAGS.volume_manager)
+ self.context = context.get_admin_context()
+ self.output = ""
+
+ def _fake_execute(_command, *_args, **_kwargs):
+ """Fake _execute."""
+ return self.output, None
+ self.volume.driver._execute = _fake_execute
+ self.volume.driver._sync_execute = _fake_execute
+
+ log = logging.getLogger()
+ self.stream = cStringIO.StringIO()
+ log.addHandler(logging.StreamHandler(self.stream))
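+        # Capture log output in a StringIO buffer so tests can assert on
+        # the exact messages emitted by check_for_export().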
+
+ inst = {}
+ self.instance_id = db.instance_create(self.context, inst)['id']
+
+ def tearDown(self):
+ super(DriverTestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ return []
+
+ def _detach_volume(self, volume_id_list):
+ """Detach volumes from an instance."""
+ for volume_id in volume_id_list:
+ db.volume_detached(self.context, volume_id)
+ self.volume.delete_volume(self.context, volume_id)
+
+
+class AOETestCase(DriverTestCase):
+ """Test Case for AOEDriver"""
+ driver_name = "nova.volume.driver.AOEDriver"
+
+ def setUp(self):
+ super(AOETestCase, self).setUp()
+
+ def tearDown(self):
+ super(AOETestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ volume_id = db.volume_create(self.context,
+ vol)['id']
+ self.volume.create_volume(self.context, volume_id)
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, volume_id, self.instance_id,
+ mountpoint)
+
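+            # Look up the shelf/blade pair for this volume and fake one
+            # line of vblade-persist style output:
+            # <shelf> <blade> <interface> <device> auto run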
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id)
+ self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
+ % (shelf_id, blade_id)
+
+ volume_id_list.append(volume_id)
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_vblade_processes(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_vblade_process_missing(self):
+ """Output a warning message when some vblade processes aren't
+ running."""
+ volume_id_list = self._attach_volume()
+
+ # the first vblade process isn't running
+ self.output = self.output.replace("run", "down", 1)
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id_list[0])
+
+ msg_is_match = False
+ self.stream.truncate(0)
+ try:
+ self.volume.check_for_export(self.context, self.instance_id)
+ except exception.ProcessExecutionError, e:
+ volume_id = volume_id_list[0]
+ msg = _("Cannot confirm exported volume id:%(volume_id)s. "
+ "vblade process for e%(shelf_id)s.%(blade_id)s "
+ "isn't running.") % locals()
+
+ msg_is_match = (0 <= e.message.find(msg))
+
+ self.assertTrue(msg_is_match)
+ self._detach_volume(volume_id_list)
+
+
+class ISCSITestCase(DriverTestCase):
+ """Test Case for ISCSIDriver"""
+ driver_name = "nova.volume.driver.ISCSIDriver"
+
+ def setUp(self):
+ super(ISCSITestCase, self).setUp()
+
+ def tearDown(self):
+ super(ISCSITestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ vol_ref = db.volume_create(self.context, vol)
+ self.volume.create_volume(self.context, vol_ref['id'])
+ vol_ref = db.volume_get(self.context, vol_ref['id'])
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, vol_ref['id'], self.instance_id,
+ mountpoint)
+ volume_id_list.append(vol_ref['id'])
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_volume_exported(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
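+        # Expect one 'ietadm --op show' per attached volume; if none of
+        # the calls raises, every iSCSI target is considered exported.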
+ for i in volume_id_list:
+ tid = db.volume_get_iscsi_target_num(self.context, i)
+ self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+ % locals())
+
+ self.stream.truncate(0)
+ self.mox.ReplayAll()
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_some_volume_missing(self):
+ """Output a warning message when some volumes are not recognied
+ by ietd."""
+ volume_id_list = self._attach_volume()
+
+        # the first volume's iSCSI target is not exported
+ tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+ % locals()).AndRaise(exception.ProcessExecutionError())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.volume.check_for_export,
+ self.context,
+ self.instance_id)
+ msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
+ self.assertTrue(0 <= self.stream.getvalue().find(msg))
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)