author    Kei Masumoto <masumotok@nttdata.co.jp>    2011-01-27 20:39:33 +0900
committer Kei Masumoto <masumotok@nttdata.co.jp>    2011-01-27 20:39:33 +0900
commit    7c8096384507908a5e583f4554d0fc765ae5f2eb (patch)
tree      69ea124e8cdc6db6cf79627d8fad64d18f0551fb /nova/tests
parent    1dc38833c75d546b1c64d2bcd1f5d9a5bab8836d (diff)
adding testcode
Diffstat (limited to 'nova/tests')
-rw-r--r--  nova/tests/test_compute.py   305
-rw-r--r--  nova/tests/test_scheduler.py 722
-rw-r--r--  nova/tests/test_service.py    61
-rw-r--r--  nova/tests/test_virt.py      520
4 files changed, 1604 insertions, 4 deletions
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 09f6ee94a..344c2d2b5 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
+import mox
from nova import compute
from nova import context
@@ -27,9 +28,12 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
LOG = logging.getLogger('nova.tests.compute')
@@ -219,3 +223,304 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_update_service_exception(self):
+ """nova-compute updates Serivce table on DB like below.
+ nova.service.Serivce.start ->
+ nova.compute.ComputeManager.update_service.
+ This testcase confirms if no record found on Service
+ table, exception can be raised.
+ """
+ host = 'foo'
+ binary = 'nova-compute'
+ dbmock = self.mox.CreateMock(db)
+ dbmock.service_get_by_args(mox.IgnoreArg(),
+ mox.StrContains(host),
+ mox.StrContains(binary)).\
+ AndRaise(exception.NotFound())
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ try:
+ self.compute.update_service('dummy', host, binary)
+ except exception.Invalid, e:
+ msg = 'Cannot insert compute manager specific info'
+            c1 = (0 <= e.message.find(msg))
+ self.assertTrue(c1)
+ self.mox.ResetAll()
+
+ def test_update_service_success(self):
+ """nova-compute updates Serivce table on DB like below.
+ nova.service.Serivce.start ->
+ nova.compute.ComputeManager.update_service.
+ In this method, vcpus/memory_mb/local_gb/hypervisor_type/
+ hypervisor_version/cpu_info should be changed.
+ Based on this specification, this testcase confirms
+ if this method finishes successfully,
+ meaning self.db.service_update is called with dictinary
+
+ {'vcpu':aaa, 'memory_mb':bbb, 'local_gb':ccc,
+ 'hypervisor_type':ddd, 'hypervisor_version':eee,
+ 'cpu_info':fff}
+
+ Since each value of above dict can be obtained through
+ driver(different depends on environment),
+ only dictionary keys are checked.
+ """
+
+        def dic_key_check(dic):
+            validkey = ['vcpus', 'memory_mb', 'local_gb',
+                        'hypervisor_type', 'hypervisor_version', 'cpu_info']
+            return set(validkey) == set(dic.keys())
+
+ host = 'foo'
+ binary = 'nova-compute'
+ service_ref = {'id':1, 'binary':'nova-compute', 'topic':'compute'}
+ dbmock = self.mox.CreateMock(db)
+ dbmock.service_get_by_args(mox.IgnoreArg(),
+ mox.StrContains(host),
+ mox.StrContains(binary)).\
+ AndReturn(service_ref)
+ dbmock.service_update(mox.IgnoreArg(),
+ service_ref['id'],
+ mox.Func(dic_key_check))
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+        self.compute.update_service('dummy', host, binary)
+        self.mox.ResetAll()
+
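For reference, the flow that the two update_service testcases pin down looks roughly like the sketch below. It is inferred from the mocked expectations alone; the get_resource_info() helper and the exact message text are illustrative assumptions, not the actual manager code.

    def update_service(self, context, host, binary):
        # A missing Service record is translated into exception.Invalid,
        # as test_update_service_exception expects.
        try:
            service_ref = self.db.service_get_by_args(context, host, binary)
        except exception.NotFound:
            raise exception.Invalid('Cannot insert compute manager '
                                    'specific info for %s/%s'
                                    % (host, binary))
        # The values come from the hypervisor driver and differ by
        # environment, which is why only the keys are asserted above.
        info = self.driver.get_resource_info()  # hypothetical helper
        self.db.service_update(context, service_ref['id'],
                               {'vcpus': info['vcpus'],
                                'memory_mb': info['memory_mb'],
                                'local_gb': info['local_gb'],
                                'hypervisor_type': info['hypervisor_type'],
                                'hypervisor_version':
                                    info['hypervisor_version'],
                                'cpu_info': info['cpu_info']})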
+ def _setup_other_managers(self):
+ self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ """
+ if instances that are intended to be migrated doesnt have fixed_ip
+ (not happens usually), pre_live_migration has to raise Exception.
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NotFound,
+ self.compute.pre_live_migration,
+ c, instance_ref['id'])
+ self.mox.ResetAll()
+
+ def test_pre_live_migration_instance_has_volume(self):
+ """if any volumes are attached to the instances that are
+ intended to be migrated, setup_compute_volume must be
+ called because aoe module should be inserted at destination
+ host. This testcase checks on it.
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ c = context.get_admin_context()
+ i_id=instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ volmock = self.mox.CreateMock(self.volume_manager)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
+        for v in instance_ref['volumes']:
+            volmock.setup_compute_volume(c, v['id']).InAnyOrder('g1')
+ netmock.setup_compute_network(c, instance_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(instance_ref)
+
+ self.compute.db = dbmock
+ self.compute.volume_manager = volmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_id)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """if any volumes are not attached to the instances that are
+ intended to be migrated, log message should be appears
+ because administrator can proove instance conditions before
+ live_migration if any trouble occurs.
+ """
+ instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'}
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname'])
+ netmock.setup_compute_network(c, i_id)
+ drivermock.ensure_filtering_rules_for_instance(instance_ref)
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_id)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
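Taken together, the three pre_live_migration testcases above pin down roughly the following sequence (a sketch reconstructed from the mocks, not the actual manager source; the NotFound message is omitted):

    def pre_live_migration(self, context, instance_id):
        instance_ref = self.db.instance_get(context, instance_id)
        # An instance without a fixed IP is broken, so refuse to migrate.
        if not self.db.instance_get_fixed_address(context, instance_id):
            raise exception.NotFound()
        if instance_ref['volumes']:
            # The AoE module has to be set up on this destination host.
            for v in instance_ref['volumes']:
                self.volume_manager.setup_compute_volume(context, v['id'])
        else:
            LOG.info(_("%s has no volume."), instance_ref['hostname'])
        self.network_manager.setup_compute_network(context, instance_id)
        self.driver.ensure_filtering_rules_for_instance(instance_ref)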
+ def test_live_migration_instance_has_volume(self):
+ """Any volumes are mounted by instances to be migrated are found,
+ vblade health must be checked before starting live-migration.
+ And that is checked by check_for_export().
+ This testcase confirms check_for_export() is called.
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'}
+ c = context.get_admin_context()
+ dest='dummydest'
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_id, dest)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_volume_and_exception(self):
+ """In addition to test_live_migration_instance_has_volume testcase,
+ this testcase confirms if any exception raises from check_for_export().
+ Then, valid seaquence of this method should recovering instance/volumes
+ status(ex. instance['state_description'] is changed from 'migrating'
+ -> 'running', was changed by scheduler)
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ dest='dummydest'
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+ rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).\
+ InAnyOrder('g1').AndRaise(rpc.RemoteError('du', 'mm', 'y'))
+ self.mox.StubOutWithMock(compute_manager.LOG, 'error')
+ compute_manager.LOG.error('Pre live migration for %s failed at %s',
+ instance_ref['hostname'], dest)
+ dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+        for v in instance_ref['volumes']:
+            dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_id, dest)
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_no_volume_and_exception(self):
+ """Simpler than test_live_migration_instance_has_volume_and_exception"""
+
+ instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'}
+ dest='dummydest'
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+ rpc.call(c, compute_topic,
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).\
+ AndRaise(rpc.RemoteError('du', 'mm', 'y'))
+ self.mox.StubOutWithMock(compute_manager.LOG, 'error')
+ compute_manager.LOG.error('Pre live migration for %s failed at %s',
+ instance_ref['hostname'], dest)
+ dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_id, dest)
+ self.mox.ResetAll()
+
+    def test_live_migration_works_correctly(self):
+        """Confirms live_migration goes through to driver.live_migration
+        when volumes are attached."""
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ c = context.get_admin_context()
+ dest='dummydest'
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+ rpc.call(c, compute_topic,
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ drivermock.live_migration(c, instance_ref, dest)
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_id, dest)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
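The live_migration testcases above collectively imply a flow along these lines (sketch only, inferred from the mocks):

    def live_migration(self, context, instance_id, dest):
        instance_ref = self.db.instance_get(context, instance_id)
        try:
            if instance_ref['volumes']:
                # vblade health is checked before migration starts.
                rpc.call(context, FLAGS.volume_topic,
                         {"method": "check_for_export",
                          "args": {'instance_id': instance_id}})
            topic = self.db.queue_get_for(context, FLAGS.compute_topic, dest)
            rpc.call(context, topic,
                     {"method": "pre_live_migration",
                      "args": {'instance_id': instance_id}})
        except rpc.RemoteError:
            LOG.error('Pre live migration for %s failed at %s',
                      instance_ref['hostname'], dest)
            # Roll back the 'migrating' state set by the scheduler.
            self.db.instance_set_state(context, instance_id,
                                       power_state.RUNNING, 'running')
            for v in instance_ref['volumes']:
                self.db.volume_update(context, v['id'],
                                      {'status': 'in-use'})
            raise
        self.driver.live_migration(context, instance_ref, dest)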
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 9d458244b..c62bca9b1 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,10 +20,12 @@ Tests For Scheduler
"""
import datetime
+import mox
from mox import IgnoreArg
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import service
from nova import test
@@ -32,6 +34,8 @@ from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
@@ -75,7 +79,102 @@ class SchedulerTestCase(test.TestCase):
'args': {'num': 7}})
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
+
+    def test_show_host_resource_host_not_exists(self):
+        """
+        A testcase of SchedulerManager.show_host_resource.
+        The given host does not exist.
+        """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+
+ self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
+ manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ result = scheduler.show_host_resource(ctxt, dest)
+        # result should be a dict
+        keys = ['ret', 'msg']
+        c1 = set(result.keys()) == set(keys)
+        c2 = not result['ret']
+        c3 = result['msg'].find('No such Host or not compute node') >= 0
+        self.assertTrue(c1 and c2 and c3)
+ self.mox.UnsetStubs()
+
+ def test_show_host_resource_no_project(self):
+ """
+ A testcase of driver.show_host_resource
+ no instance stays on the given host
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ service_ref = {'id':1, 'host':dest}
+ service_ref.update(r0)
+
+ self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
+ manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
+ AndReturn([(service_ref, 0)])
+ manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ result = scheduler.show_host_resource(ctxt, dest)
+        # result should be a dict
+        keys = ['ret', 'phy_resource', 'usage']
+        c1 = set(result.keys()) == set(keys)
+        c2 = result['ret']
+        c3 = result['phy_resource'] == r0
+        c4 = result['usage'] == {}
+        self.assertTrue(c1 and c2 and c3 and c4)
+ self.mox.UnsetStubs()
+
+ def test_show_host_resource_works_correctly(self):
+ """
+ A testcase of driver.show_host_resource
+ to make sure everything finished with no error.
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20}
+ r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30}
+ service_ref = {'id':1, 'host':dest}
+ service_ref.update(r0)
+ instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'}
+ instance_ref2.update(r1)
+ instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'}
+ instance_ref3.update(r1)
+ self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
+ manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
+ AndReturn([(service_ref, 0)])
+ manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([instance_ref2, instance_ref3])
+ for p in ['p-01', 'p-02']:
+ manager.db.instance_get_vcpu_sum_by_host_and_project(
+ ctxt, dest, p).AndReturn(r2['vcpus'])
+ manager.db.instance_get_memory_sum_by_host_and_project(
+ ctxt, dest, p).AndReturn(r2['memory_mb'])
+ manager.db.instance_get_disk_sum_by_host_and_project(
+ ctxt, dest, p).AndReturn(r2['local_gb'])
+
+ self.mox.ReplayAll()
+ result = scheduler.show_host_resource(ctxt, dest)
+        # result should be a dict
+        keys = ['ret', 'phy_resource', 'usage']
+        c1 = set(result.keys()) == set(keys)
+        c2 = result['ret']
+        c3 = result['phy_resource'] == r0
+        c4 = set(result['usage'].keys()) == set(['p-01', 'p-02'])
+        c5 = result['usage']['p-01'] == r2
+        c6 = result['usage']['p-02'] == r2
+        self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6)
+ self.mox.UnsetStubs()
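The return shapes that the three show_host_resource testcases above check can be summarized with illustrative literals:

    # Host missing, or not a compute node:
    result_not_found = {'ret': False,
                        'msg': 'No such Host or not compute node (...)'}
    # Host found: physical totals plus per-project usage sums.
    result_found = {'ret': True,
                    'phy_resource': {'vcpus': 16, 'memory_mb': 32,
                                     'local_gb': 100},
                    'usage': {'p-01': {'vcpus': 10, 'memory_mb': 20,
                                       'local_gb': 30},
                              'p-02': {'vcpus': 10, 'memory_mb': 20,
                                       'local_gb': 30}}}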
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -384,3 +483,626 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
+
+    def test_scheduler_live_migration_with_volume(self):
+        """
+        The scheduler driver's schedule_live_migration finishes
+        successfully (volumes are attached to the instance).
+        This testcase makes sure schedule_live_migration changes
+        the instance state from 'running' to 'migrating'.
+        """
+ driver_i = self.scheduler.driver
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy',
+ 'volumes':[{'id':1}, {'id':2}]}
+ dest = 'dummydest'
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ # must be IgnoreArg() because scheduler changes ctxt's memory address
+ driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref)
+
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest)
+ driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'],
+ power_state.PAUSED, 'migrating')
+ for v in i_ref['volumes']:
+ driver.db.volume_update(mox.IgnoreArg(), v['id'],
+ {'status': 'migrating'})
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs={'instance_id':i_ref['id'], 'dest':dest}
+ rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
+ {"method": 'live_migration',
+ "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(ctxt, topic,
+ instance_id=i_ref['id'], dest=dest)
+ self.mox.UnsetStubs()
+
+    def test_scheduler_live_migration_no_volume(self):
+        """
+        The scheduler driver's schedule_live_migration finishes
+        successfully (no volumes are attached to the instance).
+        This testcase makes sure schedule_live_migration changes
+        the instance state from 'running' to 'migrating'.
+        """
+ driver_i = self.scheduler.driver
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]}
+ dest = 'dummydest'
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ # must be IgnoreArg() because scheduler changes ctxt's memory address
+ driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest)
+ driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'],
+ power_state.PAUSED, 'migrating')
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs={'instance_id':i_ref['id'], 'dest':dest}
+ rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
+ {"method": 'live_migration',
+ "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(ctxt, topic,
+ instance_id=i_ref['id'], dest=dest)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_src_check_instance_not_running(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The instance given by instance_id is not running.
+        """
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ dest = 'dummydest'
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'volumes':[], 'state_description':'migrating',
+ 'state':power_state.RUNNING}
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is not running') > 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_src_check_volume_node_not_alive(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The volume node is not alive when volumes are attached to
+        the given instance.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'volumes':[{'id':1}, {'id':2}],
+ 'state_description':'running', 'state':power_state.RUNNING}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('volume node is not alive') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_src_check_compute_node_not_alive(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The source compute node is not alive.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
+ 'state_description':'running', 'state':power_state.RUNNING}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is not alive') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_src_check_works_correctly(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The testcase makes sure everything finishes with no error.
+        """
+ driver_i = self.scheduler.driver
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
+ 'state_description':'running', 'state':power_state.RUNNING}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(driver_i, 'service_is_up')
+ driver_i.service_is_up(service_ref).AndReturn(True)
+
+ self.mox.ReplayAll()
+ ret = driver_i._live_migration_src_check(ctxt, i_ref)
+        self.assertTrue(ret is None)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_dest_check_service_not_exists(self):
+        """
+        A testcase of driver._live_migration_dest_check.
+        The destination host does not exist.
+        """
+ driver_i = self.scheduler.driver
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ driver_i._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('does not exists') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_dest_check_service_is_not_compute(self):
+        """
+        A testcase of driver._live_migration_dest_check.
+        The destination host does not provide compute service.
+        """
+ driver_i = self.scheduler.driver
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'api')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+
+ self.mox.ReplayAll()
+ try:
+ driver_i._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('must be compute node') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_dest_check_service_not_alive(self):
+        """
+        A testcase of driver._live_migration_dest_check.
+        The destination host's compute service is not alive.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'compute')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
+ self.scheduler.driver.service_is_up(service_ref).AndReturn(False)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is not alive') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_dest_check_service_same_host(self):
+        """
+        A testcase of driver._live_migration_dest_check.
+        The destination host is the same as the source host.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'compute')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
+ self.scheduler.driver.service_is_up(service_ref).AndReturn(True)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is running now. choose other host') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_dest_check_service_works_correctly(self):
+        """
+        A testcase of driver._live_migration_dest_check.
+        The testcase makes sure everything finishes with no error.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'compute')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
+ self.scheduler.driver.service_is_up(service_ref).AndReturn(True)
+ self.mox.StubOutWithMock(self.scheduler.driver, 'has_enough_resource')
+ self.scheduler.driver.has_enough_resource(mox.IgnoreArg(), i_ref, dest)
+
+        self.mox.ReplayAll()
+        ret = self.scheduler.driver._live_migration_dest_check(ctxt,
+                                                               i_ref,
+                                                               dest)
+        self.assertTrue(ret is None)
+        self.mox.UnsetStubs()
+
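Read together, the five dest_check testcases above constrain _live_migration_dest_check to roughly this shape (a sketch from the mocks; the message text is abbreviated):

    def _live_migration_dest_check(self, context, instance_ref, dest):
        services = db.service_get_all_by_host(context, dest)
        if len(services) == 0:
            raise exception.Invalid('%s does not exists' % dest)
        service_ref = services[0]
        if service_ref['topic'] != 'compute':
            raise exception.Invalid('%s must be compute node' % dest)
        if not self.service_is_up(service_ref):
            raise exception.Invalid('%s is not alive' % dest)
        if dest == instance_ref['host']:
            raise exception.Invalid('%s is running now. choose other host'
                                    % instance_ref['hostname'])
        self.has_enough_resource(context, instance_ref, dest)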
+    def test_live_migration_common_check_service_dest_not_exists(self):
+        """
+        A testcase of driver._live_migration_common_check.
+        The destination host does not exist.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('does not exists') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_common_check_service_orig_not_exists(self):
+        """
+        A testcase of driver._live_migration_common_check.
+        The original host (where the instance was launched) does not
+        exist.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('host', i_ref['host'])
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ msg = 'where instance was launched at) does not exists'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_common_check_service_different_hypervisor(self):
+        """
+        A testcase of driver._live_migration_common_check.
+        The original host and the dest host have different hypervisor
+        types.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_type', 'kvm')
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_type', 'xen')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ msg = 'Different hypervisor type'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_common_check_service_different_version(self):
+        """
+        A testcase of driver._live_migration_common_check.
+        The original host and the dest host have different hypervisor
+        versions.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_version', 12000)
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_version', 12001)
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ msg = 'Older hypervisor version'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_common_check_service_checking_cpuinfo_fail(self):
+        """
+        A testcase of driver._live_migration_common_check.
+        The CPU compatibility check via compare_cpu fails.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_version', 12000)
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_version', 12000)
+        service_ref2.__setitem__('cpu_info', '<cpu>info</cpu>')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+ driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest)
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': service_ref2['cpu_info']}}).\
+ AndRaise(rpc.RemoteError('doesnt have compatibility to', '', ''))
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except rpc.RemoteError, e:
+ msg = 'doesnt have compatibility to'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migration_common_check_service_works_correctly(self):
+        """
+        A testcase of driver._live_migration_common_check.
+        The testcase makes sure everything finishes with no error.
+        """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_version', 12000)
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_version', 12000)
+        service_ref2.__setitem__('cpu_info', '<cpu>info</cpu>')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+ driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest)
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': service_ref2['cpu_info']}})
+
+ self.mox.ReplayAll()
+ ret = self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+        self.assertTrue(ret is None)
+ self.mox.UnsetStubs()
+
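Collectively, the common_check testcases above imply this sequence (a sketch; message text abbreviated):

    def _live_migration_common_check(self, context, instance_ref, dest):
        # Both the destination host and the original (launched_on)
        # host must exist.
        dest_services = db.service_get_all_by_host(context, dest)
        if len(dest_services) == 0:
            raise exception.Invalid('%s does not exists' % dest)
        orig_host = instance_ref['launched_on']
        orig_services = db.service_get_all_by_host(context, orig_host)
        if len(orig_services) == 0:
            raise exception.Invalid('%s (where instance was launched at)'
                                    ' does not exists' % orig_host)
        dest_ref, orig_ref = dest_services[0], orig_services[0]
        if orig_ref['hypervisor_type'] != dest_ref['hypervisor_type']:
            raise exception.Invalid('Different hypervisor type (...)')
        if orig_ref['hypervisor_version'] > dest_ref['hypervisor_version']:
            raise exception.Invalid('Older hypervisor version (...)')
        # Finally ask the destination to verify CPU compatibility; a
        # rpc.RemoteError raised by compare_cpu propagates to the caller.
        rpc.call(context,
                 db.queue_get_for(context, FLAGS.compute_topic, dest),
                 {"method": 'compare_cpu',
                  "args": {'cpu_info': orig_ref['cpu_info']}})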
+ def test_has_enough_resource_lack_resource_vcpu(self):
+ """
+ A testcase of driver.has_enough_resource.
+ Lack of vcpu.(boundary check)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':6, 'memory_mb':8, 'local_gb':10}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ except exception.NotEmpty, e:
+ msg = 'is not capable to migrate'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_has_enough_resource_lack_resource_memory(self):
+ """
+ A testcase of driver.has_enough_resource.
+ Lack of memory_mb.(boundary check)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':16, 'local_gb':10}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ except exception.NotEmpty, e:
+ msg = 'is not capable to migrate'
+ self.assertTrue(e.message.find(msg) >= 0)
+        self.mox.UnsetStubs()
+
+ def test_has_enough_resource_lack_resource_disk(self):
+ """
+ A testcase of driver.has_enough_resource.
+ Lack of local_gb.(boundary check)
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':80}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ except exception.NotEmpty, e:
+ msg = 'is not capable to migrate'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_has_enough_resource_works_correctly(self):
+ """
+ A testcase of driver.has_enough_resource
+ to make sure everything finished with no error.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+        self.assertTrue(ret is None)
+ self.mox.UnsetStubs()
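The four has_enough_resource testcases bound the check as follows; note from the boundary cases that a request consuming exactly the remaining capacity already fails (sketch from the mocks):

    def has_enough_resource(self, context, instance_ref, dest):
        service_ref = db.service_get_all_by_host(context, dest)[0]
        instances = db.instance_get_all_by_host(context, dest)
        used_vcpus = sum([i['vcpus'] for i in instances])
        used_mem = sum([i['memory_mb'] for i in instances])
        used_disk = sum([i['local_gb'] for i in instances])
        if (service_ref['vcpus'] <= used_vcpus + instance_ref['vcpus'] or
            service_ref['memory_mb'] <=
                used_mem + instance_ref['memory_mb'] or
            service_ref['local_gb'] <= used_disk + instance_ref['local_gb']):
            raise exception.NotEmpty('%s is not capable to migrate %s'
                                     % (dest, instance_ref['hostname']))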
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index a67c8d1e8..a147e69b4 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
@@ -41,7 +42,20 @@ class FakeManager(manager.Manager):
def test_method(self):
return 'manager'
-
+# Temporary globals to pass host/binary/self.mox from each test method
+# to the fake manager class below.
+ghost = None
+gbinary = None
+gmox = None
+class FakeComputeManager(compute_manager.ComputeManager):
+    """Fake compute manager for tests"""
+
+    def __init__(self, compute_driver=None, *args, **kwargs):
+        global ghost, gbinary, gmox
+        # update_service is stubbed out by the test, so this call records
+        # the expectation before replaying.
+        self.update_service(mox.IgnoreArg(),
+                            mox.StrContains(ghost),
+                            mox.StrContains(gbinary))
+        gmox.ReplayAll()
+        super(FakeComputeManager, self).__init__(compute_driver,
+                                                 *args, **kwargs)
+
+
class ExtendedService(service.Service):
def test_method(self):
return 'service'
@@ -258,3 +272,48 @@ class ServiceTestCase(test.TestCase):
serv.report_state()
self.assert_(not serv.model_disconnected)
+
+ def test_compute_can_update_services(self):
+ """
+ Test nova-compute successfully updated Service table on DB.
+ Doing so, self.manager.update_service must be called
+ if 'self.binary == nova-compute', and this testcase checks on it.
+ """
+ host = 'foo'
+ binary = 'nova-compute'
+ topic = 'compute1'
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
+ service_ref = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'availability_zone': 'nova',
+ 'id': 1}
+
+ service.db.service_get_by_args(mox.IgnoreArg(),
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ self.mox.StubOutWithMock(compute_manager.ComputeManager, 'update_service')
+
+ global ghost, gbinary, gmox
+ ghost = host
+ gbinary = binary
+ gmox = self.mox
+
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.test_service.FakeComputeManager')
+        # ReplayAll has already been executed in
+        # FakeComputeManager.__init__(), so it is not called here.
+ serv.start()
+ serv.stop()
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index afdc89ba2..177e8f021 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,21 +14,29 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mox
+
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
+from nova import log as logging
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
+from nova.db.sqlalchemy import models
+from nova.compute import power_state
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
+libvirt = None
+libxml2 = None
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
@@ -52,6 +60,38 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
+    def _driver_dependent_test_setup(self):
+        """
+        Setup method.
+        Call this method at the top of each testcase method
+        if the testcase requires libvirt and Cheetah.
+        """
+        try:
+ global libvirt
+ global libxml2
+ libvirt_conn.libvirt = __import__('libvirt')
+ libvirt_conn.libxml2 = __import__('libxml2')
+ libvirt_conn._late_load_cheetah()
+ libvirt = __import__('libvirt')
+ except ImportError, e:
+ logging.warn("""This test has not been done since """
+ """using driver-dependent library Cheetah/libvirt/libxml2.""")
+ raise e
+
+        # inevitable mocks for calling
+        # nova.virt.libvirt_conn.LibvirtConnection.__init__
+ nwmock = self.mox.CreateMock(libvirt_conn.NWFilterFirewall)
+ self.mox.StubOutWithMock(libvirt_conn, 'NWFilterFirewall',
+ use_mock_anything=True)
+ libvirt_conn.NWFilterFirewall(mox.IgnoreArg()).AndReturn(nwmock)
+
+ obj = utils.import_object(FLAGS.firewall_driver)
+ fwmock = self.mox.CreateMock(obj)
+ self.mox.StubOutWithMock(libvirt_conn, 'utils',
+ use_mock_anything=True)
+ libvirt_conn.utils.import_object(FLAGS.firewall_driver).AndReturn(fwmock)
+ return nwmock, fwmock
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -188,9 +228,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically
- # set uri
- #
+ # This test is supposed to make sure we don't override a specifically set uri
+ #
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
@@ -202,6 +241,480 @@ class LibvirtConnTestCase(test.TestCase):
uri = conn.get_uri()
self.assertEquals(uri, testuri)
+ def test_get_memory_mb(self):
+ """
+ Check if get_memory_mb returns memory value
+ Connection/OS/driver differenct does not matter for this method,
+ so everyone can execute for checking.
+ """
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertTrue(0 < conn.get_memory_mb())
+ self.mox.UnsetStubs()
+
+ def test_get_cpu_info_works_correctly(self):
+ """
+ Check if get_cpu_info works correctly.
+ (in case libvirt.getCapabilities() works correctly)
+ """
+ xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology sockets='2' """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cpu>""")
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertTrue(0 < len(conn.get_cpu_info()))
+ self.mox.UnsetStubs()
+
+    def test_get_cpu_info_inappropriate_xml(self):
+        """
+        Check that get_cpu_info raises an exception
+        in case libvirt.getCapabilities() returns invalid XML
+        (the XML is missing the <cpu> tag).
+        """
+ xml=("""<cccccpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology sockets='2' """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cccccpu>""")
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ try:
+ conn.get_cpu_info()
+ except exception.Invalid, e:
+            c1 = (0 <= e.message.find('Invalid xml'))
+ self.assertTrue(c1)
+ self.mox.UnsetStubs()
+
+    def test_get_cpu_info_inappropriate_xml2(self):
+        """
+        Check that get_cpu_info raises an exception
+        in case libvirt.getCapabilities() returns invalid XML
+        (the <topology> tag is malformed: its "sockets" attribute
+        is missing).
+        """
+ xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cpu>""")
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ try:
+ conn.get_cpu_info()
+ except exception.Invalid, e:
+            c1 = (0 <= e.message.find('Invalid xml: topology'))
+ self.assertTrue(c1)
+ self.mox.UnsetStubs()
+
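The three get_cpu_info testcases above imply parsing along these lines. The sketch below uses ElementTree and json as stand-ins for whatever XML/serialization libraries libvirt_conn actually uses, and takes the capabilities XML as an argument instead of reading self._conn:

    import json
    from xml.etree.ElementTree import fromstring

    def get_cpu_info_sketch(capabilities_xml):
        cpu = fromstring(capabilities_xml)
        if cpu.tag != 'cpu':
            raise exception.Invalid('Invalid xml: <cpu> tag not found')
        topology = cpu.find('topology')
        if topology is None or topology.get('sockets') is None:
            raise exception.Invalid('Invalid xml: topology (...)')
        features = [f.get('name') for f in cpu.findall('feature')]
        return json.dumps({'arch': cpu.findtext('arch'),
                           'model': cpu.findtext('model'),
                           'vendor': cpu.findtext('vendor'),
                           'topology': dict(topology.attrib),
                           'features': features})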
+ def test_compare_cpu_works_correctly(self):
+ """Calling libvirt.compute_cpu() and works correctly """
+
+ t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+ """"topology":{"cores":"%s", "threads":"%s", """
+ """"sockets":"%s"}, "features":[%s]}""")
+ cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),
+                                                        0).AndReturn(1)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(conn.compare_cpu(cpu_info) is None)
+ self.mox.UnsetStubs()
+
+ def test_compare_cpu_raises_exception(self):
+ """
+ Libvirt-related exception occurs when calling
+ libvirt.compare_cpu().
+ """
+ t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+ """"topology":{"cores":"%s", "threads":"%s", """
+ """"sockets":"%s"}, "features":[%s]}""")
+ cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+ AndRaise(libvirt.libvirtError('ERR'))
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError, conn.compare_cpu, cpu_info)
+ self.mox.UnsetStubs()
+
+ def test_compare_cpu_no_compatibility(self):
+ """libvirt.compare_cpu() return less than 0.(no compatibility)"""
+
+ t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+ """"topology":{"cores":"%s", "threads":"%s", """
+ """"sockets":"%s"}, "features":[%s]}""")
+ cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+ AndRaise(exception.Invalid('ERR'))
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info)
+ self.mox.UnsetStubs()
+
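The three compare_cpu testcases constrain the method roughly as follows (a sketch; conn stands for the libvirt connection, and the Invalid message is abbreviated):

    def compare_cpu_sketch(conn, cpu_xml):
        # compareCPU may itself raise libvirt.libvirtError, which the
        # tests expect to propagate unchanged.
        ret = conn.compareCPU(cpu_xml, 0)
        if ret <= 0:
            # The destination CPU cannot run the source CPU's features.
            raise exception.Invalid('CPU doesnt have compatibility (...)')
        return None  # a positive result means compatible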
+ def test_ensure_filtering_rules_for_instance_works_correctly(self):
+ """ensure_filtering_rules_for_instance works as expected correctly"""
+
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ nwmock.setup_basic_filtering(mox.IgnoreArg())
+ fwmock.prepare_instance_filter(instance_ref)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ n = 'nova-instance-%s' % instance_ref.name
+ libvirt_conn.LibvirtConnection._conn.nwfilterLookupByName(n)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.ensure_filtering_rules_for_instance(instance_ref)
+ self.mox.UnsetStubs()
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ """ensure_filtering_fules_for_instance finishes with timeout"""
+
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ nwmock.setup_basic_filtering(mox.IgnoreArg())
+ fwmock.prepare_instance_filter(instance_ref)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ n = 'nova-instance-%s' % instance_ref.name
+ for i in range(FLAGS.live_migration_timeout_sec * 2):
+ libvirt_conn.LibvirtConnection._conn.\
+ nwfilterLookupByName(n).AndRaise(libvirt.libvirtError('ERR'))
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ try:
+ conn.ensure_filtering_rules_for_instance(instance_ref)
+ except exception.Error, e:
+            c1 = (0 <= e.message.find('Timeout migrating for'))
+ self.assertTrue(c1)
+ self.mox.UnsetStubs()
+
+ def test_live_migration_works_correctly(self):
+ """_live_migration works as expected correctly """
+
+ class dummyCall(object):
+ f = None
+ def start(self, interval=0, now=False):
+ pass
+
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+ dest = 'desthost'
+ ctxt = context.get_admin_context()
+
+ try:
+ self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI",
+ use_mock_anything=True)
+ vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndReturn(None)
+ libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+ AndReturn(vdmock)
+        # The following stub would also work:
+        #self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn,
+        #    "lookupByName", use_mock_anything=True)
+
+        libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ ret = conn._live_migration(ctxt, instance_ref, dest)
+        self.assertTrue(ret is None)
+ self.mox.UnsetStubs()
+
+ def test_live_migration_raises_exception(self):
+ """
+ _live_migration raises exception, then this testcase confirms
+ state_description/state for the instances/volumes are recovered.
+ """
+ class Instance(models.NovaBase):
+ id = 0
+ volumes = None
+ name = 'name'
+
+ ctxt = context.get_admin_context()
+ dest = 'desthost'
+ instance_ref = Instance()
+ instance_ref.__setitem__('id', 1)
+ instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}])
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+        except ImportError:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI",
+ use_mock_anything=True)
+ vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndRaise(libvirt.libvirtError('ERR'))
+ libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+ AndReturn(vdmock)
+ self.mox.StubOutWithMock(db, 'instance_set_state')
+ db.instance_set_state(ctxt, instance_ref['id'],
+ power_state.RUNNING, 'running')
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref.volumes:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\
+ InAnyOrder('g1')
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
+ def test_post_live_migration_working_correctly(self):
+ """_post_live_migration works as expected correctly """
+
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+ 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+ 'volumes':[{'id':1}, {'id':2} ]}
+ network_ref = {'id':1, 'host':dest}
+ floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+        except ImportError:
+ return
+ fwmock.unfilter_instance(instance_ref)
+
+ fixed_ip = instance_ref['fixed_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+ db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+ db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+ self.mox.StubOutWithMock(db, 'network_update')
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+ fl_ip = instance_ref['floating_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+ db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(fl_ip)
+ self.mox.StubOutWithMock(db, 'floating_ip_get_by_address')
+ db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\
+ AndReturn(floating_ip_ref)
+ self.mox.StubOutWithMock(db, 'floating_ip_update')
+ db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest})
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ db.instance_update(ctxt, instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING, 'host': dest})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref['volumes']:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
+ def test_post_live_migration_no_floating_ip(self):
+ """
+ _post_live_migration works as expected correctly
+ (in case instance doesnt have floaitng ip)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+ 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+ 'volumes':[{'id':1}, {'id':2} ]}
+ network_ref = {'id':1, 'host':dest}
+ floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+        except ImportError:
+ return
+ fwmock.unfilter_instance(instance_ref)
+
+ fixed_ip = instance_ref['fixed_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+ db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+ db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+ self.mox.StubOutWithMock(db, 'network_update')
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+ self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+ db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None)
+ self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+ libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+ libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+ instance_ref['hostname'])
+        # The last log messages are matched loosely; maybe there is no
+        # need to check them so strictly?
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ db.instance_update(ctxt, instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': dest})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref['volumes']:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
+ def test_post_live_migration_no_floating_ip_with_exception(self):
+ """
+ _post_live_migration works as expected correctly
+ (in case instance doesnt have floaitng ip, and raise exception)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+ 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+ 'volumes':[{'id':1}, {'id':2} ]}
+ network_ref = {'id':1, 'host':dest}
+ floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+        except ImportError:
+ return
+ fwmock.unfilter_instance(instance_ref)
+
+ fixed_ip = instance_ref['fixed_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+ db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+ db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+ self.mox.StubOutWithMock(db, 'network_update')
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+ self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+ db.instance_get_floating_address(ctxt, instance_ref['id']).\
+ AndRaise(exception.NotFound())
+ self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+ libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+ libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+ instance_ref['hostname'])
+        # The last message is matched loosely; maybe there is no need to
+        # check it so strictly?
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ db.instance_update(ctxt, instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING, 'host': dest})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref['volumes']:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
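The three _post_live_migration testcases above together describe the source-side database updates after a successful migration (a sketch from the mocks; the firewall-driver attribute name is assumed):

    def _post_live_migration(self, ctxt, instance_ref, dest):
        LOG.info(_('post livemigration operation is started..'))
        self.firewall_driver.unfilter_instance(instance_ref)  # assumed name
        # Point the fixed IP and its network at the destination host.
        fixed_ip = db.instance_get_fixed_address(ctxt, instance_ref['id'])
        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
        network_ref = db.fixed_ip_get_network(ctxt, fixed_ip)
        db.network_update(ctxt, network_ref['id'], {'host': dest})
        # A floating IP is optional: both None and NotFound just log.
        try:
            address = db.instance_get_floating_address(ctxt,
                                                       instance_ref['id'])
            if not address:
                LOG.info(_('floating_ip is not found for %s'),
                         instance_ref['hostname'])
            else:
                floating_ip_ref = db.floating_ip_get_by_address(ctxt,
                                                                address)
                db.floating_ip_update(ctxt, floating_ip_ref['address'],
                                      {'host': dest})
        except exception.NotFound:
            LOG.info(_('floating_ip is not found for %s'),
                     instance_ref['hostname'])
        # Mark the instance running on the destination and the volumes
        # back in use.
        db.instance_update(ctxt, instance_ref['id'],
                           {'state_description': 'running',
                            'state': power_state.RUNNING, 'host': dest})
        for v in instance_ref['volumes']:
            db.volume_update(ctxt, v['id'], {'status': 'in-use'})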
def tearDown(self):
super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
@@ -475,3 +988,4 @@ class NWFilterTestCase(test.TestCase):
self.fw.prepare_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()