summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKei Masumoto <masumotok@nttdata.co.jp>2011-01-27 20:39:33 +0900
committerKei Masumoto <masumotok@nttdata.co.jp>2011-01-27 20:39:33 +0900
commit7c8096384507908a5e583f4554d0fc765ae5f2eb (patch)
tree69ea124e8cdc6db6cf79627d8fad64d18f0551fb
parent1dc38833c75d546b1c64d2bcd1f5d9a5bab8836d (diff)
adding testcode
-rwxr-xr-xbin/nova-api2
-rw-r--r--nova/compute/manager.py66
-rw-r--r--nova/db/sqlalchemy/api.py49
-rw-r--r--nova/db/sqlalchemy/models.py12
-rw-r--r--nova/scheduler/driver.py53
-rw-r--r--nova/scheduler/manager.py28
-rw-r--r--nova/tests/test_compute.py305
-rw-r--r--nova/tests/test_scheduler.py722
-rw-r--r--nova/tests/test_service.py61
-rw-r--r--nova/tests/test_virt.py520
-rw-r--r--nova/virt/fake.py12
-rw-r--r--nova/virt/libvirt_conn.py159
-rw-r--r--nova/virt/xenapi_conn.py14
-rw-r--r--nova/volume/manager.py2
14 files changed, 1809 insertions, 196 deletions
diff --git a/bin/nova-api b/bin/nova-api
index 7b4fbeab1..fba09889f 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -37,11 +37,13 @@ gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import wsgi
+from nova import utils
logging.basicConfig()
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
+utils.default_flagfile()
FLAGS = flags.FLAGS
API_ENDPOINTS = ['ec2', 'osapi']
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index efb5753aa..4acba7153 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -125,12 +125,12 @@ class ComputeManager(manager.Manager):
"""Insert compute node specific information to DB."""
try:
- service_ref = db.service_get_by_args(ctxt,
- host,
- binary)
+ service_ref = self.db.service_get_by_args(ctxt,
+ host,
+ binary)
except exception.NotFound:
- msg = _(("""Cannot insert compute manager specific info"""
- """Because no service record found."""))
+ msg = _(("""Cannot insert compute manager specific info,"""
+ """ Because no service record found."""))
raise exception.Invalid(msg)
# Updating host information
@@ -141,14 +141,14 @@ class ComputeManager(manager.Manager):
version = self.driver.get_hypervisor_version()
cpu_info = self.driver.get_cpu_info()
- db.service_update(ctxt,
- service_ref['id'],
- {'vcpus': vcpu,
- 'memory_mb': memory_mb,
- 'local_gb': local_gb,
- 'hypervisor_type': hypervisor,
- 'hypervisor_version': version,
- 'cpu_info': cpu_info})
+ self.db.service_update(ctxt,
+ service_ref['id'],
+ {'vcpus': vcpu,
+ 'memory_mb': memory_mb,
+ 'local_gb': local_gb,
+ 'hypervisor_type': hypervisor,
+ 'hypervisor_version': version,
+ 'cpu_info': cpu_info})
def _update_state(self, context, instance_id):
"""Update the state of an instance from the driver info."""
@@ -596,22 +596,22 @@ class ComputeManager(manager.Manager):
""" Check the host cpu is compatible to a cpu given by xml."""
return self.driver.compare_cpu(cpu_info)
- def pre_live_migration(self, context, instance_id, dest):
+ def pre_live_migration(self, context, instance_id):
"""Any preparation for live migration at dst host."""
# Getting instance info
- instance_ref = db.instance_get(context, instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
ec2_id = instance_ref['hostname']
# Getting fixed ips
- fixed_ip = db.instance_get_fixed_address(context, instance_id)
+ fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
if not fixed_ip:
msg = _('%s(%s) doesnt have fixed_ip') % (instance_id, ec2_id)
raise exception.NotFound(msg)
# If any volume is mounted, prepare here.
if len(instance_ref['volumes']) == 0:
- logging.info(_("%s has no volume.") % ec2_id)
+ LOG.info(_("%s has no volume."), ec2_id)
else:
for v in instance_ref['volumes']:
self.volume_manager.setup_compute_volume(context, v['id'])
@@ -634,7 +634,7 @@ class ComputeManager(manager.Manager):
"""executes live migration."""
# Get instance for error handling.
- instance_ref = db.instance_get(context, instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
ec2_id = instance_ref['hostname']
try:
@@ -647,27 +647,27 @@ class ComputeManager(manager.Manager):
"args": {'instance_id': instance_id}})
# Asking dest host to preparing live migration.
- compute_topic = db.queue_get_for(context,
- FLAGS.compute_topic,
- dest)
+ compute_topic = self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ dest)
rpc.call(context,
- compute_topic,
- {"method": "pre_live_migration",
- "args": {'instance_id': instance_id,
- 'dest': dest}})
+ compute_topic,
+ {"method": "pre_live_migration",
+ "args": {'instance_id': instance_id}})
except Exception, e:
+ print e
msg = _('Pre live migration for %s failed at %s')
- logging.error(msg, ec2_id, dest)
- db.instance_set_state(context,
- instance_id,
- power_state.RUNNING,
- 'running')
+ LOG.error(msg, ec2_id, dest)
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.RUNNING,
+ 'running')
for v in instance_ref['volumes']:
- db.volume_update(context,
- v['id'],
- {'status': 'in-use'})
+ self.db.volume_update(context,
+ v['id'],
+ {'status': 'in-use'})
# e should be raised. just calling "raise" may raise NotFound.
raise e
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 248a46f65..1cdd5a286 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -897,41 +897,42 @@ def instance_get_all_by_host(context, hostname):
@require_context
-def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id):
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
session = get_session()
-
result = session.query(models.Instance).\
- filter_by(host=hostname).\
- filter_by(project_id=proj_id).\
- filter_by(deleted=can_read_deleted(context)).\
- value(column)
- if not result:
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.vcpus))
+ if None == result:
return 0
return result
@require_context
-def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
- return _instance_get_sum_by_host_and_project(context,
- 'vcpus',
- hostname,
- proj_id)
-
-
-@require_context
def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
- return _instance_get_sum_by_host_and_project(context,
- 'memory_mb',
- hostname,
- proj_id)
-
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.memory_mb))
+ if None == result:
+ return 0
+ return result
@require_context
def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
- return _instance_get_sum_by_host_and_project(context,
- 'local_gb',
- hostname,
- proj_id)
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.local_gb))
+ if None == result:
+ return 0
+ return result
+
@require_context
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index b28c64b59..7c40d5596 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -161,11 +161,11 @@ class Service(BASE, NovaBase):
# The below items are compute node only.
# -1 or None is inserted for other service.
- vcpus = Column(Integer, nullable=False, default=-1)
- memory_mb = Column(Integer, nullable=False, default=-1)
- local_gb = Column(Integer, nullable=False, default=-1)
- hypervisor_type = Column(String(128))
- hypervisor_version = Column(Integer, nullable=False, default=-1)
+ vcpus = Column(Integer, nullable=True)
+ memory_mb = Column(Integer, nullable=True)
+ local_gb = Column(Integer, nullable=True)
+ hypervisor_type = Column(String(128), nullable=True)
+ hypervisor_version = Column(Integer, nullable=True)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64", "model":"Nehalem",
@@ -174,7 +174,7 @@ class Service(BASE, NovaBase):
#
# Points are "json translatable" and it must have all
# dictionary keys above.
- cpu_info = Column(String(512))
+ cpu_info = Column(Text(), nullable=True)
class Certificate(BASE, NovaBase):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 65745093b..d4ad42388 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -69,11 +69,10 @@ class Scheduler(object):
raise NotImplementedError(_("Must implement a fallback schedule"))
def schedule_live_migration(self, context, instance_id, dest):
- """ live migration method """
+ """live migration method"""
# Whether instance exists and running
instance_ref = db.instance_get(context, instance_id)
- ec2_id = instance_ref['hostname']
# Checking instance.
self._live_migration_src_check(context, instance_ref)
@@ -159,48 +158,45 @@ class Scheduler(object):
def _live_migration_common_check(self, context, instance_ref, dest):
"""
- Live migration check routine.
- Below pre-checkings are followed by
- http://wiki.libvirt.org/page/TodoPreMigrationChecks
+ Live migration check routine.
+ Below pre-checkings are followed by
+ http://wiki.libvirt.org/page/TodoPreMigrationChecks
"""
# Checking dest exists.
dservice_refs = db.service_get_all_by_host(context, dest)
if len(dservice_refs) <= 0:
- msg = _('%s does not exists.')
- raise exception.Invalid(msg % dest)
+ raise exception.Invalid(_('%s does not exists.') % dest)
dservice_ref = dservice_refs[0]
# Checking original host( where instance was launched at) exists.
- orighost = instance_ref['launched_on']
- oservice_refs = db.service_get_all_by_host(context, orighost)
+ oservice_refs = db.service_get_all_by_host(context,
+ instance_ref['launched_on'])
if len(oservice_refs) <= 0:
msg = _('%s(where instance was launched at) does not exists.')
- raise exception.Invalid(msg % orighost)
+ raise exception.Invalid(msg % instance_ref['launched_on'])
oservice_ref = oservice_refs[0]
# Checking hypervisor is same.
- otype = oservice_ref['hypervisor_type']
- dtype = dservice_ref['hypervisor_type']
- if otype != dtype:
+ if oservice_ref['hypervisor_type'] != dservice_ref['hypervisor_type']:
msg = _('Different hypervisor type(%s->%s)')
- raise exception.Invalid(msg % (otype, dtype))
+ raise exception.Invalid(msg % (oservice_ref['hypervisor_type'],
+ dservice_ref['hypervisor_type']))
# Checkng hypervisor version.
- oversion = oservice_ref['hypervisor_version']
- dversion = dservice_ref['hypervisor_version']
- if oversion > dversion:
+ if oservice_ref['hypervisor_version'] > \
+ dservice_ref['hypervisor_version']:
msg = _('Older hypervisor version(%s->%s)')
- raise exception.Invalid(msg % (oversion, dversion))
+ raise exception.Invalid(msg % (oservice_ref['hypervisor_version'],
+ dservice_ref['hypervisor_version']))
# Checking cpuinfo.
- cpu_info = oservice_ref['cpu_info']
try:
rpc.call(context,
db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": 'compare_cpu',
- "args": {'cpu_info': cpu_info}})
+ "args": {'cpu_info': oservice_ref['cpu_info']}})
except rpc.RemoteError, e:
msg = _(("""%s doesnt have compatibility to %s"""
@@ -211,7 +207,7 @@ class Scheduler(object):
raise e
def has_enough_resource(self, context, instance_ref, dest):
- """ Check if destination host has enough resource for live migration"""
+ """Check if destination host has enough resource for live migration"""
# Getting instance information
ec2_id = instance_ref['hostname']
@@ -222,28 +218,27 @@ class Scheduler(object):
# Gettin host information
service_refs = db.service_get_all_by_host(context, dest)
if len(service_refs) <= 0:
- msg = _('%s does not exists.')
- raise exception.Invalid(msg % dest)
+ raise exception.Invalid(_('%s does not exists.') % dest)
service_ref = service_refs[0]
total_cpu = int(service_ref['vcpus'])
total_mem = int(service_ref['memory_mb'])
total_hdd = int(service_ref['local_gb'])
- instances_ref = db.instance_get_all_by_host(context, dest)
- for i_ref in instances_ref:
+ instances_refs = db.instance_get_all_by_host(context, dest)
+ for i_ref in instances_refs:
total_cpu -= int(i_ref['vcpus'])
total_mem -= int(i_ref['memory_mb'])
total_hdd -= int(i_ref['local_gb'])
# Checking host has enough information
- logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' %
+ logging.debug(_('host(%s) remains vcpu:%s mem:%s hdd:%s,') %
(dest, total_cpu, total_mem, total_hdd))
- logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' %
+ logging.debug(_('instance(%s) has vcpu:%s mem:%s hdd:%s,') %
(ec2_id, vcpus, mem, hdd))
if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd:
- msg = '%s doesnt have enough resource for %s' % (dest, ec2_id)
- raise exception.NotEmpty(msg)
+ raise exception.NotEmpty(_('%s is not capable to migrate %s') %
+ (dest, ec2_id))
logging.debug(_('%s has_enough_resource() for %s') % (dest, ec2_id))
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 1cc767a03..a181225a6 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -73,17 +73,13 @@ class SchedulerManager(manager.Manager):
# Based on bear design summit discussion,
# just put this here for bexar release.
def show_host_resource(self, context, host, *args):
- """ show the physical/usage resource given by hosts."""
+ """show the physical/usage resource given by hosts."""
- services = db.service_get_all_by_host(context, host)
- if len(services) == 0:
- return {'ret': False, 'msg': 'No such Host'}
-
- compute = [s for s in services if s['topic'] == 'compute']
- if 0 == len(compute):
- service_ref = services[0]
- else:
- service_ref = compute[0]
+ computes = db.service_get_all_compute_sorted(context)
+ computes = [s for s,v in computes if s['host'] == host]
+ if 0 == len(computes):
+ return {'ret': False, 'msg': 'No such Host or not compute node.'}
+ service_ref = computes[0]
# Getting physical resource information
h_resource = {'vcpus': service_ref['vcpus'],
@@ -92,13 +88,15 @@ class SchedulerManager(manager.Manager):
# Getting usage resource information
u_resource = {}
- instances_ref = db.instance_get_all_by_host(context,
- service_ref['host'])
+ instances_refs = db.instance_get_all_by_host(context,
+ service_ref['host'])
- if 0 == len(instances_ref):
- return {'ret': True, 'phy_resource': h_resource, 'usage': {}}
+ if 0 == len(instances_refs):
+ return {'ret': True,
+ 'phy_resource': h_resource,
+ 'usage': u_resource}
- project_ids = [i['project_id'] for i in instances_ref]
+ project_ids = [i['project_id'] for i in instances_refs]
project_ids = list(set(project_ids))
for p_id in project_ids:
vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 09f6ee94a..344c2d2b5 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
+import mox
from nova import compute
from nova import context
@@ -27,9 +28,12 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
LOG = logging.getLogger('nova.tests.compute')
@@ -219,3 +223,304 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_update_service_exception(self):
+ """nova-compute updates Serivce table on DB like below.
+ nova.service.Serivce.start ->
+ nova.compute.ComputeManager.update_service.
+ This testcase confirms if no record found on Service
+ table, exception can be raised.
+ """
+ host = 'foo'
+ binary = 'nova-compute'
+ dbmock = self.mox.CreateMock(db)
+ dbmock.service_get_by_args(mox.IgnoreArg(),
+ mox.StrContains(host),
+ mox.StrContains(binary)).\
+ AndRaise(exception.NotFound())
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ try:
+ self.compute.update_service('dummy', host, binary)
+ except exception.Invalid, e:
+ msg = 'Cannot insert compute manager specific info'
+ c1 = ( 0 <= e.message.find(msg))
+ self.assertTrue(c1)
+ self.mox.ResetAll()
+
+ def test_update_service_success(self):
+ """nova-compute updates Serivce table on DB like below.
+ nova.service.Serivce.start ->
+ nova.compute.ComputeManager.update_service.
+ In this method, vcpus/memory_mb/local_gb/hypervisor_type/
+ hypervisor_version/cpu_info should be changed.
+ Based on this specification, this testcase confirms
+ if this method finishes successfully,
+ meaning self.db.service_update is called with dictinary
+
+ {'vcpu':aaa, 'memory_mb':bbb, 'local_gb':ccc,
+ 'hypervisor_type':ddd, 'hypervisor_version':eee,
+ 'cpu_info':fff}
+
+ Since each value of above dict can be obtained through
+ driver(different depends on environment),
+ only dictionary keys are checked.
+ """
+
+ def dic_key_check(dic):
+ validkey = ['vcpus', 'memory_mb', 'local_gb',
+ 'hypervisor_type', 'hypervisor_version', 'cpu_info']
+ return (list(set(validkey)) == list(set(dic.keys())))
+
+ host = 'foo'
+ binary = 'nova-compute'
+ service_ref = {'id':1, 'binary':'nova-compute', 'topic':'compute'}
+ dbmock = self.mox.CreateMock(db)
+ dbmock.service_get_by_args(mox.IgnoreArg(),
+ mox.StrContains(host),
+ mox.StrContains(binary)).\
+ AndReturn(service_ref)
+ dbmock.service_update(mox.IgnoreArg(),
+ service_ref['id'],
+ mox.Func(dic_key_check))
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ try:
+ self.compute.update_service('dummy', host, binary)
+ except exception.Invalid, e:
+ msg = 'Cannot insert compute manager specific info'
+ c1 = ( 0 <= e.message.find(msg))
+ self.assertTrue(c1)
+ self.mox.ResetAll()
+
+ def _setup_other_managers(self):
+ self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ """
+ if instances that are intended to be migrated doesnt have fixed_ip
+ (not happens usually), pre_live_migration has to raise Exception.
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NotFound,
+ self.compute.pre_live_migration,
+ c, instance_ref['id'])
+ self.mox.ResetAll()
+
+ def test_pre_live_migration_instance_has_volume(self):
+ """if any volumes are attached to the instances that are
+ intended to be migrated, setup_compute_volume must be
+ called because aoe module should be inserted at destination
+ host. This testcase checks on it.
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ c = context.get_admin_context()
+ i_id=instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ volmock = self.mox.CreateMock(self.volume_manager)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
+ for i in range(len(instance_ref['volumes'])):
+ vid = instance_ref['volumes'][i]['id']
+ volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
+ netmock.setup_compute_network(c, instance_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(instance_ref)
+
+ self.compute.db = dbmock
+ self.compute.volume_manager = volmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_id)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """if any volumes are not attached to the instances that are
+ intended to be migrated, log message should be appears
+ because administrator can proove instance conditions before
+ live_migration if any trouble occurs.
+ """
+ instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'}
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname'])
+ netmock.setup_compute_network(c, i_id)
+ drivermock.ensure_filtering_rules_for_instance(instance_ref)
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_id)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_volume(self):
+ """Any volumes are mounted by instances to be migrated are found,
+ vblade health must be checked before starting live-migration.
+ And that is checked by check_for_export().
+ This testcase confirms check_for_export() is called.
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'}
+ c = context.get_admin_context()
+ dest='dummydest'
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_id, dest)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_volume_and_exception(self):
+ """In addition to test_live_migration_instance_has_volume testcase,
+ this testcase confirms if any exception raises from check_for_export().
+ Then, valid seaquence of this method should recovering instance/volumes
+ status(ex. instance['state_description'] is changed from 'migrating'
+ -> 'running', was changed by scheduler)
+ """
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ dest='dummydest'
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+ rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).\
+ InAnyOrder('g1').AndRaise(rpc.RemoteError('du', 'mm', 'y'))
+ self.mox.StubOutWithMock(compute_manager.LOG, 'error')
+ compute_manager.LOG.error('Pre live migration for %s failed at %s',
+ instance_ref['hostname'], dest)
+ dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+ for i in range(len(instance_ref['volumes'])):
+ vid = instance_ref['volumes'][i]['id']
+ dbmock.volume_update(c, vid, {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_id, dest)
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_no_volume_and_exception(self):
+ """Simpler than test_live_migration_instance_has_volume_and_exception"""
+
+ instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'}
+ dest='dummydest'
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+ rpc.call(c, compute_topic,
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).\
+ AndRaise(rpc.RemoteError('du', 'mm', 'y'))
+ self.mox.StubOutWithMock(compute_manager.LOG, 'error')
+ compute_manager.LOG.error('Pre live migration for %s failed at %s',
+ instance_ref['hostname'], dest)
+ dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_id, dest)
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_volume(self):
+ """Simpler version than test_live_migration_instance_has_volume."""
+ instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+ 'hostname':'i-000000001'}
+ c = context.get_admin_context()
+ dest='dummydest'
+ i_id = instance_ref['id']
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+ rpc.call(c, compute_topic,
+ {"method": "pre_live_migration",
+ "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ drivermock.live_migration(c, instance_ref, dest)
+
+ self.compute.db = dbmock
+ self.compute.driver = drivermock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_id, dest)
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 9d458244b..c62bca9b1 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,10 +20,12 @@ Tests For Scheduler
"""
import datetime
+import mox
from mox import IgnoreArg
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import service
from nova import test
@@ -32,6 +34,8 @@ from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
@@ -75,7 +79,102 @@ class SchedulerTestCase(test.TestCase):
'args': {'num': 7}})
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
+
+ def test_show_host_resource_host_not_exit(self):
+ """
+ A testcase of driver.has_enough_resource
+ given host does not exists.
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+
+ self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
+ manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ result = scheduler.show_host_resource(ctxt, dest)
+ # ret should be dict
+ keys = ['ret', 'msg']
+ c1 = list(set(result.keys())) == list(set(keys))
+ c2 = not result['ret']
+ c3 = result['msg'].find('No such Host or not compute node') <= 0
+ self.assertTrue( c1 and c2 and c3)
+ self.mox.UnsetStubs()
+
+ def test_show_host_resource_no_project(self):
+ """
+ A testcase of driver.show_host_resource
+ no instance stays on the given host
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ service_ref = {'id':1, 'host':dest}
+ service_ref.update(r0)
+
+ self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
+ manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
+ AndReturn([(service_ref, 0)])
+ manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ result = scheduler.show_host_resource(ctxt, dest)
+ # ret should be dict
+ keys = ['ret', 'phy_resource', 'usage']
+ c1 = list(set(result.keys())) == list(set(keys))
+ c2 = result['ret']
+ c3 = result['phy_resource'] == r0
+ c4 = result['usage'] == {}
+ self.assertTrue( c1 and c2 and c3 and c4)
+ self.mox.UnsetStubs()
+
+ def test_show_host_resource_works_correctly(self):
+ """
+ A testcase of driver.show_host_resource
+ to make sure everything finished with no error.
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20}
+ r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30}
+ service_ref = {'id':1, 'host':dest}
+ service_ref.update(r0)
+ instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'}
+ instance_ref2.update(r1)
+ instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'}
+ instance_ref3.update(r1)
+ self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
+ manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
+ AndReturn([(service_ref, 0)])
+ manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([instance_ref2, instance_ref3])
+ for p in ['p-01', 'p-02']:
+ manager.db.instance_get_vcpu_sum_by_host_and_project(
+ ctxt, dest, p).AndReturn(r2['vcpus'])
+ manager.db.instance_get_memory_sum_by_host_and_project(
+ ctxt, dest, p).AndReturn(r2['memory_mb'])
+ manager.db.instance_get_disk_sum_by_host_and_project(
+ ctxt, dest, p).AndReturn(r2['local_gb'])
+
+ self.mox.ReplayAll()
+ result = scheduler.show_host_resource(ctxt, dest)
+ # ret should be dict
+ keys = ['ret', 'phy_resource', 'usage']
+ c1 = list(set(result.keys())) == list(set(keys))
+ c2 = result['ret']
+ c3 = result['phy_resource'] == r0
+ c4 = result['usage'].keys() == ['p-01', 'p-02']
+ c5 = result['usage']['p-01'] == r2
+ c6 = result['usage']['p-02'] == r2
+ self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6)
+ self.mox.UnsetStubs()
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -384,3 +483,626 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
+
+ def test_scheduler_live_migraiton_with_volume(self):
+ """
+ driver.scheduler_live_migration finishes successfully
+ (volumes are attached to instances)
+        This testcase makes sure schedule_live_migration
+ changes instance state from 'running' -> 'migrating'
+ """
+ driver_i = self.scheduler.driver
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy',
+ 'volumes':[{'id':1}, {'id':2}]}
+ dest = 'dummydest'
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ # must be IgnoreArg() because scheduler changes ctxt's memory address
+ driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref)
+
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest)
+ driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'],
+ power_state.PAUSED, 'migrating')
+ for v in i_ref['volumes']:
+ driver.db.volume_update(mox.IgnoreArg(), v['id'],
+ {'status': 'migrating'})
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs={'instance_id':i_ref['id'], 'dest':dest}
+ rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
+ {"method": 'live_migration',
+ "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(ctxt, topic,
+ instance_id=i_ref['id'], dest=dest)
+ self.mox.UnsetStubs()
+
+ def test_scheduler_live_migraiton_no_volume(self):
+ """
+        driver.scheduler_live_migration finishes successfully
+        (no volumes are attached to the instance).
+        This testcase makes sure schedule_live_migration
+        changes instance state from 'running' -> 'migrating'.
+ """
+ driver_i = self.scheduler.driver
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]}
+ dest = 'dummydest'
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ # must be IgnoreArg() because scheduler changes ctxt's memory address
+ driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest)
+ driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'],
+ power_state.PAUSED, 'migrating')
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs={'instance_id':i_ref['id'], 'dest':dest}
+ rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
+ {"method": 'live_migration',
+ "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(ctxt, topic,
+ instance_id=i_ref['id'], dest=dest)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_src_check_instance_not_running(self):
+ """
+ A testcase of driver._live_migration_src_check.
+ The instance given by instance_id is not running.
+ """
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ dest = 'dummydest'
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'volumes':[], 'state_description':'migrating',
+ 'state':power_state.RUNNING}
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+ except exception.Invalid, e:
+            self.assertTrue(e.message.find('is not running') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_src_check_volume_node_not_alive(self):
+ """
+ A testcase of driver._live_migration_src_check.
+ Volume node is not alive if any volumes are attached to
+ the given instance.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'volumes':[{'id':1}, {'id':2}],
+ 'state_description':'running', 'state':power_state.RUNNING}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('volume node is not alive') >= 0)
+ self.mox.UnsetStubs()
+
+    def test_live_migraiton_src_check_compute_node_not_alive(self):
+ """
+ A testcase of driver._live_migration_src_check.
+ The testcase make sure src-compute node is alive.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
+ 'state_description':'running', 'state':power_state.RUNNING}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is not alive') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_src_check_works_correctly(self):
+ """
+ A testcase of driver._live_migration_src_check.
+ The testcase make sure everything finished with no error.
+ """
+ driver_i = self.scheduler.driver
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
+ 'state_description':'running', 'state':power_state.RUNNING}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(driver_i, 'service_is_up')
+ driver_i.service_is_up(service_ref).AndReturn(True)
+
+ self.mox.ReplayAll()
+ ret = driver_i._live_migration_src_check(ctxt, i_ref)
+ self.assertTrue(ret == None)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_dest_check_service_not_exists(self):
+ """
+ A testcase of driver._live_migration_dst_check.
+ Destination host does not exist.
+ """
+ driver_i = self.scheduler.driver
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ driver_i._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('does not exists') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_dest_check_service_isnot_compute(self):
+ """
+ A testcase of driver._live_migration_dst_check.
+ Destination host does not provide compute.
+ """
+ driver_i = self.scheduler.driver
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'api')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+
+ self.mox.ReplayAll()
+ try:
+ driver_i._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('must be compute node') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_dest_check_service_not_alive(self):
+ """
+ A testcase of driver._live_migration_dst_check.
+ Destination host compute service is not alive.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'compute')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
+ self.scheduler.driver.service_is_up(service_ref).AndReturn(False)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is not alive') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_dest_check_service_same_host(self):
+ """
+ A testcase of driver._live_migration_dst_check.
+ Destination host is same as src host.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'compute')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
+ self.scheduler.driver.service_is_up(service_ref).AndReturn(True)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is running now. choose other host') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_dest_check_service_works_correctly(self):
+ """
+ A testcase of driver._live_migration_dst_check.
+ The testcase make sure everything finished with no error.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('host', i_ref['host'])
+ service_ref.__setitem__('topic', 'compute')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
+ self.scheduler.driver.service_is_up(service_ref).AndReturn(True)
+ self.mox.StubOutWithMock(self.scheduler.driver, 'has_enough_resource')
+ self.scheduler.driver.has_enough_resource(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('is running now. choose other host') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_common_check_service_dest_not_exists(self):
+ """
+ A testcase of driver._live_migration_common_check.
+ Destination host does not exist.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ self.assertTrue(e.message.find('does not exists') >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_common_check_service_orig_not_exists(self):
+ """
+ A testcase of driver._live_migration_common_check.
+ Original host(an instance launched on) does not exist.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('host', i_ref['host'])
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ msg = 'where instance was launched at) does not exists'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_common_check_service_different_hypervisor(self):
+ """
+ A testcase of driver._live_migration_common_check.
+ Original host and dest host has different hypervisor type.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_type', 'kvm')
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_type', 'xen')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ msg = 'Different hypervisor type'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_common_check_service_different_version(self):
+ """
+ A testcase of driver._live_migration_common_check.
+ Original host and dest host has different hypervisor version.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_version', 12000)
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_version', 12001)
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ msg = 'Older hypervisor version'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self):
+ """
+ A testcase of driver._live_migration_common_check.
+        Cpu compatibility check between original and dest host fails.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_version', 12000)
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_version', 12000)
+        service_ref2.__setitem__('cpu_info', '<cpu>info</cpu>')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+ driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest)
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': service_ref2['cpu_info']}}).\
+ AndRaise(rpc.RemoteError('doesnt have compatibility to', '', ''))
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ except rpc.RemoteError, e:
+ msg = 'doesnt have compatibility to'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_live_migraiton_common_check_service_works_correctly(self):
+ """
+ A testcase of driver._live_migration_common_check.
+ The testcase make sure everything finished with no error.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ i_ref = {'id':1, 'hostname':'i-01',
+ 'host':'dummy', 'launched_on':'h1'}
+ service_ref = models.Service()
+ service_ref.__setitem__('id', 1)
+ service_ref.__setitem__('topic', 'compute')
+ service_ref.__setitem__('hypervisor_version', 12000)
+ service_ref2 = models.Service()
+ service_ref2.__setitem__('id', 2)
+ service_ref2.__setitem__('hypervisor_version', 12000)
+        service_ref2.__setitem__('cpu_info', '<cpu>info</cpu>')
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.service_get_all_by_host(mox.IgnoreArg(),
+ i_ref['launched_on']).\
+ AndReturn([service_ref2])
+ driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest)
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': service_ref2['cpu_info']}})
+
+ self.mox.ReplayAll()
+ ret = self.scheduler.driver._live_migration_common_check(ctxt,
+ i_ref,
+ dest)
+ self.assertTrue(ret == None)
+ self.mox.UnsetStubs()
+
+ def test_has_enough_resource_lack_resource_vcpu(self):
+ """
+ A testcase of driver.has_enough_resource.
+ Lack of vcpu.(boundary check)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':6, 'memory_mb':8, 'local_gb':10}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ except exception.NotEmpty, e:
+ msg = 'is not capable to migrate'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_has_enough_resource_lack_resource_memory(self):
+ """
+ A testcase of driver.has_enough_resource.
+ Lack of memory_mb.(boundary check)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':16, 'local_gb':10}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ except exception.NotEmpty, e:
+ msg = 'is not capable to migrate'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+
+ def test_has_enough_resource_lack_resource_disk(self):
+ """
+ A testcase of driver.has_enough_resource.
+ Lack of local_gb.(boundary check)
+ """
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':80}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ except exception.NotEmpty, e:
+ msg = 'is not capable to migrate'
+ self.assertTrue(e.message.find(msg) >= 0)
+ self.mox.UnsetStubs()
+
+ def test_has_enough_resource_works_correctly(self):
+ """
+ A testcase of driver.has_enough_resource
+ to make sure everything finished with no error.
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ topic = FLAGS.compute_topic
+ service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100}
+ i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy',
+ 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+
+ self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+ driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([service_ref])
+ driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
+ AndReturn([i_ref2, i_ref3])
+
+ self.mox.ReplayAll()
+ ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest)
+ self.assertTrue(ret == None)
+ self.mox.UnsetStubs()
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index a67c8d1e8..a147e69b4 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
@@ -41,7 +42,20 @@ class FakeManager(manager.Manager):
def test_method(self):
return 'manager'
-
+# Temporary globals passing host/binary/self.mox from testcase to fake class.
+ghost = None
+gbinary = None
+gmox = None
+class FakeComputeManager(compute_manager.ComputeManager):
+ """Fake computemanager manager for tests"""
+
+ def __init__(self, compute_driver=None, *args, **kwargs):
+ global ghost, gbinary, gmox
+ self.update_service(mox.IgnoreArg(), mox.StrContains(ghost), mox.StrContains(gbinary))
+ gmox.ReplayAll()
+ super(FakeComputeManager, self).__init__(compute_driver, *args, **kwargs)
+
+
class ExtendedService(service.Service):
def test_method(self):
return 'service'
@@ -258,3 +272,48 @@ class ServiceTestCase(test.TestCase):
serv.report_state()
self.assert_(not serv.model_disconnected)
+
+ def test_compute_can_update_services(self):
+ """
+ Test nova-compute successfully updated Service table on DB.
+ Doing so, self.manager.update_service must be called
+ if 'self.binary == nova-compute', and this testcase checks on it.
+ """
+ host = 'foo'
+ binary = 'nova-compute'
+ topic = 'compute1'
+ service_create = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'availability_zone': 'nova'}
+ service_ref = {'host': host,
+ 'binary': binary,
+ 'topic': topic,
+ 'report_count': 0,
+ 'availability_zone': 'nova',
+ 'id': 1}
+
+ service.db.service_get_by_args(mox.IgnoreArg(),
+ host,
+ binary).AndRaise(exception.NotFound())
+ service.db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ self.mox.StubOutWithMock(compute_manager.ComputeManager, 'update_service')
+
+
+ global ghost, gbinary, gmox
+ ghost = host
+ gbinary = binary
+ gmox = self.mox
+
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.tests.test_service.FakeComputeManager')
+ # ReplayAll has been executed FakeComputeManager.__init__()
+ #self.mox.ReplayAll()
+ serv.start()
+ serv.stop()
+
+
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index afdc89ba2..177e8f021 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,21 +14,29 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mox
+
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
+from nova import log as logging
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
+from nova.db.sqlalchemy import models
+from nova.compute import power_state
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
+libvirt = None
+libxml2 = None
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
@@ -52,6 +60,38 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
+ def _driver_dependent_test_setup(self):
+ """
+ Setup method.
+ Call this method at the top of each testcase method,
+        if the testcase requires libvirt and cheetah.
+ """
+ try :
+ global libvirt
+ global libxml2
+ libvirt_conn.libvirt = __import__('libvirt')
+ libvirt_conn.libxml2 = __import__('libxml2')
+ libvirt_conn._late_load_cheetah()
+ libvirt = __import__('libvirt')
+ except ImportError, e:
+            logging.warn("""This test has not been run because it """
+                """uses driver-dependent libraries Cheetah/libvirt/libxml2.""")
+ raise e
+
+        # inevitable mocks for calling
+        # nova.virt.libvirt_conn.LibvirtConnection.__init__
+ nwmock = self.mox.CreateMock(libvirt_conn.NWFilterFirewall)
+ self.mox.StubOutWithMock(libvirt_conn, 'NWFilterFirewall',
+ use_mock_anything=True)
+ libvirt_conn.NWFilterFirewall(mox.IgnoreArg()).AndReturn(nwmock)
+
+ obj = utils.import_object(FLAGS.firewall_driver)
+ fwmock = self.mox.CreateMock(obj)
+ self.mox.StubOutWithMock(libvirt_conn, 'utils',
+ use_mock_anything=True)
+ libvirt_conn.utils.import_object(FLAGS.firewall_driver).AndReturn(fwmock)
+ return nwmock, fwmock
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -188,9 +228,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically
- # set uri
- #
+ # This test is supposed to make sure we don't override a specifically set uri
+ #
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
@@ -202,6 +241,480 @@ class LibvirtConnTestCase(test.TestCase):
uri = conn.get_uri()
self.assertEquals(uri, testuri)
+ def test_get_memory_mb(self):
+ """
+ Check if get_memory_mb returns memory value
+        Connection/OS/driver difference does not matter for this method,
+ so everyone can execute for checking.
+ """
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertTrue(0 < conn.get_memory_mb())
+ self.mox.UnsetStubs()
+
+ def test_get_cpu_info_works_correctly(self):
+ """
+ Check if get_cpu_info works correctly.
+ (in case libvirt.getCapabilities() works correctly)
+ """
+ xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology sockets='2' """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cpu>""")
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertTrue(0 < len(conn.get_cpu_info()))
+ self.mox.UnsetStubs()
+
+ def test_get_cpu_info_inappropreate_xml(self):
+ """
+ Check if get_cpu_info raises exception
+ in case libvirt.getCapabilities() returns wrong xml
+        (in case the xml doesn't have a <cpu> tag)
+ """
+ xml=("""<cccccpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology sockets='2' """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cccccpu>""")
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ try:
+ conn.get_cpu_info()
+ except exception.Invalid, e:
+ c1 = ( 0 <= e.message.find('Invalid xml') )
+ self.assertTrue(c1)
+ self.mox.UnsetStubs()
+
+ def test_get_cpu_info_inappropreate_xml2(self):
+ """
+ Check if get_cpu_info raises exception
+ in case libvirt.getCapabilities() returns wrong xml
+        (in case the xml has an improper <topology> tag,
+        meaning the "sockets" attribute is missing)
+ """
+ xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cpu>""")
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ try:
+ conn.get_cpu_info()
+ except exception.Invalid, e:
+ c1 = ( 0 <= e.message.find('Invalid xml: topology') )
+ self.assertTrue(c1)
+ self.mox.UnsetStubs()
+
+ def test_compare_cpu_works_correctly(self):
+        """Calling libvirt.compareCPU() works correctly."""
+
+ t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+ """"topology":{"cores":"%s", "threads":"%s", """
+ """"sockets":"%s"}, "features":[%s]}""")
+ cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).AndReturn(1)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertTrue( None== conn.compare_cpu(cpu_info))
+ self.mox.UnsetStubs()
+
+ def test_compare_cpu_raises_exception(self):
+ """
+ Libvirt-related exception occurs when calling
+ libvirt.compare_cpu().
+ """
+ t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+ """"topology":{"cores":"%s", "threads":"%s", """
+ """"sockets":"%s"}, "features":[%s]}""")
+ cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+ AndRaise(libvirt.libvirtError('ERR'))
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError, conn.compare_cpu, cpu_info)
+ self.mox.UnsetStubs()
+
+ def test_compare_cpu_no_compatibility(self):
+ """libvirt.compare_cpu() return less than 0.(no compatibility)"""
+
+ t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+ """"topology":{"cores":"%s", "threads":"%s", """
+ """"sockets":"%s"}, "features":[%s]}""")
+ cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+ AndRaise(exception.Invalid('ERR'))
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info)
+ self.mox.UnsetStubs()
+
+ def test_ensure_filtering_rules_for_instance_works_correctly(self):
+ """ensure_filtering_rules_for_instance works as expected correctly"""
+
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+ except:
+ return
+
+ nwmock.setup_basic_filtering(mox.IgnoreArg())
+ fwmock.prepare_instance_filter(instance_ref)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ n = 'nova-instance-%s' % instance_ref.name
+ libvirt_conn.LibvirtConnection._conn.nwfilterLookupByName(n)
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.ensure_filtering_rules_for_instance(instance_ref)
+ self.mox.UnsetStubs()
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+        """ensure_filtering_rules_for_instance finishes with timeout"""
+
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+ except:
+ return
+
+ nwmock.setup_basic_filtering(mox.IgnoreArg())
+ fwmock.prepare_instance_filter(instance_ref)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ n = 'nova-instance-%s' % instance_ref.name
+ for i in range(FLAGS.live_migration_timeout_sec * 2):
+ libvirt_conn.LibvirtConnection._conn.\
+ nwfilterLookupByName(n).AndRaise(libvirt.libvirtError('ERR'))
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ try:
+ conn.ensure_filtering_rules_for_instance(instance_ref)
+ except exception.Error, e:
+ c1 = ( 0<=e.message.find('Timeout migrating for'))
+ self.assertTrue(c1)
+ self.mox.UnsetStubs()
+
+ def test_live_migration_works_correctly(self):
+ """_live_migration works as expected correctly """
+
+ class dummyCall(object):
+ f = None
+ def start(self, interval=0, now=False):
+ pass
+
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+ dest = 'desthost'
+ ctxt = context.get_admin_context()
+
+ try:
+ self._driver_dependent_test_setup()
+ except:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI",
+ use_mock_anything=True)
+ vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndReturn(None)
+ libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+ AndReturn(vdmock)
+ # below description is also ok.
+ #self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn,
+ # "lookupByName", use_mock_anything=True)
+
+ libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
+
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ ret = conn._live_migration(ctxt, instance_ref, dest)
+ self.assertTrue(ret == None)
+ self.mox.UnsetStubs()
+
+ def test_live_migration_raises_exception(self):
+ """
+ _live_migration raises exception, then this testcase confirms
+ state_description/state for the instances/volumes are recovered.
+ """
+ class Instance(models.NovaBase):
+ id = 0
+ volumes = None
+ name = 'name'
+
+ ctxt = context.get_admin_context()
+ dest = 'desthost'
+ instance_ref = Instance()
+ instance_ref.__setitem__('id', 1)
+ instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}])
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+ except:
+ return
+
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+ use_mock_anything=True)
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI",
+ use_mock_anything=True)
+ vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndRaise(libvirt.libvirtError('ERR'))
+ libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+ AndReturn(vdmock)
+ self.mox.StubOutWithMock(db, 'instance_set_state')
+ db.instance_set_state(ctxt, instance_ref['id'],
+ power_state.RUNNING, 'running')
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref.volumes:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\
+ InAnyOrder('g1')
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
+ def test_post_live_migration_working_correctly(self):
+ """_post_live_migration works as expected correctly """
+
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+ 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+ 'volumes':[{'id':1}, {'id':2} ]}
+ network_ref = {'id':1, 'host':dest}
+ floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+ except:
+ return
+ fwmock.unfilter_instance(instance_ref)
+
+ fixed_ip = instance_ref['fixed_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+ db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+ db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+ self.mox.StubOutWithMock(db, 'network_update')
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+ fl_ip = instance_ref['floating_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+ db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(fl_ip)
+ self.mox.StubOutWithMock(db, 'floating_ip_get_by_address')
+ db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\
+ AndReturn(floating_ip_ref)
+ self.mox.StubOutWithMock(db, 'floating_ip_update')
+ db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest})
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ db.instance_update(ctxt, instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING, 'host': dest})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref['volumes']:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn._post_live_migration( ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
+ def test_post_live_migration_no_floating_ip(self):
+ """
+ _post_live_migration works as expected correctly
+        (in case the instance doesn't have a floating ip)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+ 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+ 'volumes':[{'id':1}, {'id':2} ]}
+ network_ref = {'id':1, 'host':dest}
+ floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+ except:
+ return
+ fwmock.unfilter_instance(instance_ref)
+
+ fixed_ip = instance_ref['fixed_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+ db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+ db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+ self.mox.StubOutWithMock(db, 'network_update')
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+ self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+ db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None)
+ self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+ libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+ libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+ instance_ref['hostname'])
+        # The last messages are ignored; maybe there is no need to check so strictly?
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ db.instance_update(ctxt, instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': dest})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref['volumes']:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn._post_live_migration( ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
+ def test_post_live_migration_no_floating_ip_with_exception(self):
+ """
+ _post_live_migration works as expected correctly
+        (in case the instance doesn't have a floating ip, and an exception is raised)
+ """
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+ instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+ 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+ 'volumes':[{'id':1}, {'id':2} ]}
+ network_ref = {'id':1, 'host':dest}
+ floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+ try:
+ nwmock, fwmock = self._driver_dependent_test_setup()
+ except:
+ return
+ fwmock.unfilter_instance(instance_ref)
+
+ fixed_ip = instance_ref['fixed_ip']
+ self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+ db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+ db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+ self.mox.StubOutWithMock(db, 'network_update')
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+ self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+ db.instance_get_floating_address(ctxt, instance_ref['id']).\
+ AndRaise(exception.NotFound())
+ self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+ libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+ libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+ instance_ref['hostname'])
+        # The last message is ignored; maybe there is no need to check so strictly?
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+ libvirt_conn.LOG.info(mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ db.instance_update(ctxt, instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING, 'host': dest})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in instance_ref['volumes']:
+ db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn._post_live_migration( ctxt, instance_ref, dest)
+ self.mox.UnsetStubs()
+
def tearDown(self):
super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
@@ -475,3 +988,4 @@ class NWFilterTestCase(test.TestCase):
self.fw.prepare_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
+
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 80ae7f34c..f469af681 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -316,15 +316,15 @@ class FakeConnection(object):
def get_vcpu_number(self):
"""This method is supported only libvirt. """
- return -1
+ return
def get_memory_mb(self):
"""This method is supported only libvirt.."""
- return -1
+ return
def get_local_gb(self):
"""This method is supported only libvirt.."""
- return -1
+ return
def get_hypervisor_type(self):
"""This method is supported only libvirt.."""
@@ -332,12 +332,16 @@ class FakeConnection(object):
def get_hypervisor_version(self):
"""This method is supported only libvirt.."""
- return -1
+ return
def compare_cpu(self, xml):
"""This method is supported only libvirt.."""
raise NotImplementedError('This method is supported only libvirt.')
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only libvirt.."""
+ raise NotImplementedError('This method is supported only libvirt.')
+
def live_migration(self, context, instance_ref, dest):
"""This method is supported only libvirt.."""
raise NotImplementedError('This method is supported only libvirt.')
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 7d1f76b32..49dd03c57 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -861,18 +861,18 @@ class LibvirtConnection(object):
def get_cpu_info(self):
""" Get cpuinfo information """
- xmlstr = self._conn.getCapabilities()
- xml = libxml2.parseDoc(xmlstr)
+ xml = self._conn.getCapabilities()
+ xml = libxml2.parseDoc(xml)
nodes = xml.xpathEval('//cpu')
if len(nodes) != 1:
- msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' \
- % len(nodes)
+ msg = 'Invalid xml. "<cpu>" must be 1, but %d.' % len(nodes)
msg += '\n' + xml.serialize()
raise exception.Invalid(_(msg))
- arch = xml.xpathEval('//cpu/arch')[0].getContent()
- model = xml.xpathEval('//cpu/model')[0].getContent()
- vendor = xml.xpathEval('//cpu/vendor')[0].getContent()
+ cpu_info = dict()
+ cpu_info['arch'] = xml.xpathEval('//cpu/arch')[0].getContent()
+ cpu_info['model'] = xml.xpathEval('//cpu/model')[0].getContent()
+ cpu_info['vendor'] = xml.xpathEval('//cpu/vendor')[0].getContent()
topology_node = xml.xpathEval('//cpu/topology')[0].get_properties()
topology = dict()
@@ -890,18 +890,19 @@ class LibvirtConnection(object):
feature_nodes = xml.xpathEval('//cpu/feature')
features = list()
for nodes in feature_nodes:
- feature_name = nodes.get_properties().getContent()
- features.append(feature_name)
+ features.append(nodes.get_properties().getContent())
template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
""""topology":{"cores":"%s", "threads":"%s", """
""""sockets":"%s"}, "features":[%s]}""")
- c = topology['cores']
- s = topology['sockets']
- t = topology['threads']
f = ['"%s"' % x for x in features]
- cpu_info = template % (arch, model, vendor, c, s, t, ', '.join(f))
- return cpu_info
+ return template % (cpu_info['arch'],
+ cpu_info['model'],
+ cpu_info['vendor'],
+ topology['cores'],
+ topology['sockets'],
+ topology['threads'],
+ ', '.join(f))
def block_stats(self, instance_name, disk):
"""
@@ -935,12 +936,12 @@ class LibvirtConnection(object):
def compare_cpu(self, cpu_info):
"""
- Check the host cpu is compatible to a cpu given by xml.
- "xml" must be a part of libvirt.openReadonly().getCapabilities().
- return values follows by virCPUCompareResult.
- if 0 > return value, do live migration.
+ Check the host cpu is compatible to a cpu given by xml.
+ "xml" must be a part of libvirt.openReadonly().getCapabilities().
+ return values follows by virCPUCompareResult.
+ if 0 > return value, do live migration.
- 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+ 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
"""
msg = _('Checking cpu_info: instance was launched this cpu.\n: %s ')
LOG.info(msg % cpu_info)
@@ -952,7 +953,7 @@ class LibvirtConnection(object):
url = 'http://libvirt.org/html/libvirt-libvirt.html'
url += '#virCPUCompareResult\n'
msg = 'CPU does not have compativility.\n'
- msg += 'result:%d \n'
+ msg += 'result:%s \n'
msg += 'Refer to %s'
msg = _(msg)
@@ -960,7 +961,7 @@ class LibvirtConnection(object):
try:
ret = self._conn.compareCPU(xml, 0)
except libvirt.libvirtError, e:
- LOG.error(msg % (ret, url))
+ LOG.error(msg % (e.message, url))
raise e
if ret <= 0:
@@ -969,24 +970,26 @@ class LibvirtConnection(object):
return
def ensure_filtering_rules_for_instance(self, instance_ref):
- """ Setting up inevitable filtering rules on compute node,
- and waiting for its completion.
- To migrate an instance, filtering rules to hypervisors
- and firewalls are inevitable on destination host.
- ( Waiting only for filterling rules to hypervisor,
- since filtering rules to firewall rules can be set faster).
-
- Concretely, the below method must be called.
- - setup_basic_filtering (for nova-basic, etc.)
- - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
-
- to_xml may have to be called since it defines PROJNET, PROJMASK.
- but libvirt migrates those value through migrateToURI(),
- so , no need to be called.
-
- Don't use thread for this method since migration should
- not be started when setting-up filtering rules operations
- are not completed."""
+ """
+ Setting up inevitable filtering rules on compute node,
+ and waiting for its completion.
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+        (Waiting only for filtering rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+        so, no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+ """
# Tf any instances never launch at destination host,
# basic-filtering must be set here.
@@ -1009,40 +1012,44 @@ class LibvirtConnection(object):
raise exception.Error(msg % (ec2_id, instance_ref.name))
time.sleep(0.5)
- def live_migration(self, context, instance_ref, dest):
+ def live_migration(self, ctxt, instance_ref, dest):
"""
- Just spawning live_migration operation for
- distributing high-load.
+ Just spawning live_migration operation for
+ distributing high-load.
"""
- greenthread.spawn(self._live_migration, context, instance_ref, dest)
+ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest)
- def _live_migration(self, context, instance_ref, dest):
+ def _live_migration(self, ctxt, instance_ref, dest):
""" Do live migration."""
# Do live migration.
try:
- duri = FLAGS.live_migration_uri % dest
-
flaglist = FLAGS.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
- bandwidth = FLAGS.live_migration_bandwidth
-
if self.read_only:
tmpconn = self._connect(self.libvirt_uri, False)
dom = tmpconn.lookupByName(instance_ref.name)
- dom.migrateToURI(duri, logical_sum, None, bandwidth)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
tmpconn.close()
else:
dom = self._conn.lookupByName(instance_ref.name)
- dom.migrateToURI(duri, logical_sum, None, bandwidth)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
except Exception, e:
- id = instance_ref['id']
- db.instance_set_state(context, id, power_state.RUNNING, 'running')
+ db.instance_set_state(ctxt,
+ instance_ref['id'],
+ power_state.RUNNING,
+ 'running')
for v in instance_ref['volumes']:
- db.volume_update(context,
+ db.volume_update(ctxt,
v['id'],
{'status': 'in-use'})
@@ -1052,20 +1059,20 @@ class LibvirtConnection(object):
timer = utils.LoopingCall(f=None)
def wait_for_live_migration():
-
+ """waiting for live migration completion"""
try:
- state = self.get_info(instance_ref.name)['state']
+ self.get_info(instance_ref.name)['state']
except exception.NotFound:
timer.stop()
- self._post_live_migration(context, instance_ref, dest)
+ self._post_live_migration(ctxt, instance_ref, dest)
timer.f = wait_for_live_migration
timer.start(interval=0.5, now=True)
- def _post_live_migration(self, context, instance_ref, dest):
+ def _post_live_migration(self, ctxt, instance_ref, dest):
"""
- Post operations for live migration.
- Mainly, database updating.
+ Post operations for live migration.
+ Mainly, database updating.
"""
LOG.info('post livemigration operation is started..')
# Detaching volumes.
@@ -1079,61 +1086,61 @@ class LibvirtConnection(object):
'nova.virt.libvirt_conn.IptablesFirewallDriver':
try:
self.firewall_driver.unfilter_instance(instance_ref)
- except KeyError, e:
+ except KeyError:
pass
# Database updating.
ec2_id = instance_ref['hostname']
instance_id = instance_ref['id']
- fixed_ip = db.instance_get_fixed_address(context, instance_id)
+ fixed_ip = db.instance_get_fixed_address(ctxt, instance_id)
# Not return if fixed_ip is not found, otherwise,
# instance never be accessible..
if None == fixed_ip:
logging.warn('fixed_ip is not found for %s ' % ec2_id)
- db.fixed_ip_update(context, fixed_ip, {'host': dest})
- network_ref = db.fixed_ip_get_network(context, fixed_ip)
- db.network_update(context, network_ref['id'], {'host': dest})
+ db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+ network_ref = db.fixed_ip_get_network(ctxt, fixed_ip)
+ db.network_update(ctxt, network_ref['id'], {'host': dest})
try:
floating_ip \
- = db.instance_get_floating_address(context, instance_id)
+ = db.instance_get_floating_address(ctxt, instance_id)
# Not return if floating_ip is not found, otherwise,
# instance never be accessible..
if None == floating_ip:
- logging.error('floating_ip is not found for %s ' % ec2_id)
+ LOG.info(_('floating_ip is not found for %s'), ec2_id)
else:
- floating_ip_ref = db.floating_ip_get_by_address(context,
+ floating_ip_ref = db.floating_ip_get_by_address(ctxt,
floating_ip)
- db.floating_ip_update(context,
+ db.floating_ip_update(ctxt,
floating_ip_ref['address'],
{'host': dest})
except exception.NotFound:
- logging.debug('%s doesnt have floating_ip.. ' % ec2_id)
+ LOG.info(_('floating_ip is not found for %s'), ec2_id)
except:
- msg = 'Live migration: Unexpected error:'
- msg += '%s cannot inherit floating ip.. ' % ec2_id
- logging.error(_(msg))
+ msg = ("""Live migration: Unexpected error:"""
+ """%s cannot inherit floating ip..""")
+ LOG.error(_(msg), ec2_id)
# Restore instance/volume state
- db.instance_update(context,
+ db.instance_update(ctxt,
instance_id,
{'state_description': 'running',
'state': power_state.RUNNING,
'host': dest})
for v in instance_ref['volumes']:
- db.volume_update(context,
+ db.volume_update(ctxt,
v['id'],
{'status': 'in-use'})
- logging.info(_('Live migrating %s to %s finishes successfully')
+ LOG.info(_('Live migrating %s to %s finishes successfully')
% (ec2_id, dest))
msg = _(("""Known error: the below error is nomally occurs.\n"""
"""Just check if iinstance is successfully migrated.\n"""
"""libvir: QEMU error : Domain not found: no domain """
"""with matching name.."""))
- logging.info(msg)
+ LOG.info(msg)
class FirewallDriver(object):
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c10f73fe7..1e7933f51 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -215,15 +215,15 @@ class XenAPIConnection(object):
def get_vcpu_number(self):
"""This method is supported only libvirt. """
- return -1
+ return
def get_memory_mb(self):
"""This method is supported only libvirt.."""
- return -1
+ return
def get_local_gb(self):
"""This method is supported only libvirt.."""
- return -1
+ return
def get_hypervisor_type(self):
"""This method is supported only libvirt.."""
@@ -231,12 +231,18 @@ class XenAPIConnection(object):
def get_hypervisor_version(self):
"""This method is supported only libvirt.."""
- return -1
+ return
def compare_cpu(self, xml):
+ """This method is supported only libvirt.."""
+ raise NotImplementedError('This method is supported only libvirt.')
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only libvirt.."""
raise NotImplementedError('This method is supported only libvirt.')
def live_migration(self, context, instance_ref, dest):
+ """This method is supported only libvirt.."""
raise NotImplementedError('This method is supported only libvirt.')
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 1735d79eb..906eb86ea 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -153,6 +153,6 @@ class VolumeManager(manager.Manager):
def check_for_export(self, context, instance_id):
"""Make sure whether volume is exported."""
if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver':
- instance_ref = self.db.instance_get(instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
for v in instance_ref['volumes']:
self.driver.check_for_export(context, v['id'])