Diffstat (limited to 'nova')
-rw-r--r--  nova/compute/manager.py       |  53
-rw-r--r--  nova/scheduler/driver.py      |  74
-rw-r--r--  nova/scheduler/manager.py     |  18
-rw-r--r--  nova/tests/test_scheduler.py  |  14
-rw-r--r--  nova/tests/test_virt.py       | 363
-rw-r--r--  nova/virt/libvirt_conn.py     |  10
-rw-r--r--  nova/volume/manager.py        |   4
7 files changed, 166 insertions, 370 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d085a0b6a..7104daa1e 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -624,11 +624,12 @@ class ComputeManager(manager.Manager):
return self.driver.compare_cpu(cpu_info)
@exception.wrap_exception
- def mktmpfile(self, context):
+ def create_shared_storage_test_file(self, context):
"""Makes tmpfile under FLAGS.instance_path.
This method enables compute nodes to recognize that they mounts
- same shared storage. mktmpfile()/confirm_tmpfile is a pair.
+        same shared storage. (create|check|cleanup)_shared_storage_test_file()
+        are used together as a set.
:param context: security context
:returns: tmpfile name(basename)
@@ -636,26 +637,36 @@ class ComputeManager(manager.Manager):
"""
dirpath = FLAGS.instances_path
- fd, name = tempfile.mkstemp(dir=dirpath)
+ fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
- "compute node that they mounts same storage.") % name)
+                    "compute nodes that they mount the same storage.") % tmp_file)
os.fdopen(fd, 'w+').close()
- return os.path.basename(name)
+ return os.path.basename(tmp_file)
@exception.wrap_exception
- def confirm_tmpfile(self, context, filename):
- """Confirms existence of the tmpfile given by path.
+ def check_shared_storage_test_file(self, context, filename):
+ """Confirms existence of the tmpfile under FLAGS.instances_path.
:param context: security context
:param filename: confirm existence of FLAGS.instances_path/thisfile
- :returns: depends on os.remove()
"""
- p = os.path.join(FLAGS.instances_path, filename)
- if not os.path.exists(p):
- raise exception.NotFound(_('%s not found') % p)
- return os.remove(p)
+ tmp_file = os.path.join(FLAGS.instances_path, filename)
+ if not os.path.exists(tmp_file):
+ raise exception.NotFound(_('%s not found') % tmp_file)
+
+ @exception.wrap_exception
+ def cleanup_shared_storage_test_file(self, context, filename):
+        """Removes the tmpfile under FLAGS.instances_path.
+
+        :param context: security context
+        :param filename: remove FLAGS.instances_path/thisfile
+
+ """
+
+ tmp_file = os.path.join(FLAGS.instances_path, filename)
+ os.remove(tmp_file)
@exception.wrap_exception
def update_available_resource(self, context):
@@ -687,7 +698,7 @@ class ComputeManager(manager.Manager):
raise exception.NotFound(msg % locals())
# If any volume is mounted, prepare here.
- if len(instance_ref['volumes']) == 0:
+ if not instance_ref['volumes']:
LOG.info(_("%s has no volume."), ec2_id)
else:
for v in instance_ref['volumes']:
@@ -701,16 +712,16 @@ class ComputeManager(manager.Manager):
# Retry operation is necessary because continuously request comes,
# concorrent request occurs to iptables, then it complains.
max_retry = FLAGS.live_migration_retry_count
- for i in range(max_retry):
+ for cnt in range(max_retry):
try:
self.network_manager.setup_compute_network(context,
instance_id)
break
- except exception.ProcessExecutionError, e:
- if i == max_retry - 1:
+ except exception.ProcessExecutionError:
+ if cnt == max_retry - 1:
raise
else:
- LOG.warn(_("setup_compute_network() failed %(i)d."
+                LOG.warn(_("setup_compute_network() failed %(cnt)d. "
"Retry up to %(max_retry)d for %(ec2_id)s.")
% locals())
time.sleep(1)
@@ -739,7 +750,7 @@ class ComputeManager(manager.Manager):
try:
# Checking volume node is working correctly when any volumes
# are attached to instances.
- if len(instance_ref['volumes']) != 0:
+ if instance_ref['volumes']:
rpc.call(context,
FLAGS.volume_topic,
{"method": "check_for_export",
@@ -751,7 +762,7 @@ class ComputeManager(manager.Manager):
{"method": "pre_live_migration",
"args": {'instance_id': instance_id}})
- except Exception, e:
+ except Exception:
msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
LOG.error(msg % locals())
self.recover_live_migration(context, instance_ref)
@@ -843,5 +854,5 @@ class ComputeManager(manager.Manager):
'state': power_state.RUNNING,
'host': host})
- for v in instance_ref['volumes']:
- self.db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+ for volume in instance_ref['volumes']:
+ self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'})
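The renamed compute methods above split the old mktmpfile()/confirm_tmpfile() probe into explicit create/check/cleanup steps: the destination host drops a marker file under FLAGS.instances_path, the source host checks whether it can see the same file, and the destination removes it afterwards. Below is a minimal standalone sketch of that pattern; the function names are hypothetical and a local temporary directory stands in for the shared instances path and the RPC hops, so this is illustrative only, not the nova code itself.

# Standalone sketch of the shared-storage probe pattern (illustrative only;
# names are hypothetical and a local directory stands in for
# FLAGS.instances_path and for the two compute hosts reached over RPC).
import os
import tempfile


def create_test_file(instances_path):
    # Destination host: drop an empty marker file and report its basename.
    fd, path = tempfile.mkstemp(dir=instances_path)
    os.close(fd)
    return os.path.basename(path)


def check_test_file(instances_path, filename):
    # Source host: if the marker is visible here, the storage is shared.
    path = os.path.join(instances_path, filename)
    if not os.path.exists(path):
        raise IOError('%s not found' % path)


def cleanup_test_file(instances_path, filename):
    # Destination host: always remove the marker afterwards.
    os.remove(os.path.join(instances_path, filename))


if __name__ == '__main__':
    shared = tempfile.mkdtemp()          # both "hosts" see this directory
    name = create_test_file(shared)
    check_test_file(shared, name)        # would raise on non-shared storage
    cleanup_test_file(shared, name)
    print('instances path is shared')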
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 73ce651da..4485ba39f 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -100,9 +100,9 @@ class Scheduler(object):
'migrating')
# Changing volume state
- for v in instance_ref['volumes']:
+ for volume_ref in instance_ref['volumes']:
db.volume_update(context,
- v['id'],
+ volume_ref['id'],
{'status': 'migrating'})
# Return value is necessary to send request to src
@@ -121,17 +121,16 @@ class Scheduler(object):
# Checking instance is running.
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
- msg = _('Instance(%s) is not running')
ec2_id = instance_ref['hostname']
- raise exception.Invalid(msg % ec2_id)
+ raise exception.Invalid(_('Instance(%s) is not running') % ec2_id)
# Checing volume node is running when any volumes are mounted
# to the instance.
if len(instance_ref['volumes']) != 0:
services = db.service_get_all_by_topic(context, 'volume')
if len(services) < 1 or not self.service_is_up(services[0]):
- msg = _('volume node is not alive(time synchronize problem?)')
- raise exception.Invalid(msg)
+            raise exception.Invalid(_("volume node is not alive "
+                                      "(time synchronization problem?)"))
# Checking src host exists and compute node
src = instance_ref['host']
@@ -139,8 +138,8 @@ class Scheduler(object):
# Checking src host is alive.
if not self.service_is_up(services[0]):
- msg = _('%s is not alive(time synchronize problem?)')
- raise exception.Invalid(msg % src)
+            raise exception.Invalid(_("%s is not alive (time "
+                                      "synchronization problem?)") % src)
def _live_migration_dest_check(self, context, instance_ref, dest):
"""Live migration check routine (for destination host).
@@ -157,8 +156,8 @@ class Scheduler(object):
# Checking dest host is alive.
if not self.service_is_up(dservice_ref):
- msg = _('%s is not alive(time synchronize problem?)')
- raise exception.Invalid(msg % dest)
+            raise exception.Invalid(_("%s is not alive (time "
+                                      "synchronization problem?)") % dest)
# Checking whether The host where instance is running
# and dest is not same.
@@ -170,7 +169,9 @@ class Scheduler(object):
% locals())
# Checking dst host still has enough capacities.
- self.has_enough_resources(context, instance_ref, dest)
+ self.assert_compute_node_has_enough_resources(context,
+ instance_ref,
+ dest)
def _live_migration_common_check(self, context, instance_ref, dest):
"""Live migration common check routine.
@@ -202,18 +203,20 @@ class Scheduler(object):
oservice_ref = oservice_refs[0]['compute_service'][0]
# Checking hypervisor is same.
- o = oservice_ref['hypervisor_type']
- d = dservice_ref['hypervisor_type']
- if o != d:
+ orig_hypervisor = oservice_ref['hypervisor_type']
+ dest_hypervisor = dservice_ref['hypervisor_type']
+ if orig_hypervisor != dest_hypervisor:
raise exception.Invalid(_("Different hypervisor type"
- "(%(o)s->%(d)s)')" % locals()))
+ "(%(orig_hypervisor)s->"
+                                      "%(dest_hypervisor)s)" % locals()))
# Checkng hypervisor version.
- o = oservice_ref['hypervisor_version']
- d = dservice_ref['hypervisor_version']
- if o > d:
- raise exception.Invalid(_('Older hypervisor version(%(o)s->%(d)s)')
- % locals())
+ orig_hypervisor = oservice_ref['hypervisor_version']
+ dest_hypervisor = dservice_ref['hypervisor_version']
+ if orig_hypervisor > dest_hypervisor:
+ raise exception.Invalid(_("Older hypervisor version"
+ "(%(orig_hypervisor)s->"
+ "%(dest_hypervisor)s)") % locals())
# Checking cpuinfo.
try:
@@ -222,14 +225,15 @@ class Scheduler(object):
{"method": 'compare_cpu',
"args": {'cpu_info': oservice_ref['cpu_info']}})
- except rpc.RemoteError, e:
+ except rpc.RemoteError:
ec2_id = instance_ref['hostname']
src = instance_ref['host']
logging.exception(_("host %(dest)s is not compatible with "
"original host %(src)s.") % locals())
raise
- def has_enough_resources(self, context, instance_ref, dest):
+ def assert_compute_node_has_enough_resources(self, context,
+ instance_ref, dest):
"""Checks if destination host has enough resource for live migration.
Currently, only memory checking has been done.
@@ -276,22 +280,24 @@ class Scheduler(object):
dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
- # create tmpfile at dest host
try:
- filename = rpc.call(context, dst_t, {"method": 'mktmpfile'})
- except rpc.RemoteError, e:
- msg = _("Cannot create tmpfile at %s to confirm shared storage.")
- LOG.error(msg % FLAGS.instances_path)
- raise
+ # create tmpfile at dest host
+ filename = rpc.call(context, dst_t,
+ {"method": 'create_shared_storage_test_file'})
- # make sure existence at src host.
- try:
+ # make sure existence at src host.
rpc.call(context, src_t,
- {"method": 'confirm_tmpfile',
+ {"method": 'check_shared_storage_test_file',
"args": {'filename': filename}})
- except (rpc.RemoteError, exception.NotFound), e:
+ except rpc.RemoteError:
ipath = FLAGS.instances_path
- logging.error(_("Cannot comfirm %(ipath)s at %(dest)s is "
- "located at same shared storage.") % locals())
+            logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
+                            "the same shared storage between %(src)s "
+                            "and %(dest)s.") % locals())
raise
+
+ finally:
+ rpc.call(context, dst_t,
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': filename}})
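The reworked mounted_on_same_shared_storage() above issues the check from a single try block and always asks the destination host to remove the test file in a finally clause, so no stale marker files accumulate. A reduced sketch of that control flow follows; run_on_dest and run_on_src are hypothetical stand-ins for the rpc.call()s to the destination and source compute hosts and are not nova APIs.

def mounted_on_same_shared_storage(run_on_dest, run_on_src):
    # Destination host creates the marker file and returns its name.
    filename = run_on_dest('create_shared_storage_test_file')
    try:
        # Source host must be able to see the same file.
        run_on_src('check_shared_storage_test_file', filename=filename)
    finally:
        # Remove the marker whether the check succeeded or not.
        run_on_dest('cleanup_shared_storage_test_file', filename=filename)


if __name__ == '__main__':
    markers = set()

    def run_on_dest(method, filename=None):
        if method == 'create_shared_storage_test_file':
            markers.add('tmp-marker')
            return 'tmp-marker'
        markers.discard(filename)

    def run_on_src(method, filename=None):
        assert filename in markers, 'hosts do not share storage'

    mounted_on_same_shared_storage(run_on_dest, run_on_src)
    assert not markers        # cleanup ran even on the success path
    print('ok')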
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index cd5012fd5..a50d3ab20 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -98,24 +98,24 @@ class SchedulerManager(manager.Manager):
# Getting usage resource information
usage = {}
instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
- if 0 == len(instance_refs):
+ compute_ref['host'])
+ if not instance_refs:
return {'resource': resource, 'usage': usage}
project_ids = [i['project_id'] for i in instance_refs]
project_ids = list(set(project_ids))
- for i in project_ids:
+ for project_id in project_ids:
vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
host,
- i)
+ project_id)
mem = db.instance_get_memory_sum_by_host_and_project(context,
host,
- i)
+ project_id)
hdd = db.instance_get_disk_sum_by_host_and_project(context,
host,
- i)
- usage[i] = {'vcpus': int(vcpus),
- 'memory_mb': int(mem),
- 'local_gb': int(hdd)}
+ project_id)
+ usage[project_id] = {'vcpus': int(vcpus),
+ 'memory_mb': int(mem),
+ 'local_gb': int(hdd)}
return {'resource': resource, 'usage': usage}
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index c4e4d148e..62db42b11 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -661,7 +661,6 @@ class SimpleDriverTestCase(test.TestCase):
self.scheduler.live_migration(self.context, FLAGS.compute_topic,
instance_id=instance_id,
dest=i_ref['host'])
- self.mox.UnsetStubs()
i_ref = db.instance_get(self.context, instance_id)
self.assertTrue(i_ref['state_description'] == 'migrating')
@@ -824,10 +823,15 @@ class SimpleDriverTestCase(test.TestCase):
topic = FLAGS.compute_topic
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(self.context, topic, dest),
- {"method": 'mktmpfile'}).AndReturn(fpath)
+ {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
- {"method": 'confirm_tmpfile', "args": {'filename': fpath}})
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': fpath}})
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, dest),
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': fpath}})
self.mox.ReplayAll()
try:
@@ -838,7 +842,6 @@ class SimpleDriverTestCase(test.TestCase):
c = (e.message.find('does not exist') >= 0)
self.assertTrue(c)
- self.mox.UnsetStubs()
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -867,7 +870,6 @@ class SimpleDriverTestCase(test.TestCase):
c = (e.message.find(_('Different hypervisor type')) >= 0)
self.assertTrue(c)
- self.mox.UnsetStubs()
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
@@ -898,7 +900,6 @@ class SimpleDriverTestCase(test.TestCase):
c = (e.message.find(_('Older hypervisor version')) >= 0)
self.assertTrue(c)
- self.mox.UnsetStubs()
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
@@ -934,7 +935,6 @@ class SimpleDriverTestCase(test.TestCase):
c = (e.message.find(_("doesn't have compatibility to")) >= 0)
self.assertTrue(c)
- self.mox.UnsetStubs()
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index f46b5950e..17b80c294 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import libvirt
import mox
from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -60,6 +59,7 @@ class LibvirtConnTestCase(test.TestCase):
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.get_admin_context()
FLAGS.instances_path = ''
self.call_libvirt_dependant_setup = False
@@ -73,22 +73,52 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
- def libvirt_dependant_setup(self):
- """A setup method of LibvirtConnection dependent test."""
- # try to connect libvirt. if fail, skip test.
- self.call_libvirt_dependant_setup = True
- try:
- libvirt.openReadOnly('qemu:///system')
- except libvirt.libvirtError:
- return
- return libvirt_conn.get_connection(False)
-
- def libvirt_dependant_teardown(self):
- """teardown method of LibvirtConnection dependent test."""
- if self.call_libvirt_dependant_setup:
- libvirt_conn.libvirt = None
- libvirt_conn.libxml2 = None
- self.call_libvirt_dependant_setup = False
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtConnection(libvirt is not used)."""
+
+ # A fake libvirt.virtConnect
+ class FakeLibvirtConnection(object):
+ def getVersion(self):
+ return 12003
+
+ def getType(self):
+ return 'qemu'
+
+ def getCapabilities(self):
+ return 'qemu'
+
+ def listDomainsID(self):
+ return []
+
+ # A fake libvirt_conn.IptablesFirewallDriver
+ class FakeIptablesFirewallDriver(object):
+ def __init__(self, **kwargs):
+ pass
+
+ # Creating mocks
+ fake = FakeLibvirtConnection()
+ fakeip = FakeIptablesFirewallDriver
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ # Inevitable mocks for libvirt_conn.LibvirtConnection
+ self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
+ libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
+ libvirt_conn.LibvirtConnection._conn = fake
+
+ def create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': 'zone'}
+
+ return db.service_create(context.get_admin_context(), service_ref)
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
@@ -244,306 +274,55 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
- def test_get_vcpu_used(self):
- """Check if get_local_gb_total returns appropriate disk value."""
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
- libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2])
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "vcpus")
- vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
- vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
- arg = mox.IgnoreArg()
- libvirt_conn.LibvirtConnection._conn.lookupByID(arg).AndReturn(vdmock)
- libvirt_conn.LibvirtConnection._conn.lookupByID(arg).AndReturn(vdmock)
-
- self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
- self.assertTrue(conn.get_vcpu_used() == 4)
-
- def test_get_cpu_info_inappropreate_xml(self):
- """Raise exception if given xml is inappropriate."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- xml = """<cccccpu>
- <arch>x86_64</arch>
- <model>Nehalem</model>
- <vendor>Intel</vendor>
- <topology sockets='2' cores='4' threads='2'/>
- <feature name='rdtscp'/>
- <feature name='dca'/>
- <feature name='xtpr'/>
- <feature name='tm2'/>
- <feature name='est'/>
- <feature name='vmx'/>
- <feature name='ds_cpl'/>
- <feature name='monitor'/>
- <feature name='pbe'/>
- <feature name='tm'/>
- <feature name='ht'/>
- <feature name='ss'/>
- <feature name='acpi'/>
- <feature name='ds'/>
- <feature name='vme'/>
- </cccccpu>
- """
-
- self.mox.StubOutWithMock(conn._conn, 'getCapabilities')
- conn._conn.getCapabilities().AndReturn(xml)
-
- self.mox.ReplayAll()
- try:
- conn.get_cpu_info()
- except exception.Invalid, e:
- c1 = (0 <= e.message.find('Invalid xml'))
- self.assertTrue(c1)
-
- def test_get_cpu_info_inappropreate_xml2(self):
- """Raise exception if given xml is inappropriate(topology tag)."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- xml = """<cpu>
- <arch>x86_64</arch>
- <model>Nehalem</model>
- <vendor>Intel</vendor><topology cores='4' threads='2'/>
- <feature name='rdtscp'/>
- <feature name='dca'/>
- <feature name='xtpr'/>
- <feature name='tm2'/>
- <feature name='est'/>
- <feature name='vmx'/>
- <feature name='ds_cpl'/>
- <feature name='monitor'/>
- <feature name='pbe'/>
- <feature name='tm'/>
- <feature name='ht'/>
- <feature name='ss'/>
- <feature name='acpi'/>
- <feature name='ds'/>
- <feature name='vme'/>
- </cpu>
- """
- self.mox.StubOutWithMock(conn._conn, 'getCapabilities')
- conn._conn.getCapabilities().AndReturn(xml)
-
- self.mox.ReplayAll()
- try:
- conn.get_cpu_info()
- except exception.Invalid, e:
- c1 = (0 <= e.message.find('Invalid xml: topology'))
- self.assertTrue(c1)
-
def test_update_available_resource_works_correctly(self):
"""Confirm compute_service table is updated successfully."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- host = 'dummy'
- zone = 'dummyzone'
- ctxt = context.get_admin_context()
org_path = FLAGS.instances_path = ''
FLAGS.instances_path = '.'
- service_ref = db.service_create(ctxt,
- {'host': host,
- 'binary': 'nova-compute',
- 'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': zone})
- conn.update_available_resource(ctxt, host)
+ service_ref = self.create_service(host='dummy')
+ self.create_fake_libvirt_mock()
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ 'get_cpu_info')
+ libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
- service_ref = db.service_get(ctxt, service_ref['id'])
- print service_ref['compute_service']
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.update_available_resource(self.context, 'dummy')
+ service_ref = db.service_get(self.context, service_ref['id'])
compute_service = service_ref['compute_service'][0]
+
c1 = (compute_service['vcpus'] > 0)
c2 = (compute_service['memory_mb'] > 0)
c3 = (compute_service['local_gb'] > 0)
- # vcpu_used is checked at test_get_vcpu_used.
- c4 = (compute_service['memory_mb_used'] > 0)
- c5 = (compute_service['local_gb_used'] > 0)
- c6 = (len(compute_service['hypervisor_type']) > 0)
- c7 = (compute_service['hypervisor_version'] > 0)
+ c4 = (compute_service['vcpus_used'] == 0)
+ c5 = (compute_service['memory_mb_used'] > 0)
+ c6 = (compute_service['local_gb_used'] > 0)
+ c7 = (len(compute_service['hypervisor_type']) > 0)
+ c8 = (compute_service['hypervisor_version'] > 0)
- self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7)
+ self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8)
- db.service_destroy(ctxt, service_ref['id'])
+ db.service_destroy(self.context, service_ref['id'])
FLAGS.instances_path = org_path
- def test_update_resource_info_raise_exception(self):
+ def test_update_resource_info_no_compute_record_found(self):
"""Raise exception if no recorde found on services table."""
- host = 'dummy'
- org_path = FLAGS.instances_path = ''
- FLAGS.instances_path = '.'
- try:
- conn = libvirt_conn.LibvirtConnection(False)
- conn.update_available_resource(context.get_admin_context(), host)
- except exception.Invalid, e:
- msg = 'Cannot update compute manager specific info'
- c1 = (0 <= e.message.find(msg))
- self.assertTrue(c1)
- FLAGS.instances_path = org_path
-
- def test_compare_cpu_works_correctly(self):
- """Calling libvirt.compute_cpu() and works correctly."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
- host = 'dummy'
- zone = 'dummyzone'
- ctxt = context.get_admin_context()
org_path = FLAGS.instances_path = ''
FLAGS.instances_path = '.'
-
- service_ref = db.service_create(ctxt,
- {'host': host,
- 'binary': 'nova-compute',
- 'topic': 'compute',
- 'report_count': 0,
- 'availability_zone': zone})
- conn.update_available_resource(ctxt, host)
- service_ref = db.service_get(ctxt, service_ref['id'])
- ret = conn.compare_cpu(service_ref['compute_service'][0]['cpu_info'])
- self.assertTrue(ret == None)
-
- db.service_destroy(ctxt, service_ref['id'])
- FLAGS.instances_path = org_path
-
- def test_compare_cpu_no_compatibility(self):
- """Libvirt.compare_cpu() return less than 0.(no compatibility)."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- t = {}
- t['arch'] = 'x86'
- t['model'] = 'model'
- t['vendor'] = 'Intel'
- t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"}
- t['features'] = ["tm"]
- cpu_info = utils.dumps(t)
- self.mox.StubOutWithMock(conn._conn, 'compareCPU')
- conn._conn.compareCPU(mox.IgnoreArg(), 0).AndReturn(0)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info)
-
- def test_ensure_filtering_rules_for_instance_works_correctly(self):
- """ensure_filtering_rules_for_instance() works successfully."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- instance_ref = models.Instance()
- instance_ref.__setitem__('id', 1)
- fwdriver = conn.firewall_driver
-
- self.mox.StubOutWithMock(fwdriver, 'setup_basic_filtering')
- fwdriver.setup_basic_filtering(instance_ref)
- self.mox.StubOutWithMock(fwdriver, 'prepare_instance_filter')
- fwdriver.prepare_instance_filter(instance_ref)
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
- n = 'nova-instance-%s' % instance_ref.name
- conn._conn.nwfilterLookupByName(n)
-
- self.mox.ReplayAll()
- conn.ensure_filtering_rules_for_instance(instance_ref)
-
- def test_ensure_filtering_rules_for_instance_timeout(self):
- """ensure_filtering_fules_for_instance() finishes with timeout."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- instance_ref = models.Instance()
- instance_ref.__setitem__('id', 1)
- fwdriver = conn.firewall_driver
-
- self.mox.StubOutWithMock(fwdriver, 'setup_basic_filtering')
- fwdriver.setup_basic_filtering(instance_ref)
- self.mox.StubOutWithMock(fwdriver, 'prepare_instance_filter')
- fwdriver.prepare_instance_filter(instance_ref)
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
- n = 'nova-instance-%s' % instance_ref.name
- for i in range(FLAGS.live_migration_retry_count):
- conn._conn.nwfilterLookupByName(n).\
- AndRaise(libvirt.libvirtError('ERR'))
-
- self.mox.ReplayAll()
- try:
- conn.ensure_filtering_rules_for_instance(instance_ref)
- except exception.Error, e:
- c1 = (0 <= e.message.find('Timeout migrating for'))
- self.assertTrue(c1)
-
- def test_live_migration_works_correctly(self):
- """_live_migration() works as expected correctly."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- class dummyCall(object):
- f = None
-
- def start(self, interval=0, now=False):
- pass
-
- i_ref = models.Instance()
- i_ref.__setitem__('id', 1)
- ctxt = context.get_admin_context()
-
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI")
- vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
- mox.IgnoreArg(),
- None, FLAGS.live_migration_bandwidth).\
- AndReturn(None)
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
- conn._conn.lookupByName(i_ref.name).AndReturn(vdmock)
- self.mox.StubOutWithMock(libvirt_conn.utils, 'LoopingCall')
- libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
+ self.create_fake_libvirt_mock()
self.mox.ReplayAll()
- # Nothing to do with setting post_method/recover_method or not.
- ret = conn._live_migration(ctxt, i_ref, 'dest', '', '')
- self.assertTrue(ret == None)
-
- def test_live_migration_raises_exception(self):
- """Confirms recover method is called when exceptions are raised."""
- conn = self.libvirt_dependant_setup()
- if not conn:
- return
-
- i_ref = models.Instance()
- i_ref.__setitem__('id', 1)
- ctxt = context.get_admin_context()
-
- def dummy_recover_method(c, instance, host=None):
- pass
-
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI")
- vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
- mox.IgnoreArg(),
- None, FLAGS.live_migration_bandwidth).\
- AndRaise(libvirt.libvirtError('ERR'))
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
- conn._conn.lookupByName(i_ref.name).AndReturn(vdmock)
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid,
+ conn.update_available_resource,
+ self.context, 'dummy')
- self.mox.ReplayAll()
- self.assertRaises(libvirt.libvirtError,
- conn._live_migration,
- ctxt, i_ref, 'dest',
- '', dummy_recover_method)
+ FLAGS.instances_path = org_path
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(LibvirtConnTestCase, self).tearDown()
- self.libvirt_dependant_teardown()
class IptablesFirewallTestCase(test.TestCase):
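With the libvirt-dependent setup/teardown helpers removed, create_fake_libvirt_mock() now substitutes a hand-written fake object for LibvirtConnection._conn, so these tests run on machines without libvirt installed. The plain-Python sketch below shows the same substitution idea without mox; Connection and FakeConn are hypothetical names for illustration, not nova classes.

# Hypothetical, mox-free illustration of swapping a fake in for a class-level
# connection attribute and restoring it afterwards.
class Connection(object):
    _conn = None                      # normally a live libvirt handle

    def hypervisor_version(self):
        return self._conn.getVersion()


class FakeConn(object):
    def getVersion(self):
        return 12003                  # mirrors FakeLibvirtConnection above


def test_hypervisor_version():
    saved = Connection._conn
    Connection._conn = FakeConn()     # substitute the fake for the test
    try:
        assert Connection().hypervisor_version() == 12003
    finally:
        Connection._conn = saved      # always put the real attribute back


if __name__ == '__main__':
    test_hypervisor_version()
    print('ok')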
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 75e4f0a53..70fdcc453 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -891,8 +891,8 @@ class LibvirtConnection(object):
"""
total = 0
- for i in self._conn.listDomainsID():
- dom = self._conn.lookupByID(i)
+ for dom_id in self._conn.listDomainsID():
+ dom = self._conn.lookupByID(dom_id)
total += len(dom.vcpus()[1])
return total
@@ -1048,7 +1048,7 @@ class LibvirtConnection(object):
'cpu_info': self.get_cpu_info()}
compute_service_ref = service_ref['compute_service']
- if len(compute_service_ref) == 0:
+ if not compute_service_ref:
LOG.info(_('Compute_service record is created for %s ') % host)
dic['service_id'] = service_ref['id']
db.compute_service_create(ctxt, dic)
@@ -1124,7 +1124,7 @@ class LibvirtConnection(object):
# wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
- while len(timeout_count) != 0:
+        while timeout_count:
try:
filter_name = 'nova-instance-%s' % instance_ref.name
self._conn.nwfilterLookupByName(filter_name)
@@ -1198,7 +1198,7 @@ class LibvirtConnection(object):
None,
FLAGS.live_migration_bandwidth)
- except Exception, e:
+ except Exception:
recover_method(ctxt, instance_ref)
raise
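The retry loop in ensure_filtering_rules_for_instance() above keeps polling nwfilterLookupByName() while the countdown list built from range(FLAGS.live_migration_retry_count) is still non-empty; the code outside this hunk presumably consumes one entry per failed attempt. A minimal sketch of that countdown-list retry idiom follows; retry_until_found and the lookup callable are hypothetical names, not nova APIs.

import time


def retry_until_found(lookup, retries, interval=1):
    # Countdown-list idiom: one entry per remaining attempt.
    attempts_left = list(range(retries))
    while attempts_left:              # non-empty list -> keep trying
        try:
            return lookup()
        except LookupError:
            attempts_left.pop()       # consume one attempt
            if not attempts_left:
                raise                 # out of attempts: re-raise the failure
            time.sleep(interval)


if __name__ == '__main__':
    calls = {'n': 0}

    def flaky_lookup():
        calls['n'] += 1
        if calls['n'] < 3:
            raise LookupError('filter not defined yet')
        return 'nova-instance-xyz'

    print(retry_until_found(flaky_lookup, retries=5, interval=0))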
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 5dc9077b4..9dea35b35 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -175,5 +175,5 @@ class VolumeManager(manager.Manager):
def check_for_export(self, context, instance_id):
"""Make sure whether volume is exported."""
instance_ref = self.db.instance_get(context, instance_id)
- for v in instance_ref['volumes']:
- self.driver.check_for_export(context, v['id'])
+ for volume in instance_ref['volumes']:
+ self.driver.check_for_export(context, volume['id'])