summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Authors2
-rw-r--r--README.livemigration.txt154
-rwxr-xr-xbin/nova-manage82
-rw-r--r--nova/compute/api.py1
-rw-r--r--nova/compute/manager.py92
-rw-r--r--nova/db/api.py63
-rw-r--r--nova/db/sqlalchemy/api.py138
-rw-r--r--nova/db/sqlalchemy/models.py24
-rw-r--r--nova/livemigration_test/SI/picture.pptxbin0 -> 137730 bytes
-rw-r--r--nova/livemigration_test/SI/testCase_SI.xlsbin0 -> 49152 bytes
-rw-r--r--nova/livemigration_test/SI/testParameterSheet_SI.xlsbin0 -> 464384 bytes
-rwxr-xr-xnova/livemigration_test/SI/utils/demo-firstboot.sh39
-rwxr-xr-xnova/livemigration_test/SI/utils/demo-runInstance.sh57
-rw-r--r--nova/livemigration_test/SI/utils/nova-manage.conf18
-rw-r--r--nova/livemigration_test/SI/utils/nova.conf10
-rwxr-xr-xnova/livemigration_test/SI/utils/nova.sh180
-rwxr-xr-xnova/livemigration_test/SI/utils/nova.sh.compute37
-rw-r--r--nova/livemigration_test/UT/computeManager.test.py413
-rw-r--r--nova/livemigration_test/UT/libvirtConnection.test.py605
-rw-r--r--nova/livemigration_test/UT/nova-manage.test.py321
-rw-r--r--nova/livemigration_test/UT/schedulerManager.test.py458
-rw-r--r--nova/livemigration_test/UT/testCase_UT.xlsbin0 -> 240128 bytes
-rw-r--r--nova/network/api.py1
-rw-r--r--nova/network/manager.py14
-rw-r--r--nova/scheduler/driver.py133
-rw-r--r--nova/scheduler/manager.py46
-rw-r--r--nova/service.py27
-rw-r--r--nova/virt/libvirt_conn.py238
-rw-r--r--nova/volume/driver.py26
-rw-r--r--nova/volume/manager.py12
-rw-r--r--setup.py1
31 files changed, 3177 insertions, 15 deletions
diff --git a/Authors b/Authors
index 47101e272..7640585b0 100644
--- a/Authors
+++ b/Authors
@@ -23,10 +23,12 @@ Jonathan Bryce <jbryce@jbryce.com>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
+Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
+Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
diff --git a/README.livemigration.txt b/README.livemigration.txt
new file mode 100644
index 000000000..d859c24ca
--- /dev/null
+++ b/README.livemigration.txt
@@ -0,0 +1,154 @@
+#
+# Live migration feature usage:
+#
+# @author Kei Masumoto <masumotok@nttdata.co.jp>
+# @date 2010.12.01
+#
+# @history ver.1 2010.12.01 ( masumotok )
+# initial version
+#
+
+
+0. pre-requisite settings
+ OS: Ubuntu lucid 10.04 for both instances and host.
+ NFS: nova-install-dir/instances has to be mounted by shared storage.
+ ( this version is tested using NFS)
+ Network manager: Only VlanManager can be used in this version.
+ instances : Instance must keep running without any EBS volume.
+
+
+1. pre-requisite settings.
+
+ (a) shared storage
+    As mentioned above, shared storage is essential for the live_migration functionality.
+ An example is NFS( my test environment ), and example setting is as follows.
+
+ Prepare NFS server machine( nova-api server is OK), and add below line /etc/exports:
+
+ > nova-install-dir/instances a.b.c.d/255.255.0.0(rw,sync,fsid=0,no_root_squash)
+
+ where "nova-install-dir" is the directory which openstack is installed, and
+ add appropriate ip address and netmask for "a.b.c.d/255.255.0.0" , which should include
+ compute nodes which try to mount this directory.
+
+ Then restart nfs server.
+
+ > /etc/init.d/nfs-kernel-server restart
+ > /etc/init.d/idmapd restart
+
+ Also, at any compute nodes, add below line to /etc/fstab:
+
+ >172.19.0.131:/ DIR nfs4 defaults 0 0
+
+ where "DIR" must be same as 'instances_path'( see nova.compute.manager for the default value)
+
+ Then try to mount,
+
+ > mount -a -v
+
+    Check that the exported directory is successfully mounted. If it fails, try this on any host:
+
+ > iptables -F
+
+ Also, check file/daemon permissions.
+ we expect any nova daemons are running as root.
+ > root@openstack2-api:/opt/nova-2010.4# ps -ef | grep nova
+ > root 5948 5904 9 11:29 pts/4 00:00:00 python /opt/nova-2010.4//bin/nova-api
+ > root 5952 5908 6 11:29 pts/5 00:00:00 python /opt/nova-2010.4//bin/nova-objectstore
+ > ... (snip)
+
+ "instances/" directory can be seen from server side:
+ > root@openstack:~# ls -ld nova-install-dir/instances/
+ > drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/
+
+ also, client side:
+ > root@openstack-client:~# ls -ld nova-install-dir/instances/
+ > drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/
+
+
+
+ (b) libvirt settings
+      In the default configuration, this feature uses the simple tcp protocol (qemu+tcp://).
+ To use this protocol, below configuration is necessary.
+
+ a. modify /etc/libvirt/libvirt.conf
+
+ before : #listen_tls = 0
+ after : listen_tls = 0
+
+ before : #listen_tcp = 1
+ after : listen_tcp = 1
+
+ append : auth_tcp = "none"
+
+ b. modify /etc/init/libvirt-bin.conf
+
+ before : exec /usr/sbin/libvirtd -d
+ after : exec /usr/sbin/libvirtd -d -l
+
+ c. modify /etc/default/libvirt-bin
+
+ before :libvirtd_opts=" -d"
+ after :libvirtd_opts=" -d -l"
+
+ then, restart libvirt
+ stop libvirt-bin && start libvirt-bin
+ ps -ef | grep libvirt
+
+ make sure you get the below result.
+ > root@openstack2:/opt/nova-2010.2# ps -ef | grep libvirt
+ > root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l
+
+ if you would like to use qemu+ssh or other protocol, change "live_migration_uri" flag.
+ by adding "--live_migration_uri" to /etc/nova/nova.conf (Note that file name may be
+ changed depends on version).
+
+
+2. command usage
+
+ To get a list of physical hosts,
+ nova-manage host list
+
+    To get the available physical resources of each host,
+ nova-manage host show hostname
+
+ an example result is below:
+ > HOST PROJECT cpu mem(mb) disk(gb)
+ > openstack2-c2 16 32232 878
+ > openstack2-c2 admin 1 2048 20
+
+ The 1st line shows total amount of resource that the specified host has.
+ The 2nd and latter lines show usage resource per project.
+ This command is created because admins can decide which host should be
+ a destination of live migration.
+
+ For live migration,
+    nova-manage instance live_migration ec2-id(i-xxxx) destination-host-name.
+
+ once this command is executed, admins will check the status through
+ euca-describe-instances. The status is changed from 'running' to 'migrating',
+ and changed to 'running' when live migration finishes.
+    Note that how long live migration takes to finish depends on the environment.
+ If it finishes too fast, admins cannot see 'migrating' status.
+
+ > root@openstack2:/opt/nova-2010.2# euca-describe-instances
+ > Reservation:r-qlg3favp
+ > RESERVATION r-qlg3favp admin
+ > INSTANCE i-2ah453 ami-tiny 172.19.0.134 10.0.0.3
+ > migrating testkey (admin, openstack2-c2) 0 m1.small
+ > 2010-11-28 16:09:16 openstack2-c2
+
+ When live migration finishes successfully, admin can check the last part of
+ euca-describe-instances which shows physical node information.
+ ( only when euca-describe-instances is executed by admin user )
+ Admins also can check live migration source compute node logfile which may
+ show a log.
+ > Live migration i-xxxx to DESTHOST finishes successfully.
+
+
+3. error checking
+ When live migration fails somehow, error messages are shown at:
+ a. scheduler logfile
+ b. source compute node logfile
+ c. dest compute node logfile
+
diff --git a/bin/nova-manage b/bin/nova-manage
index 3f5957190..33fc08b81 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -77,11 +77,13 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import quota
from nova import utils
from nova.auth import manager
+from nova import rpc
from nova.cloudpipe import pipelib
-
+from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
flags.DECLARE('fixed_range', 'nova.network.manager')
@@ -459,6 +461,82 @@ class NetworkCommands(object):
int(vpn_start))
+class InstanceCommands(object):
+ """Class for mangaging VM instances."""
+
+ def live_migration(self, ec2_id, dest):
+ """live_migration"""
+
+
+ if FLAGS.connection_type != 'libvirt':
+ raise exception.Error('Only KVM is supported for now. '
+ 'Sorry.')
+
+ if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver':
+ raise exception.Error('Only AOEDriver is supported for now. '
+ 'Sorry.')
+
+ logging.basicConfig()
+ ctxt = context.get_admin_context()
+ instance_id = cloud.ec2_id_to_id(ec2_id)
+
+ rpc.call(ctxt,
+ FLAGS.scheduler_topic,
+ {"method": "live_migration",
+ "args": {"instance_id": instance_id,
+ "dest": dest,
+ "topic": FLAGS.compute_topic}})
+
+ msg = 'Migration of %s initiated. ' % ec2_id
+ msg += 'Check its progress using euca-describe-instances.'
+ print msg
+
+
+class HostCommands(object):
+ """Class for mangaging host(physical nodes)."""
+
+ def list(self):
+ """describe host list."""
+
+        # To suppress msg: No handlers could be found for logger "amqplib"
+ logging.basicConfig()
+
+ host_refs = db.host_get_all(context.get_admin_context())
+ for host_ref in host_refs:
+ print host_ref['name']
+
+ def show(self, host):
+ """describe cpu/memory/hdd info for host."""
+
+        # To suppress msg: No handlers could be found for logger "amqplib"
+ logging.basicConfig()
+
+ result = rpc.call(context.get_admin_context(),
+ FLAGS.scheduler_topic,
+ {"method": "show_host_resource",
+ "args": {"host": host}})
+
+        # Checking the result msg format is necessary; that will be done
+        # when this feature is included in the API.
+ if type(result) != dict:
+ print 'Unexpected error occurs'
+ elif not result['ret']:
+ print '%s' % result['msg']
+ else:
+ cpu = result['phy_resource']['vcpus']
+ mem = result['phy_resource']['memory_mb']
+ hdd = result['phy_resource']['local_gb']
+
+ print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
+ print '%s\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
+ for p_id, val in result['usage'].items():
+ print '%s\t%s\t\t%s\t%s\t%s' % (host,
+ p_id,
+ val['vcpus'],
+ val['memory_mb'],
+ val['local_gb'])
+
+
class ServiceCommands(object):
"""Enable and disable running services"""
@@ -521,6 +599,8 @@ CATEGORIES = [
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
+ ('instance', InstanceCommands),
+ ('host', HostCommands),
('service', ServiceCommands),
('log', LogCommands)]
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 6364a80ef..e7e8dc385 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms).
"""
import datetime
+import re
import time
from nova import db
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6ae9b689a..2c0322bae 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -35,8 +35,11 @@ terminating it.
"""
import datetime
+import logging
+import socket
import functools
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -157,9 +160,10 @@ class ComputeManager(manager.Manager):
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
+
self.db.instance_update(context,
instance_id,
- {'host': self.host})
+ {'host': self.host, 'launched_on': self.host})
self.db.instance_set_state(context,
instance_id,
@@ -502,3 +506,89 @@ class ComputeManager(manager.Manager):
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
return True
+
+ def compare_cpu(self, context, xml):
+ """ Check the host cpu is compatible to a cpu given by xml."""
+ return self.driver.compare_cpu(xml)
+
+ def pre_live_migration(self, context, instance_id, dest):
+ """Any preparation for live migration at dst host."""
+
+ # Getting instance info
+ instance_ref = db.instance_get(context, instance_id)
+ ec2_id = instance_ref['hostname']
+
+ # Getting fixed ips
+ fixed_ip = db.instance_get_fixed_address(context, instance_id)
+ if not fixed_ip:
+ msg = _('%s(%s) doesnt have fixed_ip') % (instance_id, ec2_id)
+ raise exception.NotFound(msg)
+
+ # If any volume is mounted, prepare here.
+ try:
+ for vol in db.volume_get_all_by_instance(context, instance_id):
+ self.volume_manager.setup_compute_volume(context, vol['id'])
+ except exception.NotFound:
+ logging.info(_("%s has no volume.") % ec2_id)
+
+ # Bridge settings
+ # call this method prior to ensure_filtering_rules_for_instance,
+ # since bridge is not set up, ensure_filtering_rules_for instance
+ # fails.
+ self.network_manager.setup_compute_network(context, instance_id)
+
+ # Creating filters to hypervisors and firewalls.
+ # An example is that nova-instance-instance-xxx,
+ # which is written to libvirt.xml( check "virsh nwfilter-list )
+ # On destination host, this nwfilter is necessary.
+ # In addition, this method is creating filtering rule
+ # onto destination host.
+ self.driver.ensure_filtering_rules_for_instance(instance_ref)
+
+ def live_migration(self, context, instance_id, dest):
+ """executes live migration."""
+
+ # Get instance for error handling.
+ instance_ref = db.instance_get(context, instance_id)
+ ec2_id = instance_ref['hostname']
+
+ try:
+ # Checking volume node is working correctly when any volumes
+ # are attached to instances.
+ rpc.call(context,
+ FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': instance_id}})
+
+            # Asking dest host to prepare live migration.
+ compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
+
+ rpc.call(context,
+ compute_topic,
+ {"method": "pre_live_migration",
+ "args": {'instance_id': instance_id,
+ 'dest': dest}})
+
+ except Exception, e:
+ msg = _('Pre live migration for %s failed at %s')
+ logging.error(msg, ec2_id, dest)
+ db.instance_set_state(context,
+ instance_id,
+ power_state.RUNNING,
+ 'running')
+
+ try:
+ for vol in db.volume_get_all_by_instance(context, instance_id):
+ db.volume_update(context,
+ vol['id'],
+ {'status': 'in-use'})
+ except exception.NotFound:
+ pass
+
+ # e should be raised. just calling "raise" may raise NotFound.
+ raise e
+
+ # Executing live migration
+        # live_migration might raise exceptions, but
+        # nothing must be recovered in this version.
+ self.driver.live_migration(context, instance_ref, dest)
diff --git a/nova/db/api.py b/nova/db/api.py
index a4d26ec85..98beb479c 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -244,6 +244,10 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_update(context, address, values):
+ """update floating ip information."""
+ return IMPL.floating_ip_update(context, address, values)
+
####################
@@ -388,6 +392,32 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
+def instance_get_all_by_host(context, hostname):
+ """Get instances by host"""
+ return IMPL.instance_get_all_by_host(context, hostname)
+
+
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ """Get instances.vcpus by host and project"""
+ return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ """Get amount of memory by host and project """
+ return IMPL.instance_get_memory_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ """Get total amount of disk by host and project """
+ return IMPL.instance_get_disk_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)
@@ -691,6 +721,11 @@ def volume_get_by_ec2_id(context, ec2_id):
return IMPL.volume_get_by_ec2_id(context, ec2_id)
+def volume_get_all_by_instance(context, instance_id):
+ """Get all volumes by instance id or raise if it does not exist."""
+ return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
def volume_get_instance(context, volume_id):
"""Get the instance that a volume is attached to."""
return IMPL.volume_get_instance(context, volume_id)
@@ -906,3 +941,31 @@ def host_get_networks(context, host):
"""
return IMPL.host_get_networks(context, host)
+
+
+###################
+
+
+def host_create(context, value):
+ """Create a host from the values dictionary."""
+ return IMPL.host_create(context, value)
+
+
+def host_get(context, host_id):
+ """Get an host or raise if it does not exist."""
+ return IMPL.host_get(context, host_id)
+
+
+def host_get_all(context, session=None):
+ """Get all hosts or raise if it does not exist."""
+ return IMPL.host_get_all(context)
+
+
+def host_get_by_name(context, host):
+ """Get an host or raise if it does not exist."""
+ return IMPL.host_get_by_name(context, host)
+
+
+def host_update(context, host, values):
+ """Set the given properties on an host and update it."""
+ return IMPL.host_update(context, host, values)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e475b4d8c..d54a5ca99 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -486,6 +486,16 @@ def floating_ip_get_by_address(context, address, session=None):
return result
+@require_context
+def floating_ip_update(context, address, values):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ for (key, value) in values.iteritems():
+ floating_ip_ref[key] = value
+ floating_ip_ref.save(session=session)
+
+
###################
@@ -824,6 +834,7 @@ def instance_update(context, instance_id, values):
return instance_ref
+@require_context
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance"""
session = get_session()
@@ -837,6 +848,53 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
+def instance_get_all_by_host(context, hostname):
+ session = get_session()
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Instance
+ ).filter_by(host=hostname
+ ).filter_by(deleted=can_read_deleted(context)
+ ).all()
+ if not result:
+ return []
+ return result
+
+
+@require_context
+def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id):
+ session = get_session()
+
+ result = session.query(models.Instance
+ ).filter_by(host=hostname
+ ).filter_by(project_id=proj_id
+ ).filter_by(deleted=can_read_deleted(context)
+ ).value(column)
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ return _instance_get_sum_by_host_and_project(context, 'vcpus', hostname,
+ proj_id)
+
+
+@require_context
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ return _instance_get_sum_by_host_and_project(context, 'memory_mb',
+ hostname, proj_id)
+
+
+@require_context
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ return _instance_get_sum_by_host_and_project(context, 'local_gb',
+ hostname, proj_id)
+
+
+@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
@@ -1402,6 +1460,18 @@ def volume_get_all_by_project(context, project_id):
@require_admin_context
+def volume_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ return result
+
+
+@require_admin_context
def volume_get_instance(context, volume_id):
session = get_session()
result = session.query(models.Volume).\
@@ -1863,3 +1933,71 @@ def host_get_networks(context, host):
filter_by(deleted=False).\
filter_by(host=host).\
all()
+
+
+###################
+
+@require_admin_context
+def host_create(context, values):
+ host_ref = models.Host()
+ for (key, value) in values.iteritems():
+ host_ref[key] = value
+ host_ref.save()
+ return host_ref
+
+
+@require_admin_context
+def host_get(context, host_id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Host
+ ).filter_by(deleted=False
+ ).filter_by(id=host_id
+ ).first()
+
+ if not result:
+ raise exception.NotFound('No host for id %s' % host_id)
+
+ return result
+
+
+@require_admin_context
+def host_get_all(context, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Host
+ ).filter_by(deleted=False
+ ).all()
+
+ if not result:
+ raise exception.NotFound('No host record found .')
+
+ return result
+
+
+@require_admin_context
+def host_get_by_name(context, host, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.Host
+ ).filter_by(deleted=False
+ ).filter_by(name=host
+ ).first()
+
+ if not result:
+ raise exception.NotFound('No host for name %s' % host)
+
+ return result
+
+
+@require_admin_context
+def host_update(context, host_id, values):
+ session = get_session()
+ with session.begin():
+ host_ref = host_get(context, host_id, session=session)
+ for (key, value) in values.iteritems():
+ host_ref[key] = value
+ host_ref.save(session=session)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 1ed366127..f8d8852b9 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -138,6 +138,25 @@ class NovaBase(object):
# __tablename__ = 'hosts'
# id = Column(String(255), primary_key=True)
+class Host(BASE, NovaBase):
+ """Represents a host where services are running"""
+ __tablename__ = 'hosts'
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255))
+ vcpus = Column(Integer, nullable=False, default=-1)
+ memory_mb = Column(Integer, nullable=False, default=-1)
+ local_gb = Column(Integer, nullable=False, default=-1)
+ hypervisor_type = Column(String(128))
+ hypervisor_version = Column(Integer, nullable=False, default=-1)
+ cpu_info = Column(String(1024))
+ deleted = Column(Boolean, default=False)
+ # C: when calling service_create()
+    # D: never deleted. instead of deleting, column "deleted" is true
+ # when host is down
+ # b/c Host.id is foreign key of service, and records
+ # of the "service" table are not deleted.
+ # R: Column "deleted" is true when calling hosts_up() and host is down.
+
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
@@ -224,6 +243,9 @@ class Instance(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+    # To remember on which host an instance booted.
+    # An instance may be moved to another host by live migration.
+ launched_on = Column(String(255))
locked = Column(Boolean)
# TODO(vish): see Ewan's email about state improvements, probably
@@ -552,7 +574,7 @@ def register_models():
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate) # , Image, Host
+ Project, Certificate, Host) # , Image
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/livemigration_test/SI/picture.pptx b/nova/livemigration_test/SI/picture.pptx
new file mode 100644
index 000000000..b47bec9b5
--- /dev/null
+++ b/nova/livemigration_test/SI/picture.pptx
Binary files differ
diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls
new file mode 100644
index 000000000..be98b391a
--- /dev/null
+++ b/nova/livemigration_test/SI/testCase_SI.xls
Binary files differ
diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls
new file mode 100644
index 000000000..400b43b43
--- /dev/null
+++ b/nova/livemigration_test/SI/testParameterSheet_SI.xls
Binary files differ
diff --git a/nova/livemigration_test/SI/utils/demo-firstboot.sh b/nova/livemigration_test/SI/utils/demo-firstboot.sh
new file mode 100755
index 000000000..3a6f7fb0b
--- /dev/null
+++ b/nova/livemigration_test/SI/utils/demo-firstboot.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+DIR=/opt/nova-2010.1
+
+# 1. 管理者ユーザを作成する
+# nova-manage user admin ユーザ名 access-key secret-key
+#
+#$DIR/bin/nova-manage user admin admin admin admin
+
+# 2. プロジェクトを作成する
+# nova-manage create project プロジェクト名 プロジェクトに属するユーザ名
+#
+#$DIR/bin/nova-manage project create admin admin
+
+# 3. クラウドを使うための認証情報を生成する
+# nova-manage project environment プロジェクト名 ユーザ名 認証情報を格納するファイル
+#
+#$DIR/bin/nova-manage project environment admin admin $DIR/novarc
+
+# 4. 認証情報の読み込み
+. $DIR/novarc
+
+# 5. プロジェクト用仮想マシンネットワークの作成を行う
+# nova-manage user admin ユーザ名 access-key secret-key
+#
+$DIR/bin/nova-manage network create 10.0.0.0/8 3 16
+
+# 6. 初回ログインにはSSHの公開鍵認証が必要
+#
+if [ "" == "`euca-describe-keypairs | grep testkey`" ]; then
+ euca-add-keypair testkey > testkey.pem
+fi
+
+# 7.
+for i in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do
+ sudo ip addr del $i dev eth0 2> /dev/null
+done
+
+
diff --git a/nova/livemigration_test/SI/utils/demo-runInstance.sh b/nova/livemigration_test/SI/utils/demo-runInstance.sh
new file mode 100755
index 000000000..171291262
--- /dev/null
+++ b/nova/livemigration_test/SI/utils/demo-runInstance.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+DIR=/opt/nova-2010.1
+
+function inc_assigned(){
+ assigned=`expr $assigned + 1`
+}
+
+
+# 1. 認証情報の読み込み
+. $DIR/novarc
+
+# 3. 仮想マシンの起動
+#
+ret=`euca-run-instances -t m1.small -k testkey ami-centos`
+#ret=`euca-run-instances -t m1.small -k testkey ami-tiny`
+
+# 4. 仮想マシン用IPの確保
+# 未登録なら登録しておく
+registered=`euca-describe-addresses`
+for ip in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do
+
+ not_registered=`echo $registered | grep $ip`
+ if [ "" == "$not_registered" ]; then
+ echo "[INFO] registed $ip"
+ $DIR/bin/nova-manage floating create `hostname` $ip
+ fi
+done
+
+# 5. IPの割当
+echo 0 > /tmp/demo-runinstance
+euca-describe-addresses | grep -v reserved | while read line; do
+ # 割り当てられてないものを仮想マシンに割り当てる
+ ip=`echo $line | cut -d ' ' -f 2`
+ id=`echo $ret | cut -d ' ' -f 5`
+ if [ "" == "`echo $id | grep i- `" ] ; then
+ echo "[INFO] try again" $ret
+ break
+ fi
+ echo "[INFO] assigned to ipaddr($ip) to instance($id) "
+ euca-associate-address -i $id $ip
+ echo 1 > /tmp/demo-runinstance
+ break
+done
+
+echo $assigned
+if [ 0 -eq "`cat /tmp/demo-runinstance`" ] ; then
+ echo "[INFO] address is full."
+fi
+rm -rf /tmp/demo-runinstance
+
+
+# 6. FWの設定
+euca-authorize -P tcp -p 22 default 2> /dev/null > /dev/null
+euca-authorize -P tcp -p 80 default 2> /dev/null > /dev/null
+euca-authorize -P tcp -p 5555 default 2> /dev/null > /dev/null
+
diff --git a/nova/livemigration_test/SI/utils/nova-manage.conf b/nova/livemigration_test/SI/utils/nova-manage.conf
new file mode 100644
index 000000000..9f8a02b96
--- /dev/null
+++ b/nova/livemigration_test/SI/utils/nova-manage.conf
@@ -0,0 +1,18 @@
+--verbose
+--nodaemon
+--dhcpbridge_flagfile=/etc/nova/nova-manage.conf
+--FAKE_subdomain=ec2
+--libvirt_type=qemu
+--no_internet_conn=True
+--public_netif=eth0
+--public_interface=eth0
+
+--cc-host=172.19.0.131
+--routing_source_ip=172.19.0.131
+--sql_connection=mysql://root:nova@172.19.0.131/nova
+--rabbit_host=172.19.0.131
+--redis_host=172.19.0.131
+--s3_host=172.19.0.131
+--auth_driver=nova.auth.ldapdriver.LdapDriver
+--ldap_url=ldap://172.19.0.131
+
diff --git a/nova/livemigration_test/SI/utils/nova.conf b/nova/livemigration_test/SI/utils/nova.conf
new file mode 100644
index 000000000..c66bfbc53
--- /dev/null
+++ b/nova/livemigration_test/SI/utils/nova.conf
@@ -0,0 +1,10 @@
+--verbose
+--nodaemon
+--dhcpbridge_flagfile=/opt/nova-2010.4//bin/nova.conf
+--network_manager=nova.network.manager.VlanManager
+--cc_host=172.19.0.131
+--routing_source_ip=172.19.0.131
+--sql_connection=mysql://root:nova@localhost/nova
+--auth_driver=nova.auth.ldapdriver.LdapDriver
+--libvirt_type=qemu
+--public_interface=eth0
diff --git a/nova/livemigration_test/SI/utils/nova.sh b/nova/livemigration_test/SI/utils/nova.sh
new file mode 100755
index 000000000..b8e2e9f26
--- /dev/null
+++ b/nova/livemigration_test/SI/utils/nova.sh
@@ -0,0 +1,180 @@
+#!/usr/bin/env bash
+DIR=`pwd`
+CMD=$1
+SOURCE_BRANCH=lp:nova
+if [ -n "$2" ]; then
+ SOURCE_BRANCH=$2
+fi
+#DIRNAME=nova
+DIRNAME=""
+NOVA_DIR=$DIR/$DIRNAME
+if [ -n "$3" ]; then
+ NOVA_DIR=$DIR/$3
+fi
+
+if [ ! -n "$HOST_IP" ]; then
+ # NOTE(vish): This will just get the first ip in the list, so if you
+ # have more than one eth device set up, this will fail, and
+ # you should explicitly set HOST_IP in your environment
+ HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
+fi
+
+#USE_MYSQL=${USE_MYSQL:-0}
+USE_MYSQL=1
+MYSQL_PASS=${MYSQL_PASS:-nova}
+TEST=${TEST:-0}
+#USE_LDAP=${USE_LDAP:-0}
+USE_LDAP=1
+LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu}
+NET_MAN=${NET_MAN:-VlanManager}
+# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface
+# below but make sure that the interface doesn't already have an
+# ip or you risk breaking things.
+# FLAT_INTERFACE=eth0
+
+if [ "$USE_MYSQL" == 1 ]; then
+ SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova
+else
+ SQL_CONN=sqlite:///$NOVA_DIR/nova.sqlite
+fi
+
+if [ "$USE_LDAP" == 1 ]; then
+ AUTH=ldapdriver.LdapDriver
+else
+ AUTH=dbdriver.DbDriver
+fi
+
+mkdir -p /etc/nova
+cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF
+--verbose
+--nodaemon
+--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
+--network_manager=nova.network.manager.$NET_MAN
+--cc_host=$HOST_IP
+--routing_source_ip=$HOST_IP
+--sql_connection=$SQL_CONN
+--auth_driver=nova.auth.$AUTH
+--libvirt_type=$LIBVIRT_TYPE
+--public_interface=eth0
+NOVA_CONF_EOF
+
+if [ -n "$FLAT_INTERFACE" ]; then
+ echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf
+fi
+
+if [ "$CMD" == "branch" ]; then
+ sudo apt-get install -y bzr
+ rm -rf $NOVA_DIR
+ bzr branch $SOURCE_BRANCH $NOVA_DIR
+ cd $NOVA_DIR
+ mkdir -p $NOVA_DIR/instances
+ mkdir -p $NOVA_DIR/networks
+fi
+
+# You should only have to run this once
+if [ "$CMD" == "install" ]; then
+ sudo apt-get install -y python-software-properties
+ sudo add-apt-repository ppa:nova-core/ppa
+ sudo apt-get update
+ sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables
+ sudo apt-get install -y user-mode-linux kvm libvirt-bin
+ sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
+ sudo apt-get install -y lvm2 iscsitarget open-iscsi
+ echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
+ sudo /etc/init.d/iscsitarget restart
+ sudo modprobe kvm
+ sudo /etc/init.d/libvirt-bin restart
+ sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
+ sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy
+ sudo apt-get install -y python-libvirt python-libxml2 python-routes
+ if [ "$USE_MYSQL" == 1 ]; then
+ cat <<MYSQL_PRESEED | debconf-set-selections
+mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
+mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
+mysql-server-5.1 mysql-server/start_on_boot boolean true
+MYSQL_PRESEED
+ apt-get install -y mysql-server python-mysqldb
+ fi
+ wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
+ tar -C $DIR -zxf images.tgz
+fi
+
+NL=`echo -ne '\015'`
+
+function screen_it {
+ screen -S nova -X screen -t $1
+ screen -S nova -p $1 -X stuff "$2$NL"
+}
+
+if [ "$CMD" == "run" ]; then
+ killall dnsmasq
+ screen -d -m -S nova -t nova
+ sleep 1
+ if [ "$USE_MYSQL" == 1 ]; then
+ mysql -p$MYSQL_PASS -e 'DROP DATABASE nova;'
+ mysql -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
+ else
+ rm $NOVA_DIR/nova.sqlite
+ fi
+ if [ "$USE_LDAP" == 1 ]; then
+ sudo $NOVA_DIR/nova/auth/slap.sh
+ fi
+ rm -rf $NOVA_DIR/instances
+ mkdir -p $NOVA_DIR/instances
+ rm -rf $NOVA_DIR/networks
+ mkdir -p $NOVA_DIR/networks
+ $NOVA_DIR/tools/clean-vlans
+ if [ ! -d "$NOVA_DIR/images" ]; then
+ #ln -s $DIR/images $NOVA_DIR/images
+ ln -s /opt/images $NOVA_DIR/images
+ fi
+
+ if [ "$TEST" == 1 ]; then
+ cd $NOVA_DIR
+ python $NOVA_DIR/run_tests.py
+ cd $DIR
+ fi
+
+ # create an admin user called 'admin'
+ $NOVA_DIR/bin/nova-manage user admin admin admin admin
+ # create a project called 'admin' with project manager of 'admin'
+ $NOVA_DIR/bin/nova-manage project create admin admin
+ # export environment variables for project 'admin' and user 'admin'
+ $NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc
+ # create a small network
+ $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
+
+ # nova api crashes if we start it with a regular screen command,
+ # so send the start command by forcing text into the window.
+ screen_it api "$NOVA_DIR/bin/nova-api"
+ screen_it objectstore "$NOVA_DIR/bin/nova-objectstore"
+ #screen_it compute "$NOVA_DIR/bin/nova-compute"
+ screen_it network "$NOVA_DIR/bin/nova-network"
+ screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
+ screen_it volume "$NOVA_DIR/bin/nova-volume"
+ #screen_it test ". $NOVA_DIR/novarc"
+ screen -S nova -x
+fi
+
+if [ "$CMD" == "run" ] || [ "$CMD" == "terminate" ]; then
+ # shutdown instances
+ . $NOVA_DIR/novarc; euca-describe-instances | grep i- | cut -f2 | xargs euca-terminate-instances
+ sleep 2
+ # delete volumes
+ . $NOVA_DIR/novarc; euca-describe-volumes | grep vol- | cut -f2 | xargs -n1 euca-delete-volume
+fi
+
+if [ "$CMD" == "run" ] || [ "$CMD" == "clean" ]; then
+ screen -S nova -X quit
+ rm *.pid*
+fi
+
+if [ "$CMD" == "scrub" ]; then
+ $NOVA_DIR/tools/clean-vlans
+ if [ "$LIBVIRT_TYPE" == "uml" ]; then
+ virsh -c uml:///system list | grep i- | awk '{print \$1}' | xargs -n1 virsh -c uml:///system destroy
+ else
+ virsh list | grep i- | awk '{print \$1}' | xargs -n1 virsh destroy
+ fi
+ vblade-persist ls | grep vol- | awk '{print \$1\" \"\$2}' | xargs -n2 vblade-persist destroy
+fi
diff --git a/nova/livemigration_test/SI/utils/nova.sh.compute b/nova/livemigration_test/SI/utils/nova.sh.compute
new file mode 100755
index 000000000..ede38ba20
--- /dev/null
+++ b/nova/livemigration_test/SI/utils/nova.sh.compute
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+DIRNAME=nova
+NOVA_DIR=$DIR/$DIRNAME
+if [ -n "$3" ]; then
+ NOVA_DIR=$DIR/$3
+fi
+
+DIR=/opt/nova-2010.4
+REDIS_DIR=/opt/redis-2.0.0-rc4/
+NOVA_DIR=$DIR/nova
+HOST_IP=172.19.0.131
+
+mkdir -p /etc/nova
+cat >/etc/nova/nova-manage.conf << NOVA_CONF_EOF
+--verbose
+--nodaemon
+--dhcpbridge_flagfile=/etc/nova/nova-manage.conf
+--FAKE_subdomain=ec2
+--libvirt_type=qemu
+--no_internet_conn=True
+--public_netif=eth0
+--public_interface=eth0
+
+--cc-host=$HOST_IP
+--routing_source_ip=$HOST_IP
+--sql_connection=mysql://root:nova@$HOST_IP/nova
+--rabbit_host=$HOST_IP
+--redis_host=$HOST_IP
+--s3_host=$HOST_IP
+--auth_driver=nova.auth.ldapdriver.LdapDriver
+--ldap_url=ldap://$HOST_IP
+
+NOVA_CONF_EOF
+
+$DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf
+
diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py
new file mode 100644
index 000000000..5a27f6699
--- /dev/null
+++ b/nova/livemigration_test/UT/computeManager.test.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+
+import sys
+import os
+import unittest
+import commands
+import re
+import logging
+
+from mock import Mock
+import twisted
+
+# getting /nova-inst-dir
+NOVA_DIR = os.path.abspath(sys.argv[0])
+for i in range(4):
+ NOVA_DIR = os.path.dirname(NOVA_DIR)
+
+try:
+ print
+ print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' \
+ % NOVA_DIR
+ print
+
+ sys.path.append(NOVA_DIR)
+
+ from nova.compute.manager import ComputeManager
+ from nova.virt.libvirt_conn import LibvirtConnection
+
+ from nova import context
+ from nova import db
+ from nova import exception
+ from nova import flags
+ from nova import quota
+ from nova import utils
+ from nova.auth import manager
+ from nova.cloudpipe import pipelib
+ from nova import rpc
+ from nova.api.ec2 import cloud
+ from nova.compute import power_state
+
+ from nova.db.sqlalchemy.models import *
+
+
+except:
+ print 'set correct NOVA_DIR in this script. '
+ raise
+
+
+class tmpStdout:
+ def __init__(self):
+ self.buffer = ""
+
+ def write(self, arg):
+ self.buffer += arg
+
+ def writelines(self, arg):
+ self.buffer += arg
+
+ def flush(self):
+ print 'flush'
+ self.buffer = ''
+
+
+class tmpStderr(tmpStdout):
+ def write(self, arg):
+ self.buffer += arg
+
+ def flush(self):
+ pass
+
+ def realFlush(self):
+ self.buffer = ''
+
+dummyCallReturnValue={ 0:True }
+dummyCallCount=0
+def dummyCall(context, topic, method):
+ global dummyCallReturnValue, dummyCallCount
+ if dummyCallCount in dummyCallReturnValue.keys() :
+ ret = dummyCallReturnValue[ dummyCallCount ]
+ dummyCallCount += 1
+ return ret
+ else :
+ dummyCallCount += 1
+ return False
+
+
+class ComputeTestFunctions(unittest.TestCase):
+
+ stdout = None
+ stdoutBak = None
+ stderr = None
+ stderrBak = None
+ manager = None
+
+    # Common setup executed before each test.
+ def setUp(self):
+ """common init method. """
+
+ #if self.stdout is None:
+ # self.__class__.stdout = tmpStdout()
+ #self.stdoutBak = sys.stdout
+ #sys.stdout = self.stdout
+ if self.stderr is None:
+ self.__class__.stderr = tmpStderr()
+ self.stderrBak = sys.stderr
+ sys.stderr = self.stderr
+
+ self.host = 'openstack2-api'
+ if self.manager is None:
+ self.__class__.manager = ComputeManager(host=self.host)
+
+ self.setTestData()
+ self.setMocks()
+
+ def setTestData(self):
+
+ self.host1 = Host()
+ for key, val in [('name', 'host1'), ('cpu', 5),
+ ('memory_mb', 20480), ('hdd_gb', 876)]:
+ self.host1.__setitem__(key, val)
+
+ self.host2 = Host()
+ for key, val in [('name', 'host2'), ('cpu', 5),
+ ('memory_mb', 20480), ('hdd_gb', 876)]:
+ self.host2.__setitem__(key, val)
+
+ self.instance1 = Instance()
+ for key, val in [('id', 1), ('host', 'host1'),
+ ('hostname', 'i-12345'), ('state', power_state.RUNNING),
+ ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024),
+ ('hdd_gb', 5), ('internal_id', 12345)]:
+ self.instance1.__setitem__(key, val)
+
+ self.instance2 = Instance()
+ for key, val in [('id', 2), ('host', 'host1'),
+ ('hostname', 'i-12345'), ('state', power_state.RUNNING),
+ ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024),
+ ('hdd_gb', 5)]:
+ self.instance2.__setitem__(key, val)
+
+ self.fixed_ip1 = FixedIp()
+ for key, val in [('id', 1), ('address', '1.1.1.1'),
+ ('network_id', '1'), ('instance_id', 1)]:
+ self.fixed_ip1.__setitem__(key, val)
+
+ self.vol1 = Volume()
+ for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'),
+ ('availability_zone', 'nova'), ('host', 'host1')]:
+ self.vol1.__setitem__(key, val)
+
+ self.vol2 = Volume()
+ for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'),
+ ('availability_zone', 'nova'), ('host', 'host1')]:
+ self.vol2.__setitem__(key, val)
+
+ self.secgrp1 = Volume()
+ for key, val in [('id', 1), ('ec2_id', 'default')]:
+ self.secgrp1.__setitem__(key, val)
+
+ self.secgrp2 = Volume()
+ for key, val in [('id', 2), ('ec2_id', 'def2')]:
+ self.secgrp2.__setitem__(key, val)
+
+ self.netref1 = Network()
+
+ def setMocks(self):
+
+ # mocks for pre_live_migration
+ self.ctxt = context.get_admin_context()
+ db.instance_get = Mock(return_value=self.instance1)
+ db.instance_get_fixed_address = Mock(return_value = self.fixed_ip1)
+ db.volume_get_all_by_instance \
+ = Mock(return_value=[self.vol1, self.vol2])
+ db.volume_get_shelf_and_blade = Mock(return_value=(3, 4))
+ db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1)
+ db.security_group_get_by_instance \
+ = Mock(return_value=[self.secgrp1, self.secgrp2])
+ self.manager.driver.ensure_filtering_rules_for_instance \
+ = Mock(return_value=None)
+ self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None)
+ self.manager.network_manager.setup_compute_network \
+ = Mock(return_value=None)
+ # mocks for live_migration_
+ rpc.call = Mock(return_value=True)
+ db.instance_set_state = Mock(return_value=True)
+
+
+ # ---> test for nova.compute.manager.pre_live_migration()
+ def test01(self):
+ """01: NotFound error occurs on finding instance on DB. """
+
+ db.instance_get = Mock(side_effect=exception.NotFound('ERR'))
+
+ self.assertRaises(exception.NotFound,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test02(self):
+ """02: Unknown error occurs on finding instance on DB. """
+
+ db.instance_get = Mock(side_effect=TypeError('ERR'))
+
+ self.assertRaises(TypeError,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+    # The case of no fixed ip found on DB (instance_get_fixed_address
+    # returns an unknown error) is omitted because it is the same as test02.
+
+ def test03(self):
+ """03: no fixed ip found on DB, """
+
+ db.instance_get_fixed_address = Mock(return_value=None)
+
+ try :
+ self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', 'host2')
+ except exception.NotFound, e:
+ c1 = ( 0 < e.message.find('doesnt have fixed_ip'))
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test04(self):
+ """04: no fixed_ip found on DB. """
+
+ db.instance_get_fixed_address \
+ = Mock(side_effect=exception.NotFound('ERR'))
+
+ self.assertRaises(exception.NotFound,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test05(self):
+ """05: NotAuthrized occurs on finding volume on DB. """
+
+ db.volume_get_all_by_instance \
+ = Mock(side_effect=exception.NotAuthorized('ERR'))
+
+ self.assertRaises(exception.NotAuthorized,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test06(self):
+ """06: Unexpected exception occurs on finding volume on DB. """
+
+ db.volume_get_all_by_instance = Mock(side_effect=TypeError('ERR'))
+
+ self.assertRaises(TypeError,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test07(self):
+ """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """
+ self.manager.network_manager.setup_compute_network \
+ = Mock(side_effect=exception.ProcessExecutionError("ERR"))
+
+ self.assertRaises(exception.ProcessExecutionError,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+
+ def test08(self):
+ """08: self.manager.network_manager.setup_compute_network
+ exception.NotFound. """
+ self.manager.network_manager.setup_compute_network \
+ = Mock(side_effect=exception.NotFound("ERR"))
+
+ self.assertRaises(exception.NotFound,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+    # ensure_filtering_rules_for_instance mainly accesses libvirt.
+    # Therefore the exception may be a libvirt-related one, but we cannot
+    # predict which; the tests just expect an unknown exception (TypeError here).
+ def test09(self):
+ """09: self.driver.ensure_filtering_rules_for_instance raises unexpected exception. """
+ self.manager.driver.ensure_filtering_rules_for_instance \
+ = Mock(side_effect=TypeError('ERR'))
+
+ self.assertRaises(TypeError,
+ self.manager.pre_live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test10(self):
+ """10: volume/fixed_ip found on DB, all procedure finish
+ successfully.. """
+
+ result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id',
+ 'host2')
+ self.assertEqual(result, None)
+
+ # ---> test for nova.compute.manager.live_migration()
+
+ def test11(self):
+ """11: if db_instance_get issues NotFound.
+ """
+ db.instance_get = Mock(side_effect=exception.NotFound("ERR"))
+
+ self.assertRaises(exception.NotFound,
+ self.manager.live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test12(self):
+ """12: if db_instance_get issues Unexpected Error.
+ """
+ db.instance_get = Mock(side_effect=TypeError("ERR"))
+
+ self.assertRaises(TypeError,
+ self.manager.live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test13(self):
+ """13: if rpc.call returns rpc.RemoteError. """
+
+ rpc.call = Mock(return_value=rpc.RemoteError(None, None, None))
+ db.instance_set_state = Mock(return_value=True)
+ try :
+ self.manager.live_migration(self.ctxt, 'dummy_ec2_id','host2')
+ except rpc.RemoteError, e:
+ c1 = ( 0 < e.message.find('Pre live migration for'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test14(self):
+ """14: if rpc.call returns rpc.RemoteError and instance_set_state
+ also ends up err. (then , unexpected err occurs, in this case
+ TypeError)
+ """
+ rpc.call = Mock(return_value=rpc.RemoteError(None, None, None))
+ db.instance_set_state = Mock(side_effect=TypeError("ERR"))
+
+ try :
+ self.manager.live_migration(self.ctxt, 'dummy_ec2_id','host2')
+ except TypeError, e:
+ c1 = ( 0 < e.message.find('Pre live migration for'))
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test15(self):
+ """15: if rpc.call returns rpc.RemoteError and volume_get_all_by_instance
+ also ends up err. (then , unexpected err occurs, in this case
+ TypeError)
+ """
+ rpc.call = Mock(return_value=rpc.RemoteError(None, None, None))
+ db.volume_get_all_by_instance = Mock(side_effect=exception.NotAuthorized("ERR"))
+
+ try :
+ self.manager.live_migration(self.ctxt, 'dummy_ec2_id','host2')
+ except exception.NotAuthorized, e:
+ c1 = ( 0 < e.message.find('Pre live migration for'))
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test16(self):
+ """16: if rpc.call finish successfully, e, and live_migration raises
+ TypeError(Unexpected error), which means unexpected libvirt-related one. """
+ rpc.call = Mock(return_value=None)
+ ret = self.manager.driver.live_migration \
+ = Mock(side_effect=TypeError("ERR"))
+
+ self.assertRaises(TypeError,
+ self.manager.live_migration,
+ self.ctxt,
+ 'dummy_ec2_id',
+ 'host2')
+
+ def test17(self):
+ """17: everything goes well. """
+ self.manager.driver.live_migration = Mock(return_value=True)
+ ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1')
+ self.assertEqual(ret, None)
+
+ def tearDown(self):
+ """common terminating method. """
+ self.stderr.realFlush()
+ sys.stderr = self.stderrBak
+ #sys.stdout = self.stdoutBak
+
+if __name__ == '__main__':
+ logging.getLogger().setLevel(logging.DEBUG)
+ #unittest.main()
+
+ suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions)
+ unittest.TextTestRunner(verbosity=2).run(suite)
+
+ #suite = unittest.TestSuite()
+ #suite.addTest(ComputeTestFunctions("test15"))
+ #suite.addTest(ComputeTestFunctions("test16"))
+ #unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py
new file mode 100644
index 000000000..1040b7aa8
--- /dev/null
+++ b/nova/livemigration_test/UT/libvirtConnection.test.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+
+import sys
+import os
+import unittest
+import commands
+import re
+import logging
+import libvirt
+import libxml2
+
+from mock import Mock
+import twisted
+
+# getting /nova-inst-dir
+NOVA_DIR = os.path.abspath(sys.argv[0])
+for i in range(4):
+ NOVA_DIR = os.path.dirname(NOVA_DIR)
+
+
+try :
+ print
+ print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR
+ print
+
+ sys.path.append(NOVA_DIR)
+
+ from nova.compute.manager import ComputeManager
+ from nova.virt import libvirt_conn
+
+ from nova import context
+ from nova import db
+ from nova import exception
+ from nova import flags
+ from nova import quota
+ from nova import utils
+ from nova.auth import manager
+ from nova.cloudpipe import pipelib
+ from nova import rpc
+ from nova.api.ec2 import cloud
+ from nova.compute import power_state
+
+ from nova.db.sqlalchemy.models import *
+
+
+except:
+ print 'set correct NOVA_DIR in this script. '
+ raise
+
+
+class tmpStdout:
+ def __init__(self):
+ self.buffer = ""
+ def write(self,arg):
+ self.buffer += arg
+ def writelines(self, arg):
+ self.buffer += arg
+ def flush(self):
+ print 'flush'
+ self.buffer = ''
+
+class tmpStderr(tmpStdout):
+ def write(self,arg):
+ self.buffer += arg
+ def flush(self):
+ pass
+ def realFlush(self):
+ self.buffer = ''
+
+class DummyLibvirtConn(object):
+ nwfilterLookupByName = None
+ def __init__(self):
+ pass
+
+class testDomain(object):
+ def __init__(self):
+ pass
+ def migrateToURI(self, a, b, c, d):
+ pass
+
+
+class LibvirtConnectionTestFunctions(unittest.TestCase):
+
+ stdout = None
+ stdoutBak = None
+ stderr = None
+ stderrBak = None
+ manager = None
+
+    # Common setup executed before each test.
+ def setUp(self):
+ """common init method. """
+
+ if self.stderr is None:
+ self.__class__.stderr = tmpStderr()
+ self.stderrBak = sys.stderr
+ sys.stderr = self.stderr
+
+ self.host = 'openstack2-api'
+ if self.manager is None:
+ self.__class__.manager = libvirt_conn.get_connection(False)
+
+ self.setTestData()
+ self.setMocks()
+
+ def setTestData(self):
+
+ self.host1 = Host()
+ for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]:
+ self.host1.__setitem__(key, val)
+
+ self.instance1 = Instance()
+ for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ'),
+ ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]:
+ self.instance1.__setitem__(key, val)
+
+
+ self.instance2 = Instance()
+ for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ'),
+ ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]:
+ self.instance2.__setitem__(key, val)
+
+
+ self.fixed_ip1 = FixedIp()
+ for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'),
+ ('instance_id', 1)]:
+ self.fixed_ip1.__setitem__(key, val)
+
+ self.floating_ip1 = FloatingIp()
+ for key, val in [ ('id', 1), ('address', '1.1.1.200') ]:
+ self.floating_ip1.__setitem__(key, val)
+
+ self.netref1 = Network()
+ for key, val in [ ('id', 1) ]:
+ self.netref1.__setitem__(key, val)
+
+ self.xml="<cpu><arch>x86_64</arch><model>Nehalem</model><vendor>Intel</vendor><topology sockets='2' cores='4' threads='2'/><feature name='rdtscp'/><feature name='dca'/><feature name='xtpr'/><feature name='tm2'/><feature name='est'/><feature name='vmx'/><feature name='ds_cpl'/><feature name='monitor'/><feature name='pbe'/><feature name='tm'/><feature name='ht'/><feature name='ss'/><feature name='acpi'/><feature name='ds'/><feature name='vme'/></cpu>"
+
+ self.xml2="<cccccpu><arch>x86_64</arch><model>Nehalem</model><vendor>Intel</vendor><topology sockets='2' cores='4' threads='2'/><feature name='rdtscp'/><feature name='dca'/><feature name='xtpr'/><feature name='tm2'/><feature name='est'/><feature name='vmx'/><feature name='ds_cpl'/><feature name='monitor'/><feature name='pbe'/><feature name='tm'/><feature name='ht'/><feature name='ss'/><feature name='acpi'/><feature name='ds'/><feature name='vme'/></cccccpu>"
+
+ self.conn = libvirt.virConnect()
+ self.tmpDomain = testDomain()
+
+ def setMocks(self):
+
+ self.ctxt = context.get_admin_context()
+ # mocks for get_cpu_xml
+ self.manager._wrapped_conn = self.conn
+ self.manager._test_connection = Mock(return_value=True)
+ self.manager._conn.getCapabilities = Mock(return_value=self.xml)
+ # mocks for ensure_filtering_rules_for_instance
+ self.manager.nwfilter.setup_basic_filtering = Mock(return_value=None)
+ self.manager.firewall_driver.prepare_instance_filter = \
+ Mock(return_value=None)
+ #self.manager._conn.nwfilterLookupByName = Mock(return_value=None)
+ self.conn.nwfilterLookupByName = Mock(return_value=None)
+ # mocks for _live_migration
+ self.manager.read_only=True
+ self.manager._connect = Mock(return_value=self.conn)
+ self.conn.lookupByName = Mock(return_value=self.tmpDomain)
+ db.instance_set_state = Mock(return_value=True)
+ db.volume_get_all_by_instance = Mock(return_value=[])
+ #self.manager._conn = Mock(return_value=self.tmpDomain)
+
+ db.instance_get_fixed_address = Mock(return_value = '1.1.1.1')
+ db.fixed_ip_update = Mock(return_value = None)
+ db.fixed_ip_get_network = Mock(return_value = self.netref1)
+ db.network_update = Mock(return_value = None)
+ db.instance_get_floating_address = Mock(return_value = '1.1.1.200')
+ db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1)
+ db.floating_ip_update = Mock(return_value = None)
+ db.instance_update = Mock(return_value = None)
+
+
+ # ---> test for nova.virt.get_cpu_xml()
+ def test01(self):
+ """01: getCapabilities raises libvirt.libvirtError. """
+
+ self.manager._conn.getCapabilities = \
+ Mock(side_effect=libvirt.libvirtError("ERR"))
+
+ self.assertRaises(libvirt.libvirtError, self.manager.get_cpu_xml)
+ return False
+
+ def test02(self):
+ """02: libxml2.parseDoc raises libxml2.parserError. """
+
+ tmp = libxml2.parseDoc
+ libxml2.parseDoc = Mock(side_effect=libxml2.parserError("ERR"))
+ try :
+ self.manager.get_cpu_xml()
+ except libxml2.parserError, e:
+ libxml2.parseDoc = tmp
+ return True
+ libxml2.parseDoc = tmp
+ return False
+
+ # no exception case assumed for xml.xpathEval, so no test case.
+ def test03(self):
+        """03: xml format is invalid (no cpu tag exists). """
+
+ self.manager._conn.getCapabilities = Mock(return_value=self.xml2)
+ try :
+ self.manager.get_cpu_xml()
+ except exception.Invalid, e:
+ c1 = ( 0 <= e.message.find('Unexpected xml format'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test04(self):
+        """04: re.sub raises unexpected exception. """
+
+ tmp = re.sub
+ re.sub = Mock(side_effect=TypeError("ERR"))
+ try :
+ self.manager.get_cpu_xml()
+ except TypeError, e:
+ re.sub = tmp
+ return True
+ re.sub = tmp
+ return False
+
+ def test05(self):
+ """05: everything goes well. """
+
+ ret = self.manager.get_cpu_xml()
+ self.assertTrue(type(ret), str)
+ return False
+
+ # ---> test for nova.virt.libvirt_conn.compare_cpu()
+
+ def test06(self):
+ """06: compareCPU raises libvirt.libvirtError. """
+
+ self.manager._conn.compareCPU = \
+ Mock(side_effect=libvirt.libvirtError("ERR"))
+
+ self.assertRaises(libvirt.libvirtError, self.manager.compare_cpu, '')
+ return False
+
+ def test07(self):
+ """07: compareCPU returns 0 """
+
+ self.manager._conn.compareCPU = Mock(return_value=0)
+ try :
+ self.manager.compare_cpu('')
+ except exception.Invalid, e:
+ c1 = ( 0 <= e.message.find('CPU does not have compativility'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test08(self):
+ """08: compare_cpu finished successfully. """
+
+ self.conn.compareCPU = Mock(return_value=1)
+ ret = self.manager.compare_cpu('')
+ return ret == None
+
+ # ---> test for nova.virt.libvirt_conn.ensure_filtering_for_instance()
+
+ def test09(self):
+ """09: setup_basic_filtering raises unexpected exception. """
+
+ self.manager.nwfilter.setup_basic_filtering = \
+ Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager.ensure_filtering_rules_for_instance,
+ self.instance1)
+ return False
+
+ def test10(self):
+ """10: prepare_instance_filter raises unexpected exception. """
+
+ self.manager.firewall_driver.prepare_instance_filter = \
+ Mock(side_effect=libvirt.libvirtError('ERR'))
+
+ self.assertRaises(libvirt.libvirtError,
+ self.manager.ensure_filtering_rules_for_instance,
+ self.instance1)
+ return False
+
+ def test11(self):
+ """11: nwfilterLookupByName raises libvirt.libvirtError. """
+
+ self.conn.nwfilterLookupByName = \
+ Mock(side_effect=libvirt.libvirtError('ERR'))
+
+ try :
+ self.manager.ensure_filtering_rules_for_instance(self.instance1)
+ except exception.Error, e:
+ c1 = ( 0 <= e.message.find('Timeout migrating for'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test12(self):
+ """12: everything goes well. """
+
+ ret = self.manager.ensure_filtering_rules_for_instance(self.instance1)
+ return ret == None
+
+
+    # ---> test for nova.virt.libvirt_conn.live_migration()
+
+ def test13(self):
+ """13: self._connect raises libvirt.libvirtError. """
+
+ self.manager._connect = Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test14(self):
+ """14: lookupByName raises libvirt.libvirtError. """
+
+ self.conn.lookupByName = Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test15(self):
+ """15: migrateToURI raises libvirt.libvirtError. """
+
+ self.tmpDomain.migrateToURI = Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test16(self):
+ """16: close raises libvirt.libvirtError. """
+
+ self.conn.close = Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test17(self):
+        """17: lookupByName raises libvirt.libvirtError (using existing connection). """
+
+ self.manager.read_only = False
+ self.conn.lookupByName = Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test18(self):
+        """18: migrateToURI raises libvirt.libvirtError (using existing connection). """
+
+ self.manager.read_only = False
+ self.tmpDomain.migrateToURI = Mock(side_effect=libvirt.libvirtError('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test19(self):
+        """19: migrateToURI raises libvirt.libvirtError (using existing connection), and instance_set_state raises unexpected exception (TypeError here)"""
+
+ self.manager.read_only = False
+ self.tmpDomain.migrateToURI = Mock(side_effect=libvirt.libvirtError('ERR'))
+ db.instance_set_state = Mock(side_effect=TypeError('ERR'))
+ self.assertRaises(TypeError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test20(self):
+        """20: migrateToURI raises libvirt.libvirtError (using existing connection), and volume_get_all_by_instance raises unexpected exception (TypeError here)"""
+
+ self.manager.read_only = False
+ self.tmpDomain.migrateToURI = Mock(side_effect=libvirt.libvirtError('ERR'))
+ db.volume_get_all_by_instance = Mock(side_effect=TypeError('ERR'))
+ self.assertRaises(TypeError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test21(self):
+        """21: migrateToURI raises libvirt.libvirtError (using existing connection), and volume_get_all_by_instance raises exception.NotFound"""
+
+ self.manager.read_only = False
+ self.tmpDomain.migrateToURI = Mock(side_effect=libvirt.libvirtError('ERR'))
+ db.volume_get_all_by_instance = Mock(side_effect=exception.NotFound('ERR'))
+ self.assertRaises(libvirt.libvirtError,
+ self.manager._live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test22(self):
+ """22: everything goes well"""
+
+ self.manager.read_only = False
+ ret = self.manager._live_migration(self.ctxt, self.instance1, 'host2')
+ return ret == None
+
+
+    # ---> test for nova.virt.libvirt_conn._post_live_migration
+
+ def test23(self):
+ """23: instance_ref is not nova.db.sqlalchemy.models.Instances"""
+
+ self.assertRaises(TypeError,
+ self.manager._post_live_migration,
+ self.ctxt,
+ "dummy string",
+ 'host2')
+
+ def test24(self):
+ """24: db.instance_get_fixed_address return None"""
+
+ db.instance_get_fixed_address = Mock( return_value=None )
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found'))
+ self.assertEqual(c1 and c2, True)
+
+ def test25(self):
+ """25: db.instance_get_fixed_address raises NotFound"""
+
+ db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') )
+ self.assertRaises(exception.NotFound,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host2')
+
+ def test26(self):
+ """26: db.instance_get_fixed_address raises Unknown exception"""
+
+ db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') )
+ self.assertRaises(TypeError,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+ def test27(self):
+ """27: db.fixed_ip_update return NotFound. """
+
+ db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') )
+ self.assertRaises(exception.NotFound,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+ def test28(self):
+ """28: db.fixed_ip_update return NotAuthorized. """
+ db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') )
+ self.assertRaises(exception.NotAuthorized,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+ def test29(self):
+        """29: db.fixed_ip_update raises an unknown exception. """
+ db.fixed_ip_update = Mock( side_effect=TypeError('ERR') )
+ self.assertRaises(TypeError,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+    def test29(self):  # NOTE(review): duplicate name — shadows the test29 above, which never runs; rename one of them
+ """11: db.fixed_ip_get_network causes NotFound. """
+
+ db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') )
+ self.assertRaises(exception.NotFound,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+ # not tested db.fixed_ip_get_network raises NotAuthorized
+ # because same test has been done at previous test.
+
+ def test30(self):
+ """30: db.fixed_ip_get_network causes Unknown exception. """
+
+ db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') )
+ self.assertRaises(TypeError,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+ def test31(self):
+ """13: db.network_update raises Unknown exception. """
+ db.network_update = Mock( side_effect=TypeError('ERR') )
+ self.assertRaises(TypeError,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+    def test31(self):  # NOTE(review): duplicate name — shadows the test31 above, which never runs; rename one of them
+ """14: db.instance_get_floating_address raises NotFound. """
+ db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR"))
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip'))
+ self.assertEqual(c1 and c2, True)
+
+
+ def test32(self):
+ """32: db.instance_get_floating_address returns None. """
+
+ db.instance_get_floating_address = Mock( return_value=None )
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found'))
+ self.assertEqual(c1 and c2, True)
+
+ def test33(self):
+ """33: db.instance_get_floating_address raises NotFound. """
+
+ db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR"))
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip'))
+ self.assertEqual(c1 and c2, True)
+
+ def test34(self):
+ """34: db.instance_get_floating_address raises Unknown exception. """
+ db.instance_get_floating_address = Mock(side_effect=TypeError("ERR"))
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error'))
+ self.assertEqual(c1 and c2, True)
+
+
+ def test35(self):
+ """35: db.floating_ip_get_by_address raises NotFound """
+
+ db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR"))
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip'))
+ self.assertEqual(c1 and c2, True)
+
+ def test36(self):
+ """36: db.floating_ip_get_by_address raises Unknown exception. """
+ db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR"))
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error'))
+ self.assertEqual(c1 and c2, True)
+
+
+ def test37(self):
+ """37: db.floating_ip_update raises Unknown exception.
+ """
+ db.floating_ip_update = Mock(side_effect=TypeError("ERR"))
+ ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1')
+ c1 = (ret == None)
+ c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error'))
+ self.assertEqual(c1 and c2, True)
+
+ def test38(self):
+ """38: db.instance_update raises unknown exception. """
+
+ db.instance_update = Mock(side_effect=TypeError("ERR"))
+ self.assertRaises(TypeError,
+ self.manager._post_live_migration,
+ self.ctxt,
+ self.instance1,
+ 'host1')
+
+ def tearDown(self):
+ """common terminating method. """
+ self.stderr.realFlush()
+ sys.stderr = self.stderrBak
+ #sys.stdout = self.stdoutBak
+
+if __name__ == '__main__':
+ logging.getLogger().setLevel(logging.DEBUG)
+ #unittest.main()
+
+ suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions)
+ unittest.TextTestRunner(verbosity=2).run(suite)
+
+ #suite = unittest.TestSuite()
+ #suite.addTest(LibvirtConnectionTestFunctions("test14"))
+ #suite.addTest(LibvirtConnectionTestFunctions("test16"))
+ #unittest.TextTestRunner(verbosity=2).run(suite)
+
+
diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py
new file mode 100644
index 000000000..0017cedfe
--- /dev/null
+++ b/nova/livemigration_test/UT/nova-manage.test.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+NOVA_DIR='/opt/nova-2010.4'
+
+import sys
+import os
+import unittest
+import commands
+import re
+
+from mock import Mock
+
+# getting /nova-inst-dir
+NOVA_DIR = os.path.abspath(sys.argv[0])
+for i in range(4):
+ NOVA_DIR = os.path.dirname(NOVA_DIR)
+
+
+try :
+ print
+ print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' % NOVA_DIR
+ print
+
+ sys.path.append(NOVA_DIR)
+
+ from nova import context
+ from nova import db
+ from nova import exception
+ from nova import flags
+ from nova import quota
+ from nova import utils
+ from nova.auth import manager
+ from nova.cloudpipe import pipelib
+ from nova import rpc
+ from nova.api.ec2 import cloud
+ from nova.compute import power_state
+
+ from nova.db.sqlalchemy.models import *
+
+
+except:
+ print 'set correct NOVA_DIR in this script. '
+ raise
+
+
+class tmpStdout:
+ def __init__(self):
+ self.buffer = ""
+ def write(self,arg):
+ self.buffer += arg
+ def flush(self):
+ self.buffer = ''
+
+class tmpStderr(tmpStdout):
+ def write(self, arg):
+ self.buffer += arg
+ def flush(self):
+ pass
+ def realFlush(self):
+ self.buffer = ''
+
+
+class NovaManageTestFunctions(unittest.TestCase):
+
+ stdout = None
+ stdoutBak = None
+ stderr = None
+ stderrBak = None
+
+ hostCmds = None
+
+    # Common initialization
+ def setUp(self):
+ """common init method. """
+
+ commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() ))
+ commands.getstatusoutput('touch %s' % self.getInitpyPath() )
+ try :
+ import bin.novamanagetest
+ except:
+ print 'Fail to import nova-manage . check bin/nova-manage exists'
+ raise
+
+ # replace stdout for checking nova-manage output
+ if self.stdout is None :
+ self.__class__.stdout = tmpStdout()
+ self.stdoutBak = sys.stdout
+ sys.stdout = self.stdout
+
+ # replace stderr for checking nova-manage output
+ if self.stderr is None:
+ self.__class__.stderr = tmpStderr()
+ self.stderrBak = sys.stderr
+ sys.stderr = self.stderr
+
+ # prepare test data
+ self.setTestData()
+
+ # only AoE is supported for now
+ FLAGS.volume_driver = 'nova.volume.driver.AOEDriver'
+
+
+ def setTestData(self):
+ import bin.novamanagetest
+
+ if self.hostCmds is None :
+ self.__class__.hostCmds = bin.novamanagetest.HostCommands()
+ self.instanceCmds = bin.novamanagetest.InstanceCommands()
+
+ self.host1 = Host()
+ self.host1.__setitem__('name', 'host1')
+
+ self.host2 = Host()
+ self.host2.__setitem__('name', 'host2')
+
+ self.instance1 = Instance()
+ self.instance1.__setitem__('id', 1)
+ self.instance1.__setitem__('host', 'host1')
+ self.instance1.__setitem__('hostname', 'i-12345')
+ self.instance1.__setitem__('state', power_state.NOSTATE)
+ self.instance1.__setitem__('state_description', 'running')
+
+ self.instance2 = Instance()
+ self.instance2.__setitem__('id', 2)
+ self.instance2.__setitem__('host', 'host1')
+ self.instance2.__setitem__('hostname', 'i-12345')
+ self.instance2.__setitem__('state', power_state.RUNNING)
+ self.instance2.__setitem__('state_description', 'pending')
+
+ self.instance3 = Instance()
+ self.instance3.__setitem__('id', 3)
+ self.instance3.__setitem__('host', 'host1')
+ self.instance3.__setitem__('hostname', 'i-12345')
+ self.instance3.__setitem__('state', power_state.RUNNING)
+ self.instance3.__setitem__('state_description', 'running')
+
+ db.host_get_all = Mock(return_value=[self.host1, self.host2])
+
+ def getInitpyPath(self):
+ return '%s/bin/__init__.py' % NOVA_DIR
+
+ def getNovaManageCopyPath(self):
+ return '%s/bin/novamanagetest.py' % NOVA_DIR
+
+ # -----> Test for nova-manage host list
+
+ def test01(self):
+ """01: Got some host lists. """
+
+ self.hostCmds.list()
+
+ c1 = (2 == self.stdout.buffer.count('\n'))
+ c2 = (0 <= self.stdout.buffer.find('host1'))
+ c3 = (0 <= self.stdout.buffer.find('host2'))
+ self.assertEqual(c1 and c2 and c3, True)
+
+ def test02(self):
+        """02: Got an empty list. """
+
+ db.host_get_all = Mock(return_value=[])
+ self.hostCmds.list()
+
+ # result should be empty
+ c = (0 == len(self.stdout.buffer) )
+ self.assertEqual(c, True)
+
+ def test03(self):
+ """03: Got notFound """
+
+ db.host_get_all = Mock(side_effect=exception.NotFound("ERR"))
+ self.assertRaises(exception.NotFound, self.hostCmds.list)
+
+ # --------> Test For nova-manage host show
+
+ def test04(self):
+ """04: args are not enough(nova-manage host show) """
+ self.assertRaises(TypeError, self.hostCmds.show )
+
+
+ def test05(self):
+ """05: nova-manage host show not-registered-host, and got an error"""
+
+ rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} )
+ self.hostCmds.show('host1')
+ self.assertEqual( self.stdout.buffer[:3]=='ERR', True )
+
+
+ def test06(self):
+        """06: nova-manage host show registered-host, and no project uses the host"""
+
+ dic = {'ret': True,
+ 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3},
+ 'usage': {}}
+
+ rpc.call = Mock(return_value=dic )
+ self.hostCmds.show('host1')
+
+ # result should be :
+ # HOST PROJECT cpu mem(mb) disk(gb)
+ # host1 1 2 3
+ line = self.stdout.buffer.split('\n')[1]
+ line = re.compile('\t+').sub(' ', line).strip()
+ c1 = ( 'host1 1 2 3' == line )
+ c2 = ( self.stdout.buffer.count('\n') == 2 )
+
+ self.assertEqual( c1 and c2, True )
+
+ def test07(self):
+        """07: nova-manage host show registered-host,
+ and some projects use the host
+ """
+ dic = {'ret': True,
+ 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3},
+ 'usage': {'p1': {'vcpus':1, 'memory_mb':2, 'local_gb':3},
+ 'p2': {'vcpus':1, 'memory_mb':2, 'local_gb':3} }}
+
+ rpc.call = Mock(return_value=dic )
+ self.hostCmds.show('host1')
+
+ # result should be :
+ # HOST PROJECT cpu mem(mb) disk(gb)
+ # host1 1 2 3
+ # host1 p1 1 2 3
+ # host1 p2 4 5 6
+ line = self.stdout.buffer.split('\n')[1]
+ ret = re.compile('\t+').sub(' ', line).strip()
+ c1 = ( 'host1 1 2 3' == ret )
+
+ line = self.stdout.buffer.split('\n')[2]
+ line = re.compile('\t+').sub(' ', line).strip()
+ c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line )
+
+ line = self.stdout.buffer.split('\n')[3]
+ ret = re.compile('\t+').sub(' ', line).strip()
+ c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret )
+
+ self.assertEqual( c1 and c2 and c3, True )
+
+ def test08(self):
+ """08: nova-manage host show registerd-host, and rpc.call returns None
+ (unexpected error)
+ """
+ rpc.call = Mock(return_value=None )
+ self.hostCmds.show('host1')
+ c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') )
+ self.assertEqual( c1, True )
+
+ # ----------> Test for bin/nova-manage instance live_migration
+
+ def test09(self):
+ """09: arguments are not enough(nova-manage instances live_migration)
+ """
+ self.assertRaises(TypeError, self.instanceCmds.live_migration )
+
+ def test10(self):
+ """10: arguments are not enough(nova-manage instances live_migration ec2_id)
+ """
+ self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' )
+
+ def test11(self):
+ """11: nova-manage instances live_migration ec2_id(invalid id) host"""
+
+ db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') )
+ try :
+ self.instanceCmds.live_migration('i-xxx', 'host1')
+ except exception.NotFound, e:
+ c1 = (0 < str(e.args).find('is not found') )
+ self.assertTrue(c1, True)
+ return False
+
+ def test12(self):
+ """12: nova-manage instances live_migration ec2_id host
+        and db.instance_get_by_internal_id raises an unexpected exception.
+ """
+ db.instance_get_by_internal_id = Mock( side_effect=TypeError('ERR') )
+ self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' )
+
+ def test13(self):
+ """13: nova-manage instances live_migration ec2_id host,
+ rpc.call raises RemoteError because destination doesnt have enough resource.
+ """
+ db.host_get_by_name = Mock(return_value = self.host1)
+ db.instance_get_by_internal_id = Mock( return_value = self.instance3 )
+ rpc.call = Mock(return_value = rpc.RemoteError(TypeError, 'val', 'traceback'))
+ try :
+ self.instanceCmds.live_migration('i-xxx', 'host2')
+ except exception.Error, e:
+ c1 = ( 0 < e.message.find('traceback'))
+ self.assertTrue(c1, True)
+ return True
+ return False
+
+ def test14(self):
+ """14: nova-manage instances live_migration ec2_id host,
+        everything goes well, and gets success messages.
+ """
+ db.host_get_by_name = Mock(return_value = self.host1)
+ db.instance_get_by_internal_id = Mock( return_value = self.instance3 )
+ rpc.call = Mock(return_value = None)
+
+ self.instanceCmds.live_migration('i-12345', 'host2')
+ c1 = (0 <= self.stdout.buffer.find('Check its progress using euca-describe-instances') )
+ self.assertEqual( c1, True )
+
+
+ def tearDown(self):
+ """common terminating method. """
+ commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() )
+ commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() )
+ sys.stdout.flush()
+ sys.stdout = self.stdoutBak
+ self.stderr.realFlush()
+ sys.stderr = self.stderrBak
+
+if __name__ == '__main__':
+ #unittest.main()
+ suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions)
+ unittest.TextTestRunner(verbosity=3).run(suite)
+
+
diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py
new file mode 100644
index 000000000..db4f48bc1
--- /dev/null
+++ b/nova/livemigration_test/UT/schedulerManager.test.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+
+import sys
+import os
+import unittest
+import commands
+import re
+import libvirt
+
+from mock import Mock
+
+# getting /nova-inst-dir
+NOVA_DIR = os.path.abspath(sys.argv[0])
+for i in range(4):
+ NOVA_DIR = os.path.dirname(NOVA_DIR)
+
+try :
+ print
+ print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR
+ print
+
+ sys.path.append(NOVA_DIR)
+
+ from nova.scheduler.manager import SchedulerManager
+
+ from nova import context
+ from nova import db
+ from nova import exception
+ from nova import flags
+ from nova import quota
+ from nova import utils
+ from nova.auth import manager
+ from nova.cloudpipe import pipelib
+ from nova import rpc
+ from nova.api.ec2 import cloud
+ from nova.compute import power_state
+
+ from nova.db.sqlalchemy.models import *
+
+except:
+ print 'set correct NOVA_DIR in this script. '
+ raise
+
+
+class tmpStdout:
+ def __init__(self):
+ self.buffer = ""
+ def write(self,arg):
+ self.buffer += arg
+ def flush(self):
+ self.buffer = ''
+
+
+class SchedulerTestFunctions(unittest.TestCase):
+
+ manager = None
+
+    # Common initialization
+ def setUp(self):
+ """common init method. """
+
+ self.host = 'openstack2-api'
+ if self.manager is None:
+ self.manager = SchedulerManager(host=self.host)
+
+ self.setTestData()
+ self.setMocks()
+
+ def setTestData(self):
+
+ self.host1 = Host()
+ self.host1.__setitem__('name', 'host1')
+ self.host1.__setitem__('vcpus', 5)
+ self.host1.__setitem__('memory_mb', 20480)
+ self.host1.__setitem__('local_gb', 876)
+ self.host1.__setitem__('cpu_info', 1)
+
+ self.host2 = Host()
+ self.host2.__setitem__('name', 'host2')
+ self.host2.__setitem__('vcpus', 5)
+ self.host2.__setitem__('memory_mb', 20480)
+ self.host2.__setitem__('local_gb', 876)
+ self.host2.__setitem__('hypervisor_type', 'QEMU')
+ self.host2.__setitem__('hypervisor_version', 12003)
+ xml="<cpu><arch>x86_64</arch><model>Nehalem</model><vendor>Intel</vendor><topology sockets='2' cores='4' threads='2'/><feature name='rdtscp'/><feature name='dca'/><feature name='xtpr'/><feature name='tm2'/><feature name='est'/><feature name='vmx'/><feature name='ds_cpl'/><feature name='monitor'/><feature name='pbe'/><feature name='tm'/><feature name='ht'/><feature name='ss'/><feature name='acpi'/><feature name='ds'/><feature name='vme'/></cpu>"
+ self.host2.__setitem__('cpu_info', xml)
+
+ self.instance1 = Instance()
+ for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ'),
+ ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]:
+ self.instance1.__setitem__(key, val)
+
+
+ self.instance2 = Instance()
+ for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ'),
+ ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]:
+ self.instance2.__setitem__(key, val)
+
+
+ self.instance3 = Instance()
+ for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ2'),
+ ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5),
+ ('internal_id', 123456), ('state', 1),
+ ('state_description', 'running') ]:
+ self.instance3.__setitem__(key, val)
+
+ self.instance4 = Instance()
+ for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ2'),
+ ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5),
+ ('internal_id', 123456), ('state', 0),
+ ('state_description', 'running') ]:
+ self.instance4.__setitem__(key, val)
+
+ self.instance5 = Instance()
+ for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ2'),
+ ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5),
+ ('internal_id', 123456), ('state', 1),
+ ('state_description', 'migrating') ]:
+ self.instance5.__setitem__(key, val)
+
+ self.instance6 = Instance()
+ for key, val in [ ('id', 6), ('host', 'host2'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ2'),
+ ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]:
+ self.instance6.__setitem__(key, val)
+
+ self.instance7 = Instance()
+ for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING), ('project_id', 'testPJ2'),
+ ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]:
+ self.instance7.__setitem__(key, val)
+
+ self.instance8 = Instance()
+ for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'),
+ ('state', power_state.RUNNING),
+ ('state_description', 'running'),('project_id', 'testPJ2'),
+ ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]:
+ self.instance8.__setitem__(key, val)
+
+ self.service1 = Service()
+ for key, val in [ ('id', 1), ('host', 'host1'), ('binary', 'nova-compute'),
+ ('topic', 'compute')]:
+ self.service1.__setitem__(key, val)
+
+ self.service2 = Service()
+ for key, val in [ ('id', 2), ('host', 'host2'), ('binary', 'nova-compute'),
+ ('topic', 'compute')]:
+ self.service1.__setitem__(key, val)
+
+ def setMocks(self):
+ self.ctxt = context.get_admin_context()
+ # Mocks for has_enough_resource()
+ db.instance_get = Mock(return_value = self.instance3)
+ db.host_get_by_name = Mock(return_value = self.host2)
+ db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] )
+
+ # Mocks for live_migration
+ db.service_get_all_by_topic = Mock(return_value = [self.service1] )
+ self.manager.service_ip_up = Mock(return_value = True)
+ rpc.call = Mock(return_value=1)
+ db.instance_set_state = Mock(return_value = True)
+ self.manager.driver.service_is_up = Mock(return_value = True)
+
+ def check_format(self, val):
+ """check result format of show_host_resource """
+
+ if dict != type(val) :
+ sys.stderr.write('return value is not dict')
+ return False
+
+ if not val.has_key('ret'):
+ sys.stderr.write('invalid format(missing "ret"). ')
+ return False
+
+ if not val['ret'] :
+ if not val.has_key('msg') :
+ sys.stderr.write( 'invalid format(missing "msg").' )
+ return False
+
+ else :
+ if not val.has_key('phy_resource') :
+ sys.stderr.write('invalid format(missing "phy_resource"). ')
+ return False
+
+ if not val.has_key('usage'):
+ sys.stderr.write('invalid format(missing "usage"). ')
+ return False
+
+ if not self._check_format(val['phy_resource']):
+ return False
+
+ for key, dic in val['usage'].items() :
+ if not self._check_format(dic):
+ return False
+ return True
+
+ def _check_format(self, val):
+ if dict != type(val) :
+ sys.stderr.write('return value is not dict')
+ return False
+
+ for key in ['vcpus', 'memory_mb', 'local_gb']:
+ if not val.has_key(key) :
+ sys.stderr.write('invalid format(missing "%s"). ' % key )
+ return False
+
+ return True
+
+
+ # ---> test for nova.scheduler.manager.show_host_resource()
+
+ def test01(self):
+ """01: get NotFound exception when dest host not found on DB """
+
+ db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') )
+ result = self.manager.show_host_resource(self.ctxt, 'not-registered-host')
+ c1 = ( not result['ret'] )
+ c2 = ( 0 == result['msg'].find('No such') )
+ self.assertEqual(c1 and c2, True)
+
+ def test02(self):
+ """02: get other exception if unexpected err. """
+
+ db.host_get_by_name = Mock( side_effect=TypeError('ERR') )
+ self.assertRaises(TypeError, self.manager.show_host_resource, self.ctxt, 'host1' )
+
+ def test03(self):
+ """03: no instance found on dest host. """
+
+ db.host_get_by_name = Mock( return_value = self.host1 )
+ db.instance_get_all_by_host = Mock( return_value=[])
+ ret= self.manager.show_host_resource(self.ctxt, 'host1')
+
+ c1 = self.check_format(ret)
+ v = ret['phy_resource']
+ c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb']))
+ c3 = ( 0 == len(ret['usage']) )
+
+ self.assertEqual(c1 and c2 and c3, True)
+
+ def test04(self):
+ """04: some instance found on dest host. """
+
+ db.host_get_by_name = Mock( return_value = self.host1 )
+ db.instance_get_all_by_host = Mock( return_value=[ self.instance1,
+ self.instance2,
+ self.instance3] )
+
+ db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3)
+ db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024)
+ db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5)
+
+ ret= self.manager.show_host_resource(self.ctxt, 'host1')
+
+ c1 = self.check_format(ret)
+ v = ret['phy_resource']
+ c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb']))
+ c3 = ( 2 == len(ret['usage']) )
+ c4 = ( self.instance1['project_id'] in ret['usage'].keys())
+ c5 = ( self.instance3['project_id'] in ret['usage'].keys())
+
+ self.assertEqual(c1 and c2 and c3 and c4 and c5, True)
+
+
+ # ---> test for nova.scheduler.manager.has_enough_resource()
+ def test05(self):
+        """05: when cpu is exceeded, some instance found on dest host. """
+
+ db.instance_get = Mock(return_value = self.instance6)
+ try :
+ self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1')
+ except exception.NotEmpty, e:
+            # don't use e.message.find(), because the warning below occurs:
+ # DeprecationWarning: BaseException.message has been deprecated
+ # as of Python 2.6
+ c1 = ( 0 < str(e.args).find('doesnt have enough resource') )
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test06(self):
+        """06: when memory is exceeded, some instance found on dest host. """
+
+ db.instance_get = Mock(return_value = self.instance7)
+ try :
+ self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1')
+ except exception.NotEmpty, e:
+ c1 = ( 0 <= str(e.args).find('doesnt have enough resource') )
+ self.assertTrue(c1, True)
+ return False
+
+ def test07(self):
+        """07: when hdd is exceeded, some instance found on dest host. """
+
+ db.instance_get = Mock(return_value = self.instance8)
+ try :
+ self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1')
+ except exception.NotEmpty, e:
+ c1 = ( 0 <= str(e.args).find('doesnt have enough resource') )
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test08(self):
+ """08: everything goes well. (instance_get_all_by_host returns list)"""
+
+ ret= self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1')
+ self.assertEqual(ret, None)
+
+
+ def test09(self):
+ """09: everything goes well(instance_get_all_by_host returns[]). """
+
+ db.instance_get_all_by_host = Mock(return_value = [] )
+ ret= self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1')
+ self.assertEqual(ret, None)
+
+
+ # ---> test for nova.scheduler.manager.live_migration()
+
+
+ def test10(self):
+ """10: instance_get issues NotFound. """
+
+ db.instance_get = Mock(side_effect=exception.NotFound("ERR"))
+ self.assertRaises(exception.NotFound,
+ self.manager.driver.schedule_live_migration,
+ self.ctxt,
+ 'i-12345',
+ 'host1')
+
+ def test11(self):
+ """11: instance_get issues Unexpected error. """
+
+ db.instance_get = Mock(side_effect=TypeError("ERR"))
+ self.assertRaises(TypeError,
+ self.manager.driver.schedule_live_migration,
+ self.ctxt,
+ 'i-12345',
+ 'host1')
+
+ def test12(self):
+ """12: instance state is not power_state.RUNNING. """
+
+ db.instance_get = Mock(return_value=self.instance4)
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host1')
+ except exception.Invalid, e:
+ c1 = (0 <= str(e.args).find('is not running'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test13(self):
+ """13: instance state_description is not running. """
+
+ db.instance_get = Mock(return_value=self.instance5)
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host1')
+ except exception.Invalid, e:
+ c1 = (0 <= str(e.args).find('is not running'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test14(self):
+ """14: dest is not compute node.
+ (dest is not included in the result of db.service_get_all_by_topic)
+ """
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ except exception.Invalid, e:
+ c1 = (0 <= str(e.args).find('must be compute node'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test15(self):
+ """ 15: dest is not alive.(service_is up returns False) """
+
+ self.manager.driver.service_is_up = Mock(return_value=False)
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ except exception.Invalid, e:
+ c1 = (0 <= str(e.args).find('is not alive'))
+ self.assertTrue(c1, True)
+ return False
+
+ # Cannot test the case of hypervisor type difference and hypervisor
+ # version difference, since we cannot set different mocks to same method..
+
+ def test16(self):
+ """ 16: stored "cpuinfo" is not string """
+
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ except exception.Invalid, e:
+ c1 = (0 <= str(e.args).find('Unexpected err') )
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test17(self):
+ """17: rpc.call raises RemoteError(Unexpected error occurs when executing compareCPU) """
+ rpc.call = Mock(return_value = rpc.RemoteError(libvirt.libvirtError, 'val', 'traceback'))
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ except rpc.RemoteError, e:
+ c1 = (0 <= str(e.message).find('doesnt have compatibility to') )
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test18(self):
+ """18: rpc.call returns 0 (cpu is not compatible between src and dest) """
+ rpc.call = Mock(return_value = 0)
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ except exception.Invalid, e:
+ c1 = ( 0 <= str(e.args).find('doesnt have compatibility to'))
+ self.assertTrue(c1, True)
+ return False
+
+ def test19(self):
+ """19: raise NotEmpty if host doesnt have enough resource. """
+
+ db.instance_get = Mock(return_value = self.instance8)
+ try :
+ self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ except exception.NotEmpty, e:
+ c1 = ( 0 <= str(e.args).find('doesnt have enough resource') )
+ self.assertTrue(c1, True)
+ return False
+
+
+ def test20(self):
+ """20: everything goes well. """
+
+ #db.instance_get = Mock(return_value = self.instance8)
+ ret= self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2')
+ self.assertEqual(ret, self.instance8['host'])
+
+
+ def tearDown(self):
+ """common terminating method. """
+ #sys.stdout = self.stdoutBak
+ pass
+
+if __name__ == '__main__':
+ #unittest.main()
+ suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions)
+ unittest.TextTestRunner(verbosity=3).run(suite)
+
+
diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls
new file mode 100644
index 000000000..804dfe2a8
--- /dev/null
+++ b/nova/livemigration_test/UT/testCase_UT.xls
Binary files differ
diff --git a/nova/network/api.py b/nova/network/api.py
index bf43acb51..09d20b57e 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -67,7 +67,6 @@ class API(base.Base):
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
# NOTE(vish): Perhaps we should just pass this on to compute and
# let compute communicate with network.
- host = fixed_ip['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
{"method": "associate_floating_ip",
diff --git a/nova/network/manager.py b/nova/network/manager.py
index c75ecc671..b7c349a90 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -155,7 +155,7 @@ class NetworkManager(manager.Manager):
"""Called when this host becomes the host for a network."""
raise NotImplementedError()
- def setup_compute_network(self, context, instance_id):
+ def setup_compute_network(self, context, instance_id, network_ref=None):
"""Sets up matching network for compute hosts."""
raise NotImplementedError()
@@ -316,7 +316,7 @@ class FlatManager(NetworkManager):
self.db.fixed_ip_update(context, address, {'allocated': False})
self.db.fixed_ip_disassociate(context.elevated(), address)
- def setup_compute_network(self, context, instance_id):
+ def setup_compute_network(self, context, instance_id, network_ref=None):
"""Network is created manually."""
pass
@@ -383,9 +383,10 @@ class FlatDHCPManager(FlatManager):
super(FlatDHCPManager, self).init_host()
self.driver.metadata_forward()
- def setup_compute_network(self, context, instance_id):
+ def setup_compute_network(self, context, instance_id, network_ref=None):
"""Sets up matching network for compute hosts."""
- network_ref = db.network_get_by_instance(context, instance_id)
+ if network_ref is None:
+ network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.flat_interface)
@@ -475,9 +476,10 @@ class VlanManager(NetworkManager):
"""Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
- def setup_compute_network(self, context, instance_id):
+ def setup_compute_network(self, context, instance_id, network_ref=None):
"""Sets up matching network for compute hosts."""
- network_ref = db.network_get_by_instance(context, instance_id)
+ if network_ref is None:
+ network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'])
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 66e46c1b9..d44a3ae44 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -26,6 +26,9 @@ import datetime
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova.compute import power_state
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
@@ -64,3 +67,133 @@ class Scheduler(object):
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
+
+ def schedule_live_migration(self, context, instance_id, dest):
+ """ live migration method """
+
+ # Whether instance exists and running
+ instance_ref = db.instance_get(context, instance_id)
+ ec2_id = instance_ref['hostname']
+
+ # Checking instance state.
+ if power_state.RUNNING != instance_ref['state'] or \
+ 'running' != instance_ref['state_description']:
+ msg = _('Instance(%s) is not running')
+ raise exception.Invalid(msg % ec2_id)
+
+ # Checking destination host exists
+ dhost_ref = db.host_get_by_name(context, dest)
+
+        # Checking that the host where the instance is running
+        # and dest are not the same.
+ src = instance_ref['host']
+ if dest == src:
+ msg = _('%s is where %s is running now. choose other host.')
+ raise exception.Invalid(msg % (dest, ec2_id))
+
+ # Checking dest is compute node.
+ services = db.service_get_all_by_topic(context, 'compute')
+ if dest not in [service.host for service in services]:
+ msg = _('%s must be compute node')
+ raise exception.Invalid(msg % dest)
+
+ # Checking dest host is alive.
+ service = [service for service in services if service.host == dest]
+ service = service[0]
+ if not self.service_is_up(service):
+ msg = _('%s is not alive(time synchronize problem?)')
+ raise exception.Invalid(msg % dest)
+
+ # NOTE(masumotok): Below pre-checkings are followed by
+ # http://wiki.libvirt.org/page/TodoPreMigrationChecks
+
+ # Checking hypervisor is same.
+ orighost = instance_ref['launched_on']
+ ohost_ref = db.host_get_by_name(context, orighost)
+
+ otype = ohost_ref['hypervisor_type']
+ dtype = dhost_ref['hypervisor_type']
+ if otype != dtype:
+ msg = _('Different hypervisor type(%s->%s)')
+ raise exception.Invalid(msg % (otype, dtype))
+
+        # Checking hypervisor version.
+ oversion = ohost_ref['hypervisor_version']
+ dversion = dhost_ref['hypervisor_version']
+ if oversion > dversion:
+ msg = _('Older hypervisor version(%s->%s)')
+ raise exception.Invalid(msg % (oversion, dversion))
+
+ # Checking cpuinfo.
+ cpuinfo = ohost_ref['cpu_info']
+ if str != type(cpuinfo):
+ msg = _('Unexpected err: not found cpu_info for %s on DB.hosts')
+ raise exception.Invalid(msg % orighost)
+
+ try :
+ rpc.call(context,
+ db.queue_get_for(context, FLAGS.compute_topic, dest),
+ {"method": 'compare_cpu',
+ "args": {'xml': cpuinfo}})
+
+ except rpc.RemoteError, e:
+ msg = '%s doesnt have compatibility to %s(where %s launching at)\n'
+ msg += 'result:%s \n'
+ logging.error( _(msg) % (dest, src, ec2_id, ret))
+ raise e
+
+ # Checking dst host still has enough capacities.
+ self.has_enough_resource(context, instance_id, dest)
+
+ # Changing instance_state.
+ db.instance_set_state(context,
+ instance_id,
+ power_state.PAUSED,
+ 'migrating')
+
+ # Changing volume state
+ try:
+ for vol in db.volume_get_all_by_instance(context, instance_id):
+ db.volume_update(context,
+ vol['id'],
+ {'status': 'migrating'})
+ except exception.NotFound:
+ pass
+
+ # Return value is necessary to send request to src
+ # Check _schedule() in detail.
+ return src
+
+ def has_enough_resource(self, context, instance_id, dest):
+ """ Check if destination host has enough resource for live migration"""
+
+ # Getting instance information
+ instance_ref = db.instance_get(context, instance_id)
+ ec2_id = instance_ref['hostname']
+ vcpus = instance_ref['vcpus']
+ mem = instance_ref['memory_mb']
+ hdd = instance_ref['local_gb']
+
+        # Getting host information
+ host_ref = db.host_get_by_name(context, dest)
+ total_cpu = int(host_ref['vcpus'])
+ total_mem = int(host_ref['memory_mb'])
+ total_hdd = int(host_ref['local_gb'])
+
+ instances_ref = db.instance_get_all_by_host(context, dest)
+ for i_ref in instances_ref:
+ total_cpu -= int(i_ref['vcpus'])
+ total_mem -= int(i_ref['memory_mb'])
+ total_hdd -= int(i_ref['local_gb'])
+
+        # Checking whether the host has enough resource
+ logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' %
+ (dest, total_cpu, total_mem, total_hdd))
+ logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' %
+ (ec2_id, vcpus, mem, hdd))
+
+ if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd:
+ msg = '%s doesnt have enough resource for %s' % (dest, ec2_id)
+ raise exception.NotEmpty(msg)
+
+ logging.debug(_('%s has enough resource for %s') % (dest, ec2_id))
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a4d6dd574..308fcffa2 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -29,6 +29,7 @@ from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
+from nova import exception
LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
@@ -67,3 +68,48 @@ class SchedulerManager(manager.Manager):
{"method": method,
"args": kwargs})
LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
+
+
+ # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
+    #                      Based on the Bexar design summit discussion,
+ # just put this here for bexar release.
+ def show_host_resource(self, context, host, *args):
+ """ show the physical/usage resource given by hosts."""
+
+ try:
+ host_ref = db.host_get_by_name(context, host)
+ except exception.NotFound:
+ return {'ret': False, 'msg': 'No such Host'}
+ except:
+ raise
+
+ # Getting physical resource information
+ h_resource = {'vcpus': host_ref['vcpus'],
+ 'memory_mb': host_ref['memory_mb'],
+ 'local_gb': host_ref['local_gb']}
+
+ # Getting usage resource information
+ u_resource = {}
+ instances_ref = db.instance_get_all_by_host(context, host_ref['name'])
+
+ if 0 == len(instances_ref):
+ return {'ret': True, 'phy_resource': h_resource, 'usage': {}}
+
+ project_ids = [i['project_id'] for i in instances_ref]
+ project_ids = list(set(project_ids))
+ for p_id in project_ids:
+ vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
+ host,
+ p_id)
+ mem = db.instance_get_memory_sum_by_host_and_project(context,
+ host,
+ p_id)
+ hdd = db.instance_get_disk_sum_by_host_and_project(context,
+ host,
+ p_id)
+ u_resource[p_id] = {'vcpus': vcpus,
+ 'memory_mb': mem,
+ 'local_gb': hdd}
+
+ return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource}
+
diff --git a/nova/service.py b/nova/service.py
index 523c1a8d7..c360dcf80 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -80,6 +80,13 @@ class Service(object):
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
+
+ try:
+ host_ref = db.host_get_by_name(ctxt, self.host)
+ except exception.NotFound:
+ host_ref = db.host_create(ctxt, {'name': self.host})
+ host_ref = self._update_host_ref(ctxt, host_ref)
+
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
@@ -120,6 +127,26 @@ class Service(object):
'report_count': 0})
self.service_id = service_ref['id']
+ def _update_host_ref(self, context, host_ref):
+
+ if 0 <= self.manager_class_name.find('ComputeManager'):
+ vcpu = self.manager.driver.get_vcpu_number()
+ memory_mb = self.manager.driver.get_memory_mb()
+ local_gb = self.manager.driver.get_local_gb()
+ hypervisor = self.manager.driver.get_hypervisor_type()
+ version = self.manager.driver.get_hypervisor_version()
+ cpu_xml = self.manager.driver.get_cpu_xml()
+
+ db.host_update(context,
+ host_ref['id'],
+ {'vcpus': vcpu,
+ 'memory_mb': memory_mb,
+ 'local_gb': local_gb,
+ 'hypervisor_type': hypervisor,
+ 'hypervisor_version': version,
+ 'cpu_info': cpu_xml})
+ return host_ref
+
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 3a4b6d469..55e1d8a76 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -38,6 +38,8 @@ Supports KVM, QEMU, UML, and XEN.
import os
import shutil
+import re
+import time
from eventlet import greenthread
from eventlet import event
@@ -83,6 +85,16 @@ flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
+flags.DEFINE_string('live_migration_uri',
+ "qemu+tcp://%s/system",
+ 'Define protocol used by live_migration feature')
+flags.DEFINE_string('live_migration_flag',
+ "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
+ 'Define live migration behavior.')
+flags.DEFINE_integer('live_migration_bandwidth', 0,
+ 'Define live migration behavior')
+flags.DEFINE_string('live_migration_timeout_sec', 10,
+ 'Timeout second for pre_live_migration is completed.')
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
@@ -691,6 +703,43 @@ class LibvirtConnection(object):
return interfaces
+ def get_vcpu_number(self):
+ """ Get vcpu number of physical computer. """
+ return self._conn.getMaxVcpus(None)
+
+ def get_memory_mb(self):
+ """Get the memory size of physical computer ."""
+ meminfo = open('/proc/meminfo').read().split()
+ idx = meminfo.index('MemTotal:')
+ # transforming kb to mb.
+ return int(meminfo[idx + 1]) / 1024
+
+ def get_local_gb(self):
+ """Get the hdd size of physical computer ."""
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024
+
+ def get_hypervisor_type(self):
+ """ Get hypervisor type """
+ return self._conn.getType()
+
+ def get_hypervisor_version(self):
+ """ Get hypervisor version """
+ return self._conn.getVersion()
+
+ def get_cpu_xml(self):
+ """ Get cpuinfo information """
+ xmlstr = self._conn.getCapabilities()
+ xml = libxml2.parseDoc(xmlstr)
+ nodes = xml.xpathEval('//cpu')
+ if len(nodes) != 1:
+ msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' \
+ % len(nodes)
+ msg += '\n' + xml.serialize()
+ raise exception.Invalid(_(msg))
+ cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize())
+ return cpuxmlstr
+
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name, not an Instance, so
@@ -714,6 +763,194 @@ class LibvirtConnection(object):
self.firewall_driver.refresh_security_group_members(security_group_id)
+ def compare_cpu(self, xml):
+ """
+        Check whether the host CPU is compatible with the CPU described by xml.
+        "xml" must be a part of libvirt.openReadonly().getCapabilities().
+        Return values follow virCPUCompareResult.
+        Live migration may proceed only when the return value is positive.
+
+        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+ """
+
+ ret = self._conn.compareCPU(xml, 0)
+ if ret <= 0 :
+ url = 'http://libvirt.org/html/libvirt-libvirt.html'
+ url += '#virCPUCompareResult\n'
+ msg = 'CPU does not have compativility.\n'
+ msg += 'result:%d \n'
+ msg += 'Refer to %s'
+ msg = _(msg)
+ raise exception.Invalid(msg % (ret, url))
+ return
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """ Setting up inevitable filtering rules on compute node,
+ and waiting for its completion.
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filterling rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed."""
+
+        # If no instances have ever been launched at the destination host,
+ # basic-filtering must be set here.
+ self.nwfilter.setup_basic_filtering(instance_ref)
+        # setting up nova-instance-instance-xx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref)
+
+ # wait for completion
+ timeout_count = range(FLAGS.live_migration_timeout_sec * 2)
+ while len(timeout_count) != 0:
+ try:
+ filter_name = 'nova-instance-%s' % instance_ref.name
+ self._conn.nwfilterLookupByName(filter_name)
+ break
+ except libvirt.libvirtError:
+ timeout_count.pop()
+ if len(timeout_count) == 0:
+ ec2_id = instance_ref['hostname']
+ msg = _('Timeout migrating for %s(%s)')
+ raise exception.Error(msg % (ec2_id, instance_ref.name))
+ time.sleep(0.5)
+
+ def live_migration(self, context, instance_ref, dest):
+ """
+ Just spawning live_migration operation for
+ distributing high-load.
+ """
+ greenthread.spawn(self._live_migration, context, instance_ref, dest)
+
+ def _live_migration(self, context, instance_ref, dest):
+ """ Do live migration."""
+
+ # Do live migration.
+ try:
+ duri = FLAGS.live_migration_uri % dest
+
+ flaglist = FLAGS.live_migration_flag.split(',')
+ flagvals = [ getattr(libvirt, x.strip()) for x in flaglist ]
+ logical_sum = reduce(lambda x,y: x|y, flagvals)
+
+ bandwidth = FLAGS.live_migration_bandwidth
+
+ if self.read_only:
+ tmpconn = self._connect(self.libvirt_uri, False)
+ dom = tmpconn.lookupByName(instance_ref.name)
+ dom.migrateToURI(duri, logical_sum, None, bandwidth)
+ tmpconn.close()
+ else :
+ dom = self._conn.lookupByName(instance_ref.name)
+ dom.migrateToURI(duri, logical_sum, None, bandwidth)
+
+ except Exception, e:
+ id = instance_ref['id']
+ db.instance_set_state(context, id, power_state.RUNNING, 'running')
+ try:
+ for volume in db.volume_get_all_by_instance(context, id):
+ db.volume_update(context,
+ volume['id'],
+ {'status': 'in-use'})
+ except exception.NotFound:
+ pass
+
+ raise e
+
+ # Waiting for completion of live_migration.
+ timer = utils.LoopingCall(f=None)
+
+ def wait_for_live_migration():
+
+ try:
+ state = self.get_info(instance_ref.name)['state']
+ except exception.NotFound:
+ timer.stop()
+ self._post_live_migration(context, instance_ref, dest)
+
+ timer.f = wait_for_live_migration
+ timer.start(interval=0.5, now=True)
+
+ def _post_live_migration(self, context, instance_ref, dest):
+ """
+ Post operations for live migration.
+ Mainly, database updating.
+ """
+ # Detaching volumes.
+ # (not necessary in current version )
+
+ # Releasing vlan.
+ # (not necessary in current implementation?)
+
+ # Releasing security group ingress rule.
+ if FLAGS.firewall_driver == \
+ 'nova.virt.libvirt_conn.IptablesFirewallDriver':
+ try :
+ self.firewall_driver.remove_instance(instance_ref)
+ except KeyError, e:
+ pass
+
+ # Database updating.
+ ec2_id = instance_ref['hostname']
+
+ instance_id = instance_ref['id']
+ fixed_ip = db.instance_get_fixed_address(context, instance_id)
+        # Do not return if fixed_ip is not found; otherwise
+        # the instance will never be accessible.
+ if None == fixed_ip:
+ logging.warn('fixed_ip is not found for %s ' % ec2_id)
+ db.fixed_ip_update(context, fixed_ip, {'host': dest})
+ network_ref = db.fixed_ip_get_network(context, fixed_ip)
+ db.network_update(context, network_ref['id'], {'host': dest})
+
+ try:
+ floating_ip \
+ = db.instance_get_floating_address(context, instance_id)
+            # Do not return if floating_ip is not found; otherwise
+            # the instance will never be accessible.
+ if None == floating_ip:
+ logging.error('floating_ip is not found for %s ' % ec2_id)
+ else:
+ floating_ip_ref = db.floating_ip_get_by_address(context,
+ floating_ip)
+ db.floating_ip_update(context,
+ floating_ip_ref['address'],
+ {'host': dest})
+ except exception.NotFound:
+ logging.debug('%s doesnt have floating_ip.. ' % ec2_id)
+ except:
+ msg = 'Live migration: Unexpected error:'
+ msg += '%s cannot inherit floating ip.. ' % ec2_id
+ logging.error(_(msg))
+
+ db.instance_update(context,
+ instance_id,
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': dest})
+
+ try:
+ for volume in db.volume_get_all_by_instance(context, instance_id):
+ db.volume_update(context,
+ volume['id'],
+ {'status': 'in-use'})
+ except exception.NotFound:
+ pass
+
+ logging.info(_('Live migrating %s to %s finishes successfully')
+ % (ec2_id, dest))
+
+
class FirewallDriver(object):
def prepare_instance_filter(self, instance):
"""Prepare filters for the instance.
@@ -832,7 +1069,6 @@ class NWFilterFirewall(FirewallDriver):
# anyway.
return
- logging.info('ensuring static filters')
self._ensure_static_filters()
instance_filter_name = self._instance_filter_name(instance)
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 6bc925f3e..5e2f99577 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -120,7 +120,7 @@ class VolumeDriver(object):
"""Removes an export for a logical volume."""
raise NotImplementedError()
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
raise NotImplementedError()
@@ -182,15 +182,35 @@ class AOEDriver(VolumeDriver):
self._try_execute("sudo vblade-persist destroy %s %s" %
(shelf_id, blade_id))
- def discover_volume(self, _volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
self._execute("sudo aoe-discover")
self._execute("sudo aoe-stat", check_exit_code=False)
+ shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context,
+ volume['id'])
+ return "/dev/etherd/e%s.%s" % (shelf_id, blade_id)
def undiscover_volume(self, _volume):
"""Undiscover volume on a remote host."""
pass
+ def check_for_export(self, context, volume_id):
+ """Make sure whether volume is exported."""
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ volume_id)
+ (out, _err) = self._execute("sudo vblade-persist ls --no-header")
+ exists = False
+ for line in out.split('\n'):
+ param = line.split(' ')
+ if len(param) == 6 and param[0] == str(shelf_id) \
+ and param[1] == str(blade_id) and param[-1] == "run":
+ exists = True
+ break
+ if not exists:
+ logging.warning(_("vblade process for e%s.%s isn't running.")
+ % (shelf_id, blade_id))
+
class FakeAOEDriver(AOEDriver):
"""Logs calls instead of executing."""
@@ -274,7 +294,7 @@ class ISCSIDriver(VolumeDriver):
iscsi_portal = location.split(",")[0]
return (iscsi_name, iscsi_portal)
- def discover_volume(self, volume):
+ def discover_volume(self, _context, volume):
"""Discover volume on a remote host."""
iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
volume['host'])
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 6348539c5..b4754c607 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -138,7 +138,7 @@ class VolumeManager(manager.Manager):
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
path = self.driver.local_path(volume_ref)
else:
- path = self.driver.discover_volume(volume_ref)
+ path = self.driver.discover_volume(context, volume_ref)
return path
def remove_compute_volume(self, context, volume_id):
@@ -149,3 +149,13 @@ class VolumeManager(manager.Manager):
return True
else:
self.driver.undiscover_volume(volume_ref)
+
+ def check_for_export(self, context, instance_id):
+ """Make sure whether volume is exported."""
+ if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver':
+ try:
+ for vol in self.db.volume_get_all_by_instance(context,
+ instance_id):
+ self.driver.check_for_export(context, vol['id'])
+ except exception.NotFound:
+ pass
diff --git a/setup.py b/setup.py
index 3608ff805..a20802e8b 100644
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,7 @@ if os.path.isdir('.bzr'):
version_file.write(vcsversion)
+
class local_BuildDoc(BuildDoc):
def run(self):
for builder in ['html', 'man']: