-rw-r--r--   .mailmap                                                    1
-rwxr-xr-x   bin/nova-manage                                            19
-rw-r--r--   nova/compute/manager.py                                   125
-rw-r--r--   nova/db/api.py                                             10
-rw-r--r--   nova/db/sqlalchemy/api.py                                  26
-rw-r--r--   nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py      19
-rw-r--r--   nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py     10
-rw-r--r--   nova/network/linux_net.py                                  68
-rw-r--r--   nova/network/manager.py                                     6
-rw-r--r--   nova/rpc.py                                                 3
-rw-r--r--   nova/scheduler/driver.py                                   21
-rw-r--r--   nova/scheduler/manager.py                                  35
-rw-r--r--   nova/tests/test_compute.py                                405
-rw-r--r--   nova/tests/test_scheduler.py                              227
-rw-r--r--   nova/tests/test_service.py                                 16
-rw-r--r--   nova/tests/test_virt.py                                   427
-rw-r--r--   nova/tests/test_volume.py                                 131
-rw-r--r--   nova/utils.py                                              38
-rw-r--r--   nova/virt/disk.py                                           1
-rw-r--r--   nova/virt/fake.py                                           9
-rw-r--r--   nova/virt/libvirt_conn.py                                 105
-rw-r--r--   nova/virt/xenapi_conn.py                                    9
-rw-r--r--   nova/volume/driver.py                                      64
-rw-r--r--   nova/volume/manager.py                                     11
-rw-r--r--   nova/volume/san.py                                        335
25 files changed, 1341 insertions, 780 deletions
diff --git a/.mailmap b/.mailmap
index d13219ab0..c6f6c9a8b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -33,3 +33,4 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<justin@fathomdb.com> <superstack@superstack.org>
diff --git a/bin/nova-manage b/bin/nova-manage
index 740fe4abc..7336a582b 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -513,12 +513,10 @@ class InstanceCommands(object):
msg = _('Only KVM is supported for now. Sorry!')
raise exception.Error(msg)
- if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver':
- instance_ref = db.instance_get(ctxt, instance_id)
- if len(instance_ref['volumes']) != 0:
- msg = _(("""Volumes attached by ISCSIDriver"""
- """ are not supported. Sorry!"""))
- raise exception.Error(msg)
+ if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \
+ FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver':
+            msg = _("Only AOEDriver and ISCSIDriver are supported. Sorry!")
+ raise exception.Error(msg)
rpc.call(ctxt,
FLAGS.scheduler_topic,
@@ -616,12 +614,13 @@ class ServiceCommands(object):
if len(service_refs) <= 0:
raise exception.Invalid(_('%s does not exists.') % host)
- service_refs = [s for s in service_refs if s['topic'] == 'compute']
+ service_refs = [s for s in service_refs if s['topic'] == 'compute']
if len(service_refs) <= 0:
raise exception.Invalid(_('%s is not compute node.') % host)
-
- result = rpc.call(ctxt, db.queue_get_for(ctxt, FLAGS.compute_topic, host),
- {"method": "update_available_resource"})
+
+ result = rpc.call(ctxt,
+ db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+ {"method": "update_available_resource"})
class LogCommands(object):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 50ef85b49..cae95dd93 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -183,7 +183,7 @@ class ComputeManager(manager.Manager):
context=context)
self.db.instance_update(context,
instance_id,
- {'host': self.host, 'launched_on':self.host})
+ {'host': self.host, 'launched_on': self.host})
self.db.instance_set_state(context,
instance_id,
@@ -569,20 +569,24 @@ class ComputeManager(manager.Manager):
self.db.volume_detached(context, volume_id)
return True
+ @exception.wrap_exception
def compare_cpu(self, context, cpu_info):
""" Check the host cpu is compatible to a cpu given by xml."""
return self.driver.compare_cpu(cpu_info)
+ @exception.wrap_exception
def mktmpfile(self, context):
"""make tmpfile under FLAGS.instance_path."""
return utils.mktmpfile(FLAGS.instances_path)
+ @exception.wrap_exception
def confirm_tmpfile(self, context, path):
"""Confirm existence of the tmpfile given by path."""
- if not utils.exists(path):
+ if not utils.exists(path):
raise exception.NotFound(_('%s not found') % path)
return utils.remove(path)
+ @exception.wrap_exception
def update_available_resource(self, context):
"""See comments update_resource_info"""
return self.driver.update_available_resource(context, self.host)
@@ -598,7 +602,7 @@ class ComputeManager(manager.Manager):
fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
if not fixed_ip:
msg = _("%(instance_id)s(%(ec2_id)s) doesnt have fixed_ip")
- raise exception.NotFound(msg % locals() )
+ raise exception.NotFound(msg % locals())
# If any volume is mounted, prepare here.
if len(instance_ref['volumes']) == 0:
@@ -614,20 +618,19 @@ class ComputeManager(manager.Manager):
#
# Retry operation is necessary because continuously request comes,
# concorrent request occurs to iptables, then it complains.
- #
max_retry = FLAGS.live_migration_retry_count
for i in range(max_retry):
try:
- self.network_manager.setup_compute_network(context, instance_id)
+ self.network_manager.setup_compute_network(context,
+ instance_id)
break
except exception.ProcessExecutionError, e:
- if i == max_retry-1:
+ if i == max_retry - 1:
raise e
else:
- i_name = instance_ref.name
- m = _("""setup_compute_node fail %(i)d th. """
+ m = _("""setup_compute_network fail %(i)d th. """
"""retry up to %(max_retry)d """
- """(%(ec2_id)s == %(i_name)s""") % locals()
+ """for %(ec2_id)s""") % locals()
LOG.warn(m)
time.sleep(1)
@@ -639,12 +642,13 @@ class ComputeManager(manager.Manager):
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance_ref)
+ #@exception.wrap_exception
def live_migration(self, context, instance_id, dest):
"""executes live migration."""
# Get instance for error handling.
instance_ref = self.db.instance_get(context, instance_id)
- ec2_id = instance_ref['hostname']
+ i_name = instance_ref.name
try:
# Checking volume node is working correctly when any volumes
@@ -656,33 +660,96 @@ class ComputeManager(manager.Manager):
"args": {'instance_id': instance_id}})
# Asking dest host to preparing live migration.
- compute_topic = self.db.queue_get_for(context,
- FLAGS.compute_topic,
- dest)
rpc.call(context,
- compute_topic,
+ self.db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": "pre_live_migration",
"args": {'instance_id': instance_id}})
except Exception, e:
- msg = _("Pre live migration for %(ec2_id)s failed at %(dest)s")
+ msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
LOG.error(msg % locals())
- msg = _("instance %s: starting...")
- LOG.audit(msg, ec2_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.RUNNING,
- 'running')
-
- for v in instance_ref['volumes']:
- self.db.volume_update(context,
- v['id'],
- {'status': 'in-use'})
-
- # e should be raised. just calling "raise" may raise NotFound.
+ self.recover_live_migration(context, instance_ref)
raise e
# Executing live migration
# live_migration might raises exceptions, but
# nothing must be recovered in this version.
- self.driver.live_migration(context, instance_ref, dest)
+ self.driver.live_migration(context, instance_ref, dest,
+ self.post_live_migration,
+ self.recover_live_migration)
+
+ def post_live_migration(self, ctxt, instance_ref, dest):
+ """
+        Post operations for live migration.
+        Mainly database updates.
+ """
+ LOG.info('post_live_migration() is started..')
+ instance_id = instance_ref['id']
+
+ # Detaching volumes.
+ try:
+ for vol in self.db.volume_get_all_by_instance(ctxt, instance_id):
+ self.volume_manager.remove_compute_volume(ctxt, vol['id'])
+ except exception.NotFound:
+ pass
+
+ # Releasing vlan.
+ # (not necessary in current implementation?)
+
+ # Releasing security group ingress rule.
+ self.driver.unfilter_instance(instance_ref)
+
+ # Database updating.
+ i_name = instance_ref.name
+ fixed_ip = self.db.instance_get_fixed_address(ctxt, instance_id)
+        # Do not return if fixed_ip is not found; otherwise the
+        # instance will never be accessible.
+ if None == fixed_ip:
+ logging.warn('fixed_ip is not found for %s ' % i_name)
+ self.db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+
+ try:
+            # Do not return if floating_ip is not found; otherwise the
+            # instance will never be accessible.
+ floating_ip = self.db.instance_get_floating_address(ctxt,
+ instance_id)
+ if None == floating_ip:
+ LOG.info(_('floating_ip is not found for %s'), i_name)
+ else:
+ floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
+ floating_ip)
+ self.db.floating_ip_update(ctxt,
+ floating_ip_ref['address'],
+ {'host': dest})
+ except exception.NotFound:
+ LOG.info(_('floating_ip is not found for %s'), i_name)
+ except:
+ msg = _("""Live migration: Unexpected error:"""
+ """%s cannot inherit floating ip..""") % i_name
+ LOG.error(msg)
+
+ # Restore instance/volume state
+ self.recover_live_migration(ctxt, instance_ref, dest)
+
+        msg = _('Migrating %(i_name)s to %(dest)s finished successfully.')
+ LOG.info(msg % locals())
+        msg = _("""The error below normally occurs. """
+                """Just check if the instance was successfully migrated.\n"""
+                """libvir: QEMU error : Domain not found: no domain """
+                """with matching name..""")
+ LOG.info(msg)
+
+ def recover_live_migration(self, ctxt, instance_ref, host=None):
+ """instance/volume state is recovered from migrating -> running"""
+
+ if not host:
+ host = instance_ref['host']
+
+ self.db.instance_update(ctxt,
+ instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': host})
+
+ for v in instance_ref['volumes']:
+ self.db.volume_update(ctxt, v['id'], {'status': 'in-use'})
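The compute manager change above replaces inline error recovery with a pair of callbacks: the virt driver is handed post_live_migration and recover_live_migration and is expected to invoke exactly one of them. A minimal sketch of that callback contract, using dummy stand-ins for the driver and database (FakeDriver, FakeDB and their print output are illustrative, not Nova's API):

```python
# Sketch of the callback contract introduced above. The driver attempts the
# migration and then calls exactly one of the two callbacks it was given.
class FakeDB(object):
    def instance_update(self, ctxt, instance_id, values):
        print('instance %s updated: %s' % (instance_id, values))


class FakeDriver(object):
    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method):
        try:
            pass  # ... perform the actual migration here ...
        except Exception:
            recover_method(ctxt, instance_ref, dest)
            raise
        post_method(ctxt, instance_ref, dest)


db = FakeDB()


def post_live_migration(ctxt, instance_ref, dest):
    # success path: point the instance at the destination host
    db.instance_update(ctxt, instance_ref['id'],
                       {'state_description': 'running', 'host': dest})


def recover_live_migration(ctxt, instance_ref, host=None):
    # failure path: put the instance back to running on the original host
    db.instance_update(ctxt, instance_ref['id'],
                       {'state_description': 'running',
                        'host': host or instance_ref['host']})


FakeDriver().live_migration(None, {'id': 1, 'host': 'src'}, 'dest',
                            post_live_migration, recover_live_migration)
```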
diff --git a/nova/db/api.py b/nova/db/api.py
index 4296f7633..c92c486f0 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -252,6 +252,11 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_update(context, address, values):
+ """Update a floating ip by address or raise if it doesn't exist."""
+ return IMPL.floating_ip_update(context, address, values)
+
+
####################
@@ -727,6 +732,11 @@ def volume_get_all_by_host(context, host):
return IMPL.volume_get_all_by_host(context, host)
+def volume_get_all_by_instance(context, instance_id):
+    """Get all volumes belonging to an instance."""
+ return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 6cff7fa34..38251a46b 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -497,6 +497,16 @@ def floating_ip_get_by_address(context, address, session=None):
return result
+@require_context
+def floating_ip_update(context, address, values):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ for (key, value) in values.iteritems():
+ floating_ip_ref[key] = value
+ floating_ip_ref.save(session=session)
+
+
###################
@@ -579,7 +589,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
'AND instance_id IS NOT NULL '
'AND allocated = 0',
{'host': host,
- 'time': time.isoformat()})
+ 'time': time})
return result.rowcount
@@ -906,6 +916,7 @@ def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
return 0
return result
+
@require_context
def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
session = get_session()
@@ -919,7 +930,6 @@ def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
return result
-
@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
@@ -1481,6 +1491,18 @@ def volume_get_all_by_host(context, host):
all()
+@require_admin_context
+def volume_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ return result
+
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
index b101c3c31..38210db85 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -204,25 +204,6 @@ services_availability_zone = Column(
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False))
-#services_vcpus = Column('vcpus', Integer(), nullable=True)
-#services_memory_mb = Column('memory_mb', Integer(), nullable=True)
-#services_local_gb = Column('local_gb', Integer(), nullable=True)
-#services_vcpus_used = Column('vcpus_used', Integer(), nullable=True)
-#services_memory_mb_used = Column('memory_mb_used', Integer(), nullable=True)
-#services_local_gb_used = Column('local_gb_used', Integer(), nullable=True)
-#services_hypervisor_type = Column(
-# 'hypervisor_type',
-# Text(convert_unicode=False, assert_unicode=None,
-# unicode_error=None, _warn_on_bytestring=False),
-# nullable=True)
-#services_hypervisor_version = Column(
-# 'hypervisor_version',
-# Integer(), nullable=True)
-#services_cpu_info = Column(
-# 'cpu_info',
-# Text(convert_unicode=False, assert_unicode=None,
-# unicode_error=None, _warn_on_bytestring=False),
-# nullable=True)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py
index 6e0e51d85..7a18d8b34 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py
@@ -41,7 +41,7 @@ services = Table('services', meta,
# Tables to alter
#
instances_launched_on = Column(
- 'launched_on',
+ 'launched_on',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
@@ -52,19 +52,20 @@ services_vcpus_used = Column('vcpus_used', Integer(), nullable=True)
services_memory_mb_used = Column('memory_mb_used', Integer(), nullable=True)
services_local_gb_used = Column('local_gb_used', Integer(), nullable=True)
services_hypervisor_type = Column(
- 'hypervisor_type',
+ 'hypervisor_type',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
services_hypervisor_version = Column(
- 'hypervisor_version',
+ 'hypervisor_version',
Integer(), nullable=True)
services_cpu_info = Column(
- 'cpu_info',
+ 'cpu_info',
Text(convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
nullable=True)
+
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
@@ -80,4 +81,3 @@ def upgrade(migrate_engine):
services.create_column(services_hypervisor_type)
services.create_column(services_hypervisor_version)
services.create_column(services_cpu_info)
-
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index b740d0423..df54606db 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -20,6 +20,7 @@ Implements vlans, bridges, and iptables rules using linux utilities.
import os
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -185,27 +186,72 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
- """Create a bridge unless it already exists"""
+ """Create a bridge unless it already exists.
+
+ :param interface: the interface to create the bridge on.
+ :param net_attrs: dictionary with attributes used to create the bridge.
+
+ If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+ using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
+ the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+ The code will attempt to move any ips that already exist on the interface
+ onto the bridge and reset the default gateway if necessary.
+ """
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
_execute("sudo brctl stp %s off" % bridge)
- if interface:
- _execute("sudo brctl addif %s %s" % (bridge, interface))
+ _execute("sudo ip link set %s up" % bridge)
if net_attrs:
- _execute("sudo ip addr add %s/%s dev %s broadcast %s" % \
- (net_attrs['gateway'],
- net_attrs['netmask'],
- bridge,
- net_attrs['broadcast']))
+ # NOTE(vish): The ip for dnsmasq has to be the first address on the
+        #             bridge for it to respond to requests properly
+ suffix = net_attrs['cidr'].rpartition('/')[2]
+ out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
+ (net_attrs['gateway'],
+ suffix,
+ net_attrs['broadcast'],
+ bridge),
+ check_exit_code=False)
+ if err and err != "RTNETLINK answers: File exists\n":
+ raise exception.Error("Failed to add ip: %s" % err)
if(FLAGS.use_ipv6):
_execute("sudo ip -f inet6 addr change %s dev %s" %
(net_attrs['cidr_v6'], bridge))
- _execute("sudo ip link set %s up" % bridge)
- else:
- _execute("sudo ip link set %s up" % bridge)
+ # NOTE(vish): If the public interface is the same as the
+ # bridge, then the bridge has to be in promiscuous
+        #             mode to forward packets properly.
+ if(FLAGS.public_interface == bridge):
+ _execute("sudo ip link set dev %s promisc on" % bridge)
+ if interface:
+ # NOTE(vish): This will break if there is already an ip on the
+ # interface, so we move any ips to the bridge
+ gateway = None
+ out, err = _execute("sudo route -n")
+ for line in out.split("\n"):
+ fields = line.split()
+ if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
+ gateway = fields[1]
+ out, err = _execute("sudo ip addr show dev %s scope global" %
+ interface)
+ for line in out.split("\n"):
+ fields = line.split()
+ if fields and fields[0] == "inet":
+ params = ' '.join(fields[1:-1])
+ _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
+ _execute("sudo ip addr add %s dev %s" % (params, bridge))
+ if gateway:
+ _execute("sudo route add 0.0.0.0 gw %s" % gateway)
+ out, err = _execute("sudo brctl addif %s %s" %
+ (bridge, interface),
+ check_exit_code=False)
+
+ if (err and err != "device %s is already a member of a bridge; can't "
+ "enslave it to bridge %s.\n" % (interface, bridge)):
+ raise exception.Error("Failed to add interface: %s" % err)
+
if FLAGS.use_nova_chains:
(out, err) = _execute("sudo iptables -N nova_forward",
check_exit_code=False)
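The new ensure_bridge() logic finds the interface's current default gateway by scanning `route -n` output and re-adds it after the addresses are moved onto the bridge. A standalone sketch of just that parsing step, run against a captured sample of `route -n` output (the sample text is illustrative):

```python
# Parse the default-gateway entry for a given interface out of `route -n`
# output, mirroring the loop added to ensure_bridge() above.
SAMPLE_ROUTE_N = """\
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.0.1        0.0.0.0         UG    0      0        0 eth0
10.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 eth0
"""


def find_gateway(route_output, interface):
    for line in route_output.split("\n"):
        fields = line.split()
        # the default route has destination 0.0.0.0 and ends with the iface
        if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
            return fields[1]
    return None


print(find_gateway(SAMPLE_ROUTE_N, "eth0"))   # -> 10.0.0.1
```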
diff --git a/nova/network/manager.py b/nova/network/manager.py
index fbcbea131..8eb9f041b 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -118,6 +118,10 @@ class NetworkManager(manager.Manager):
super(NetworkManager, self).__init__(*args, **kwargs)
def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ self.driver.init_host()
# Set up networking for the projects for which we're already
# the designated network host.
ctxt = context.get_admin_context()
@@ -395,7 +399,6 @@ class FlatDHCPManager(FlatManager):
standalone service.
"""
super(FlatDHCPManager, self).init_host()
- self.driver.init_host()
self.driver.metadata_forward()
def setup_compute_network(self, context, instance_id):
@@ -465,7 +468,6 @@ class VlanManager(NetworkManager):
standalone service.
"""
super(VlanManager, self).init_host()
- self.driver.init_host()
self.driver.metadata_forward()
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
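The manager change above hoists driver.init_host() from FlatDHCPManager/VlanManager into the base NetworkManager.init_host(), so subclasses only add their own steps on top of super(). A tiny sketch of that pattern (the class bodies are illustrative stand-ins):

```python
class FakeDriver(object):
    def init_host(self):
        print('driver initialized')


class NetworkManager(object):
    def __init__(self):
        self.driver = FakeDriver()

    def init_host(self):
        # the base class now owns driver initialization
        self.driver.init_host()


class FlatDHCPManager(NetworkManager):
    def init_host(self):
        super(FlatDHCPManager, self).init_host()
        print('metadata forwarding configured')


FlatDHCPManager().init_host()
```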
diff --git a/nova/rpc.py b/nova/rpc.py
index 1ecb5d97c..01fc6d44b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -251,9 +251,6 @@ def msg_reply(msg_id, reply=None, failure=None):
try:
publisher.send({'result': reply, 'failure': failure})
except TypeError:
- print '>>>>>>>>>>>>>>>>>>'
- print reply
- print '>>>>>>>>>>>>>>>>>>'
publisher.send(
{'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index a2bfb9363..bbebf3275 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -206,8 +206,8 @@ class Scheduler(object):
except rpc.RemoteError, e:
ec2_id = instance_ref['hostname']
src = instance_ref['host']
- msg = _(("""%(dest)s doesnt have compatibility to %(src)s"""
- """(where %(ec2_id)s was launched at)"""))
+            msg = _("""%(dest)s is not compatible with %(src)s"""
+                    """ (where %(ec2_id)s was launched)""")
logging.exception(msg % locals())
raise e
@@ -219,10 +219,8 @@ class Scheduler(object):
without any shared storage) will be available, local storage
checking is also necessary.
"""
-
# Getting instance information
ec2_id = instance_ref['hostname']
- mem = instance_ref['memory_mb']
# Getting host information
service_refs = db.service_get_all_by_host(context, dest)
@@ -233,10 +231,10 @@ class Scheduler(object):
mem_total = int(service_ref['memory_mb'])
mem_used = int(service_ref['memory_mb_used'])
mem_avail = mem_total - mem_used
- mem_inst = instance_ref['memory_mb']
+ mem_inst = instance_ref['memory_mb']
if mem_avail <= mem_inst:
msg = _("""%(ec2_id)s is not capable to migrate %(dest)s"""
- """(host:%(mem_avail)s <= instance:%(mem_inst)s)""")
+ """(host:%(mem_avail)s <= instance:%(mem_inst)s)""")
raise exception.NotEmpty(msg % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
@@ -259,12 +257,11 @@ class Scheduler(object):
# make sure existence at src host.
try:
rpc.call(context, src_t,
- {"method": 'confirm_tmpfile', "args":{'path':filename}})
+ {"method": 'confirm_tmpfile', "args": {'path': filename}})
except (rpc.RemoteError, exception.NotFound), e:
- ipath = FLAGS.instance_path
- msg = (_("""Cannot comfirm %(ipath)s at %(dest)s to """
- """confirm shared storage."""
- """Check if %(ipath)s is same shared storage."""))
- logging.error(msg % locals())
+ ipath = FLAGS.instance_path
+            msg = _("""Cannot confirm %(ipath)s at %(dest)s is located on"""
+                    """ the same shared storage.""") % locals()
+ logging.error(msg)
raise e
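has_enough_resource, whose memory check is touched above, compares the destination's free memory against the instance's memory_mb and raises when the instance will not fit. A self-contained sketch of just that arithmetic, using plain dictionaries in place of the service and instance records:

```python
class NotEnoughResource(Exception):
    """Stand-in for the nova.exception type used by the real check."""


def has_enough_memory(service_ref, instance_ref):
    mem_total = int(service_ref['memory_mb'])
    mem_used = int(service_ref['memory_mb_used'])
    mem_avail = mem_total - mem_used
    mem_inst = instance_ref['memory_mb']
    if mem_avail <= mem_inst:
        raise NotEnoughResource('host has %d MB free, instance needs %d MB'
                                % (mem_avail, mem_inst))


# 32 GB total with 8 GB used: a 2 GB instance fits, a 30 GB one would not.
has_enough_memory({'memory_mb': 32768, 'memory_mb_used': 8192},
                  {'memory_mb': 2048})
```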
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a292500bb..af54c72be 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -75,24 +75,23 @@ class SchedulerManager(manager.Manager):
def show_host_resource(self, context, host, *args):
"""show the physical/usage resource given by hosts."""
- computes = db.service_get_all_compute_sorted(context)
- computes = [s for s,v in computes if s['host'] == host]
- if 0 == len(computes):
+ compute_refs = db.service_get_all_compute_sorted(context)
+ compute_refs = [s for s, v in compute_refs if s['host'] == host]
+ if 0 == len(compute_refs):
return {'ret': False, 'msg': 'No such Host or not compute node.'}
- service_ref = computes[0]
# Getting physical resource information
- h_resource = {'vcpus': service_ref['vcpus'],
- 'memory_mb': service_ref['memory_mb'],
- 'local_gb': service_ref['local_gb'],
- 'vcpus_used': service_ref['vcpus_used'],
- 'memory_mb_used': service_ref['memory_mb_used'],
- 'local_gb_used': service_ref['local_gb_used']}
+ h_resource = {'vcpus': compute_refs[0]['vcpus'],
+ 'memory_mb': compute_refs[0]['memory_mb'],
+ 'local_gb': compute_refs[0]['local_gb'],
+ 'vcpus_used': compute_refs[0]['vcpus_used'],
+ 'memory_mb_used': compute_refs[0]['memory_mb_used'],
+ 'local_gb_used': compute_refs[0]['local_gb_used']}
# Getting usage resource information
u_resource = {}
instances_refs = db.instance_get_all_by_host(context,
- service_ref['host'])
+ compute_refs[0]['host'])
if 0 == len(instances_refs):
return {'ret': True,
@@ -101,18 +100,18 @@ class SchedulerManager(manager.Manager):
project_ids = [i['project_id'] for i in instances_refs]
project_ids = list(set(project_ids))
- for p_id in project_ids:
+ for i in project_ids:
vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
host,
- p_id)
+ i)
mem = db.instance_get_memory_sum_by_host_and_project(context,
host,
- p_id)
+ i)
hdd = db.instance_get_disk_sum_by_host_and_project(context,
host,
- p_id)
- u_resource[p_id] = {'vcpus': int(vcpus),
- 'memory_mb': int(mem),
- 'local_gb': int(hdd)}
+ i)
+ u_resource[i] = {'vcpus': int(vcpus),
+ 'memory_mb': int(mem),
+ 'local_gb': int(hdd)}
return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource}
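show_host_resource builds the u_resource dictionary by summing vcpus/memory/disk per project on the host; the hunk above mostly renames the loop variable. A small sketch of the same aggregation, with plain Python sums standing in for the db.instance_get_*_sum_by_host_and_project helpers:

```python
instances = [
    {'project_id': 'p-01', 'vcpus': 10, 'memory_mb': 4, 'local_gb': 20},
    {'project_id': 'p-02', 'vcpus': 10, 'memory_mb': 20, 'local_gb': 30},
]

u_resource = {}
for project_id in set(i['project_id'] for i in instances):
    mine = [i for i in instances if i['project_id'] == project_id]
    u_resource[project_id] = {'vcpus': sum(i['vcpus'] for i in mine),
                              'memory_mb': sum(i['memory_mb'] for i in mine),
                              'local_gb': sum(i['local_gb'] for i in mine)}

print(u_resource)
```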
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 3154fc5c2..2862d9650 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -82,6 +82,41 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)
+ def _get_dummy_instance(self):
+        """Get a mock-return-value instance object.
+        Use this in any testcase that executes after test_run_terminate.
+ """
+ vol1 = models.Volume()
+ vol1.__setitem__('id', 1)
+ vol2 = models.Volume()
+ vol2.__setitem__('id', 2)
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+ instance_ref.__setitem__('volumes', [vol1, vol2])
+ instance_ref.__setitem__('hostname', 'i-00000001')
+ instance_ref.__setitem__('host', 'dummy')
+ return instance_ref
+
+ def test_create_instance_defaults_display_name(self):
+ """Verify that an instance cannot be created without a display_name."""
+ cases = [dict(), dict(display_name=None)]
+ for instance in cases:
+ ref = self.compute_api.create(self.context,
+ FLAGS.default_instance_type, None, **instance)
+ try:
+ self.assertNotEqual(ref[0]['display_name'], None)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
+ def test_create_instance_associates_security_groups(self):
+ """Make sure create associates security groups"""
+ group = self._create_group()
+ instance_ref = models.Instance()
+ instance_ref.__setitem__('id', 1)
+ instance_ref.__setitem__('volumes', [{'id': 1}, {'id': 2}])
+ instance_ref.__setitem__('hostname', 'i-00000001')
+ return instance_ref
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -270,13 +305,12 @@ class ComputeTestCase(test.TestCase):
self.network_manager = utils.import_object(FLAGS.network_manager)
self.compute_driver = utils.import_object(FLAGS.compute_driver)
- def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
"""
if instances that are intended to be migrated doesnt have fixed_ip
(not happens usually), pre_live_migration has to raise Exception.
"""
- instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
- 'hostname':'i-000000001'}
+ instance_ref = self._get_dummy_instance()
c = context.get_admin_context()
i_id = instance_ref['id']
@@ -291,16 +325,14 @@ class ComputeTestCase(test.TestCase):
c, instance_ref['id'])
self.mox.ResetAll()
- def test_pre_live_migration_instance_has_volume(self):
- """if any volumes are attached to the instances that are
+ def test_pre_live_migration_instance_has_volume(self):
+ """if any volumes are attached to the instances that are
intended to be migrated, setup_compute_volume must be
called because aoe module should be inserted at destination
host. This testcase checks on it.
"""
- instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
- 'hostname':'i-000000001'}
+ i_ref = self._get_dummy_instance()
c = context.get_admin_context()
- i_id=instance_ref['id']
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
@@ -308,229 +340,318 @@ class ComputeTestCase(test.TestCase):
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
- dbmock.instance_get(c, i_id).AndReturn(instance_ref)
- dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
- for i in range(len(instance_ref['volumes'])):
- vid = instance_ref['volumes'][i]['id']
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
- netmock.setup_compute_network(c, instance_ref['id'])
- drivermock.ensure_filtering_rules_for_instance(instance_ref)
-
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
self.compute.db = dbmock
self.compute.volume_manager = volmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.mox.ReplayAll()
- ret = self.compute.pre_live_migration(c, i_id)
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
self.assertEqual(ret, None)
self.mox.ResetAll()
-
- def test_pre_live_migration_instance_has_no_volume(self):
- """if any volumes are not attached to the instances that are
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """if any volumes are not attached to the instances that are
intended to be migrated, log message should be appears
because administrator can proove instance conditions before
live_migration if any trouble occurs.
"""
- instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'}
+ i_ref = self._get_dummy_instance()
+ i_ref.__setitem__('volumes', [])
c = context.get_admin_context()
- i_id = instance_ref['id']
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
-
- dbmock.instance_get(c, i_id).AndReturn(instance_ref)
- dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
- compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname'])
- netmock.setup_compute_network(c, i_id)
- drivermock.ensure_filtering_rules_for_instance(instance_ref)
-
+ compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.mox.ReplayAll()
- ret = self.compute.pre_live_migration(c, i_id)
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
self.assertEqual(ret, None)
self.mox.ResetAll()
- def test_pre_live_migration_setup_compute_node_fail(self):
+ def test_pre_live_migration_setup_compute_node_fail(self):
"""setup_compute_node sometimes fail since concurrent request
comes to iptables and iptables complains. Then this method
tries to retry, but raise exception in case of over
- max_retry_count. this method confirms raising exception.
+ max_retry_count. this method confirms raising exception.
"""
-
- instance_ref = models.Instance()
- instance_ref.__setitem__('id', 1)
- instance_ref.__setitem__('volumes', [])
- instance_ref.__setitem__('hostname', 'i-ec2id')
-
+ i_ref = self._get_dummy_instance()
c = context.get_admin_context()
- i_id = instance_ref['id']
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
- drivermock = self.mox.CreateMock(self.compute_driver)
-
- dbmock.instance_get(c, i_id).AndReturn(instance_ref)
- dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy')
- self.mox.StubOutWithMock(compute_manager.LOG, 'info')
- compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname'])
+ volmock = self.mox.CreateMock(self.volume_manager)
- for i in range(FLAGS.live_migration_retry_count):
- netmock.setup_compute_network(c, i_id).\
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
+ for i in range(FLAGS.live_migration_retry_count):
+ netmock.setup_compute_network(c, i_ref['id']).\
AndRaise(exception.ProcessExecutionError())
self.compute.db = dbmock
self.compute.network_manager = netmock
- self.compute.driver = drivermock
+ self.compute.volume_manager = volmock
self.mox.ReplayAll()
- self.assertRaises(exception.ProcessExecutionError,
+ self.assertRaises(exception.ProcessExecutionError,
self.compute.pre_live_migration,
- c, i_id)
+ c, i_ref['id'])
self.mox.ResetAll()
- def test_live_migration_instance_has_volume(self):
+ def test_live_migration_instance_has_volume(self):
"""Any volumes are mounted by instances to be migrated are found,
vblade health must be checked before starting live-migration.
And that is checked by check_for_export().
- This testcase confirms check_for_export() is called.
+ This testcase confirms check_for_export() is called.
"""
- instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'}
+ i_ref = self._get_dummy_instance()
c = context.get_admin_context()
- dest='dummydest'
- i_id = instance_ref['id']
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
- self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
- drivermock = self.mox.CreateMock(self.compute_driver)
-
- dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, FLAGS.volume_topic,
- {"method": "check_for_export",
- "args": {'instance_id': i_id}}).InAnyOrder('g1')
- rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
- {"method": "pre_live_migration",
- "args": {'instance_id': i_id}}).InAnyOrder('g1')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
self.compute.db = dbmock
- self.compute.driver = drivermock
self.mox.ReplayAll()
- ret = self.compute.live_migration(c, i_id, dest)
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()
- def test_live_migration_instance_has_volume_and_exception(self):
- """In addition to test_live_migration_instance_has_volume testcase,
- this testcase confirms if any exception raises from check_for_export().
- Then, valid seaquence of this method should recovering instance/volumes
- status(ex. instance['state_description'] is changed from 'migrating'
- -> 'running', was changed by scheduler)
+ def test_live_migration_instance_has_volume_and_exception(self):
+ """In addition to test_live_migration_instance_has_volume testcase,
+        this testcase confirms the case where an exception is raised from
+        check_for_export(). This method should then recover the
+        instance/volume status (e.g. instance['state_description'] is
+        changed back from 'migrating' to 'running', as set by the scheduler)
"""
- instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
- 'hostname':'i-000000001'}
- dest='dummydest'
+ i_ref = self._get_dummy_instance()
c = context.get_admin_context()
- i_id = instance_ref['id']
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
- self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
- drivermock = self.mox.CreateMock(self.compute_driver)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+ self.mox.ResetAll()
- dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
+ def test_live_migration_instance_has_no_volume_and_exception(self):
+ """Simpler than
+ test_live_migration_instance_has_volume_and_exception
+ """
+ i_ref = self._get_dummy_instance()
+ i_ref.__setitem__('volumes', [])
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, FLAGS.volume_topic,
- {"method": "check_for_export",
- "args": {'instance_id': i_id}}).InAnyOrder('g1')
- compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
- dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
- rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
- {"method": "pre_live_migration",
- "args": {'instance_id': i_id}}).\
- InAnyOrder('g1').AndRaise(rpc.RemoteError('', '', ''))
- #self.mox.StubOutWithMock(compute_manager.LOG, 'error')
- #compute_manager.LOG.error('Pre live migration for %s failed at %s',
- # instance_ref['hostname'], dest)
- dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
- for i in range(len(instance_ref['volumes'])):
- vid = instance_ref['volumes'][i]['id']
- dbmock.volume_update(c, vid, {'status': 'in-use'})
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
self.compute.db = dbmock
- self.compute.driver = drivermock
self.mox.ReplayAll()
- self.assertRaises(rpc.RemoteError,
+ self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
- c, i_id, dest)
+ c, i_ref['id'], i_ref['host'])
+ self.mox.ResetAll()
+
+ def test_live_migration_instance_has_no_volume(self):
+ """Simpler than test_live_migration_instance_has_volume."""
+ i_ref = self._get_dummy_instance()
+ i_ref.__setitem__('volumes', [])
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
self.mox.ResetAll()
- def test_live_migration_instance_has_no_volume_and_exception(self):
- """Simpler than test_live_migration_instance_has_volume_and_exception"""
+ def test_post_live_migration_working_correctly(self):
+        """post_live_migration works correctly."""
- instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'}
- dest='dummydest'
+ i_ref = self._get_dummy_instance()
+ fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+ floating_ip_ref = {'id': 1, 'address': '2.2.2.2'}
c = context.get_admin_context()
- i_id = instance_ref['id']
- self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
- drivermock = self.mox.CreateMock(self.compute_driver)
+ dbmock.volume_get_all_by_instance(c, i_ref['id']).\
+ AndReturn(i_ref['volumes'])
+ self.mox.StubOutWithMock(self.compute.volume_manager,
+ 'remove_compute_volume')
+ for v in i_ref['volumes']:
+ self.compute.volume_manager.remove_compute_volume(c, v['id'])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
+
+ fixed_ip = fixed_ip_ref['address']
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
+ dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
+
+ fl_ip = floating_ip_ref['address']
+ dbmock.instance_get_floating_address(c, i_ref['id']).AndReturn(fl_ip)
+ dbmock.floating_ip_get_by_address(c, fl_ip).AndReturn(floating_ip_ref)
+ dbmock.floating_ip_update(c, floating_ip_ref['address'],
+ {'host': i_ref['host']})
+ dbmock.instance_update(c, i_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
- dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref)
- self.mox.StubOutWithMock(rpc, 'call')
- compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
- dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
- rpc.call(c, compute_topic,
- {"method": "pre_live_migration",
- "args": {'instance_id': i_id}}).\
- AndRaise(rpc.RemoteError('', '', ''))
- #self.mox.StubOutWithMock(compute_manager.LOG, 'error')
- #compute_manager.LOG.error('Pre live migration for %s failed at %s',
- # instance_ref['hostname'], dest)
- dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
+ self.assertEqual(ret, None)
+ self.mox.ResetAll()
+
+ def test_post_live_migration_no_floating_ip(self):
+ """
+        post_live_migration works correctly
+        (in case the instance doesn't have a floating ip)
+ """
+ i_ref = self._get_dummy_instance()
+ i_ref.__setitem__('volumes', [])
+ fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+ floating_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+ c = context.get_admin_context()
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.volume_get_all_by_instance(c, i_ref['id']).AndReturn([])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
+
+ fixed_ip = fixed_ip_ref['address']
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
+ dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
+
+ dbmock.instance_get_floating_address(c, i_ref['id']).AndReturn(None)
+ dbmock.instance_update(c, i_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
self.compute.db = dbmock
- self.compute.driver = drivermock
self.mox.ReplayAll()
- self.assertRaises(rpc.RemoteError,
- self.compute.live_migration,
- c, i_id, dest)
+ ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
+ self.assertEqual(ret, None)
self.mox.ResetAll()
- def test_live_migration_instance_has_volume(self):
- """Simpler version than test_live_migration_instance_has_volume."""
- instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
- 'hostname':'i-000000001'}
+ def test_post_live_migration_no_floating_ip_with_exception(self):
+ """
+        post_live_migration works correctly
+        (in case the instance doesn't have a floating ip and an exception is raised)
+ """
+ i_ref = self._get_dummy_instance()
+ i_ref.__setitem__('volumes', [])
+ fixed_ip_ref = {'id': 1, 'address': '1.1.1.1'}
+ floating_ip_ref = {'id': 1, 'address': '1.1.1.1'}
c = context.get_admin_context()
- dest='dummydest'
- i_id = instance_ref['id']
- self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
- drivermock = self.mox.CreateMock(self.compute_driver)
+ dbmock.volume_get_all_by_instance(c, i_ref['id']).AndReturn([])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
- dbmock.instance_get(c, i_id).AndReturn(instance_ref)
- self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, FLAGS.volume_topic,
- {"method": "check_for_export",
- "args": {'instance_id': i_id}}).InAnyOrder('g1')
- compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
- dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
- rpc.call(c, compute_topic,
- {"method": "pre_live_migration",
- "args": {'instance_id': i_id}}).InAnyOrder('g1')
- drivermock.live_migration(c, instance_ref, dest)
+ fixed_ip = fixed_ip_ref['address']
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn(fixed_ip)
+ dbmock.fixed_ip_update(c, fixed_ip, {'host': i_ref['host']})
+ dbmock.instance_get_floating_address(c, i_ref['id']).\
+ AndRaise(exception.NotFound())
+
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_('post_live_migration() is started..'))
+ compute_manager.LOG.info(_('floating_ip is not found for %s'),
+ i_ref.name)
+ # first 2 messages are checked.
+ compute_manager.LOG.info(mox.IgnoreArg())
+ compute_manager.LOG.info(mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'instance_update')
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ self.mox.StubOutWithMock(db, 'volume_update')
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
self.compute.db = dbmock
- self.compute.driver = drivermock
self.mox.ReplayAll()
- ret = self.compute.live_migration(c, i_id, dest)
+ ret = self.compute.post_live_migration(c, i_ref, i_ref['host'])
self.assertEqual(ret, None)
self.mox.ResetAll()
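The compute tests above rely on mox's record/replay style throughout: mocks are built with CreateMock/StubOutWithMock, expected calls are recorded (with AndReturn/AndRaise where needed), ReplayAll() switches to replay mode, and the code under test must then make exactly the recorded calls. A minimal generic sketch of that flow, assuming the python-mox package is installed (the Notifier class is illustrative):

```python
import mox


class Notifier(object):
    def send(self, msg):
        raise NotImplementedError()


m = mox.Mox()
notifier = m.CreateMock(Notifier)
notifier.send('hello').AndReturn(True)   # record the expected call
m.ReplayAll()                            # switch from record to replay mode

assert notifier.send('hello') is True    # must match what was recorded
m.VerifyAll()                            # fails if expectations were unmet
```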
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 708b427cc..e31e66c31 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -80,10 +80,10 @@ class SchedulerTestCase(test.TestCase):
'args': {'num': 7}})
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
-
+
def test_show_host_resource_host_not_exit(self):
"""
- A testcase of driver.has_enough_resource
+ A testcase of driver.has_enough_resource
given host does not exists.
"""
scheduler = manager.SchedulerManager()
@@ -95,26 +95,26 @@ class SchedulerTestCase(test.TestCase):
AndReturn([])
self.mox.ReplayAll()
- result = scheduler.show_host_resource(ctxt, dest)
+ result = scheduler.show_host_resource(ctxt, dest)
# ret should be dict
keys = ['ret', 'msg']
c1 = list(set(result.keys())) == list(set(keys))
c2 = not result['ret']
c3 = result['msg'].find('No such Host or not compute node') <= 0
- self.assertTrue( c1 and c2 and c3)
+ self.assertTrue(c1 and c2 and c3)
self.mox.UnsetStubs()
def test_show_host_resource_no_project(self):
"""
- A testcase of driver.show_host_resource
+ A testcase of driver.show_host_resource
no instance stays on the given host
"""
scheduler = manager.SchedulerManager()
dest = 'dummydest'
ctxt = context.get_admin_context()
- r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
- 'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
- service_ref = {'id':1, 'host':dest}
+ r0 = {'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10}
+ service_ref = {'id': 1, 'host': dest}
service_ref.update(r0)
self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
@@ -122,35 +122,35 @@ class SchedulerTestCase(test.TestCase):
AndReturn([(service_ref, 0)])
manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\
AndReturn([])
-
+
self.mox.ReplayAll()
- result = scheduler.show_host_resource(ctxt, dest)
+ result = scheduler.show_host_resource(ctxt, dest)
# ret should be dict
keys = ['ret', 'phy_resource', 'usage']
c1 = list(set(result.keys())) == list(set(keys))
c2 = result['ret']
c3 = result['phy_resource'] == r0
c4 = result['usage'] == {}
- self.assertTrue( c1 and c2 and c3 and c4)
+ self.assertTrue(c1 and c2 and c3 and c4)
self.mox.UnsetStubs()
def test_show_host_resource_works_correctly(self):
"""
- A testcase of driver.show_host_resource
+ A testcase of driver.show_host_resource
to make sure everything finished with no error.
"""
scheduler = manager.SchedulerManager()
dest = 'dummydest'
ctxt = context.get_admin_context()
- r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
- 'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
- r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20}
- r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30}
- service_ref = {'id':1, 'host':dest}
+ r0 = {'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10}
+ r1 = {'vcpus': 10, 'memory_mb': 4, 'local_gb': 20}
+ r2 = {'vcpus': 10, 'memory_mb': 20, 'local_gb': 30}
+ service_ref = {'id': 1, 'host': dest}
service_ref.update(r0)
- instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'}
+ instance_ref2 = {'id': 2, 'project_id': 'p-01', 'host': 'dummy'}
instance_ref2.update(r1)
- instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'}
+ instance_ref3 = {'id': 3, 'project_id': 'p-02', 'host': 'dummy'}
instance_ref3.update(r2)
self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
@@ -165,9 +165,9 @@ class SchedulerTestCase(test.TestCase):
ctxt, dest, p).AndReturn(r2['memory_mb'])
manager.db.instance_get_disk_sum_by_host_and_project(
ctxt, dest, p).AndReturn(r2['local_gb'])
-
+
self.mox.ReplayAll()
- result = scheduler.show_host_resource(ctxt, dest)
+ result = scheduler.show_host_resource(ctxt, dest)
# ret should be dict
keys = ['ret', 'phy_resource', 'usage']
c1 = list(set(result.keys())) == list(set(keys))
@@ -176,7 +176,7 @@ class SchedulerTestCase(test.TestCase):
c4 = result['usage'].keys() == ['p-01', 'p-02']
c5 = result['usage']['p-01'] == r2
c6 = result['usage']['p-02'] == r2
- self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6)
+ self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6)
self.mox.UnsetStubs()
@@ -498,8 +498,8 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy',
- 'volumes':[{'id':1}, {'id':2}]}
+ i_ref = {'id': 1, 'hostname': 'i-00000001', 'host': 'dummy',
+ 'volumes': [{'id': 1}, {'id': 2}]}
dest = 'dummydest'
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
@@ -518,10 +518,9 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.volume_update(mox.IgnoreArg(), v['id'],
{'status': 'migrating'})
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- kwargs={'instance_id':i_ref['id'], 'dest':dest}
+ kwargs = {'instance_id': i_ref['id'], 'dest': dest}
rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
- {"method": 'live_migration',
- "args": kwargs})
+ {"method": 'live_migration', "args": kwargs})
self.mox.ReplayAll()
self.scheduler.live_migration(ctxt, topic,
@@ -538,7 +537,7 @@ class SimpleDriverTestCase(test.TestCase):
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': []}
dest = 'dummydest'
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
@@ -553,10 +552,9 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'],
power_state.PAUSED, 'migrating')
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- kwargs={'instance_id':i_ref['id'], 'dest':dest}
+ kwargs = {'instance_id': i_ref['id'], 'dest': dest}
rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']),
- {"method": 'live_migration',
- "args": kwargs})
+ {"method": 'live_migration', "args": kwargs})
self.mox.ReplayAll()
self.scheduler.live_migration(ctxt, topic,
@@ -565,15 +563,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_src_check_instance_not_running(self):
"""
- A testcase of driver._live_migration_src_check.
+ A testcase of driver._live_migration_src_check.
The instance given by instance_id is not running.
"""
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
dest = 'dummydest'
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
- 'volumes':[], 'state_description':'migrating',
- 'state':power_state.RUNNING}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+ 'volumes': [], 'state_description': 'migrating',
+ 'state': power_state.RUNNING}
self.mox.ReplayAll()
try:
@@ -584,21 +582,21 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_src_check_volume_node_not_alive(self):
"""
- A testcase of driver._live_migration_src_check.
- Volume node is not alive if any volumes are attached to
+ A testcase of driver._live_migration_src_check.
+ Volume node is not alive if any volumes are attached to
the given instance.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
- 'volumes':[{'id':1}, {'id':2}],
- 'state_description':'running', 'state':power_state.RUNNING}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+ 'volumes': [{'id': 1}, {'id': 2}],
+ 'state_description': 'running', 'state': power_state.RUNNING}
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\
AndReturn([])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
@@ -608,19 +606,19 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_src_check_volume_node_not_alive(self):
"""
- A testcase of driver._live_migration_src_check.
+ A testcase of driver._live_migration_src_check.
The testcase make sure src-compute node is alive.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
- 'state_description':'running', 'state':power_state.RUNNING}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': [],
+ 'state_description': 'running', 'state': power_state.RUNNING}
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
AndReturn([])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
@@ -630,15 +628,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_src_check_works_correctly(self):
"""
- A testcase of driver._live_migration_src_check.
+ A testcase of driver._live_migration_src_check.
The testcase make sure everything finished with no error.
"""
driver_i = self.scheduler.driver
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
- 'state_description':'running', 'state':power_state.RUNNING}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy', 'volumes': [],
+ 'state_description': 'running', 'state': power_state.RUNNING}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -648,7 +646,7 @@ class SimpleDriverTestCase(test.TestCase):
AndReturn([service_ref])
self.mox.StubOutWithMock(driver_i, 'service_is_up')
driver_i.service_is_up(service_ref).AndReturn(True)
-
+
self.mox.ReplayAll()
ret = driver_i._live_migration_src_check(ctxt, i_ref)
self.assertTrue(ret == None)
@@ -656,14 +654,14 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_dest_check_service_not_exists(self):
"""
- A testcase of driver._live_migration_dst_check.
+ A testcase of driver._live_migration_dst_check.
Destination host does not exist.
"""
driver_i = self.scheduler.driver
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -671,7 +669,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
AndReturn([])
-
+
self.mox.ReplayAll()
try:
driver_i._live_migration_dest_check(ctxt, i_ref, dest)
@@ -681,14 +679,14 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_dest_check_service_isnot_compute(self):
"""
- A testcase of driver._live_migration_dst_check.
+ A testcase of driver._live_migration_dst_check.
Destination host does not provide compute.
"""
driver_i = self.scheduler.driver
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -697,7 +695,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
AndReturn([service_ref])
-
+
self.mox.ReplayAll()
try:
driver_i._live_migration_dest_check(ctxt, i_ref, dest)
@@ -707,13 +705,13 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_dest_check_service_not_alive(self):
"""
- A testcase of driver._live_migration_dst_check.
+ A testcase of driver._live_migration_dst_check.
Destination host compute service is not alive.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -724,7 +722,7 @@ class SimpleDriverTestCase(test.TestCase):
AndReturn([service_ref])
self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
self.scheduler.driver.service_is_up(service_ref).AndReturn(False)
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
@@ -734,13 +732,13 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_dest_check_service_same_host(self):
"""
- A testcase of driver._live_migration_dst_check.
+ A testcase of driver._live_migration_dst_check.
Destination host is same as src host.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummydest'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -751,23 +749,24 @@ class SimpleDriverTestCase(test.TestCase):
AndReturn([service_ref])
self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up')
self.scheduler.driver.service_is_up(service_ref).AndReturn(True)
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
except exception.Invalid, e:
- self.assertTrue(e.message.find('is running now. choose other host') >= 0)
+ msg = 'is running now. choose other host'
+ self.assertTrue(e.message.find(msg) >= 0)
self.mox.UnsetStubs()
def test_live_migraiton_dest_check_service_works_correctly(self):
"""
- A testcase of driver._live_migration_dst_check.
+ A testcase of driver._live_migration_dst_check.
The testcase make sure everything finished with no error.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummydest'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('host', i_ref['host'])
@@ -780,23 +779,24 @@ class SimpleDriverTestCase(test.TestCase):
self.scheduler.driver.service_is_up(service_ref).AndReturn(True)
self.mox.StubOutWithMock(self.scheduler.driver, 'has_enough_resource')
self.scheduler.driver.has_enough_resource(mox.IgnoreArg(), i_ref, dest)
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest)
except exception.Invalid, e:
- self.assertTrue(e.message.find('is running now. choose other host') >= 0)
+ msg = 'is running now. choose other host'
+ self.assertTrue(e.message.find(msg) >= 0)
self.mox.UnsetStubs()
def test_live_migraiton_common_check_service_dest_not_exists(self):
"""
- A testcase of driver._live_migration_common_check.
+ A testcase of driver._live_migration_common_check.
Destination host does not exist.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
driver_i = self.scheduler.driver
self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage')
@@ -804,7 +804,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
AndReturn([])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(ctxt,
@@ -816,14 +816,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_common_check_service_orig_not_exists(self):
"""
- A testcase of driver._live_migration_common_check.
+ A testcase of driver._live_migration_common_check.
Original host(an instance launched on) does not exist.
"""
dest = 'dummydest'
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'}
+ i_ref = {'id': 1, 'hostname': 'i-01',
+ 'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -837,7 +838,7 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.service_get_all_by_host(mox.IgnoreArg(),
i_ref['launched_on']).\
AndReturn([])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(ctxt,
@@ -850,15 +851,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_common_check_service_different_hypervisor(self):
"""
- A testcase of driver._live_migration_common_check.
+ A testcase of driver._live_migration_common_check.
Original host and dest host has different hypervisor type.
"""
dest = 'dummydest'
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01',
- 'host':'dummy', 'launched_on':'h1'}
+ i_ref = {'id': 1, 'hostname': 'i-01',
+ 'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -875,7 +876,7 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.service_get_all_by_host(mox.IgnoreArg(),
i_ref['launched_on']).\
AndReturn([service_ref2])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(ctxt,
@@ -888,15 +889,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_common_check_service_different_version(self):
"""
- A testcase of driver._live_migration_common_check.
+ A testcase of driver._live_migration_common_check.
Original host and dest host has different hypervisor version.
"""
dest = 'dummydest'
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01',
- 'host':'dummy', 'launched_on':'h1'}
+ i_ref = {'id': 1, 'hostname': 'i-01',
+ 'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -913,7 +914,7 @@ class SimpleDriverTestCase(test.TestCase):
driver.db.service_get_all_by_host(mox.IgnoreArg(),
i_ref['launched_on']).\
AndReturn([service_ref2])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(ctxt,
@@ -926,15 +927,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self):
"""
- A testcase of driver._live_migration_common_check.
+ A testcase of driver._live_migration_common_check.
Original host and dest host has different hypervisor version.
"""
dest = 'dummydest'
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01',
- 'host':'dummy', 'launched_on':'h1'}
+ i_ref = {'id': 1, 'hostname': 'i-01',
+ 'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -958,7 +959,7 @@ class SimpleDriverTestCase(test.TestCase):
{"method": 'compare_cpu',
"args": {'cpu_info': service_ref2['cpu_info']}}).\
AndRaise(rpc.RemoteError('doesnt have compatibility to', '', ''))
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver._live_migration_common_check(ctxt,
@@ -971,15 +972,15 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migraiton_common_check_service_works_correctly(self):
"""
- A testcase of driver._live_migration_common_check.
+ A testcase of driver._live_migration_common_check.
The testcase make sure everything finished with no error.
"""
dest = 'dummydest'
driver_i = self.scheduler.driver
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- i_ref = {'id':1, 'hostname':'i-01',
- 'host':'dummy', 'launched_on':'h1'}
+ i_ref = {'id': 1, 'hostname': 'i-01',
+ 'host': 'dummy', 'launched_on': 'h1'}
service_ref = models.Service()
service_ref.__setitem__('id', 1)
service_ref.__setitem__('topic', 'compute')
@@ -1002,7 +1003,7 @@ class SimpleDriverTestCase(test.TestCase):
driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
{"method": 'compare_cpu',
"args": {'cpu_info': service_ref2['cpu_info']}})
-
+
self.mox.ReplayAll()
ret = self.scheduler.driver._live_migration_common_check(ctxt,
i_ref,
@@ -1012,20 +1013,21 @@ class SimpleDriverTestCase(test.TestCase):
def test_has_enough_resource_lack_resource_memory(self):
"""
- A testcase of driver.has_enough_resource.
+ A testcase of driver.has_enough_resource.
Lack of memory_mb.(boundary check)
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- service_ref = {'id':1, 'memory_mb':32, 'memory_mb_used':12, 'local_gb':100}
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
- 'vcpus':5, 'memory_mb':20, 'local_gb':10}
+ service_ref = {'id': 1, 'memory_mb': 32,
+ 'memory_mb_used': 12, 'local_gb': 100}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+ 'vcpus': 5, 'memory_mb': 20, 'local_gb': 10}
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
AndReturn([service_ref])
-
+
self.mox.ReplayAll()
try:
self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest)
@@ -1037,20 +1039,20 @@ class SimpleDriverTestCase(test.TestCase):
def test_has_enough_resource_works_correctly(self):
"""
- A testcase of driver.has_enough_resource
+ A testcase of driver.has_enough_resource
to make sure everything finished with no error.
"""
dest = 'dummydest'
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
- service_ref = {'id':1, 'memory_mb':120, 'memory_mb_used':32}
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
- 'vcpus':5, 'memory_mb':8, 'local_gb':10}
+ service_ref = {'id': 1, 'memory_mb': 120, 'memory_mb_used': 32}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy',
+ 'vcpus': 5, 'memory_mb': 8, 'local_gb': 10}
self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\
AndReturn([service_ref])
-
+
self.mox.ReplayAll()
ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest)
self.assertTrue(ret == None)
@@ -1066,7 +1068,7 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
fpath = '/test/20110127120000'
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
driver.rpc.call(mox.IgnoreArg(),
@@ -1077,8 +1079,8 @@ class SimpleDriverTestCase(test.TestCase):
driver.logging.error(msg % FLAGS.instances_path)
self.mox.ReplayAll()
- self.assertRaises(rpc.RemoteError,
- driver_i.mounted_on_same_shared_storage,
+ self.assertRaises(rpc.RemoteError,
+ driver_i.mounted_on_same_shared_storage,
ctxt, i_ref, dest)
self.mox.UnsetStubs()
@@ -1092,27 +1094,26 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
fpath = '/test/20110127120000'
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
{"method": 'mktmpfile'}).AndReturn(fpath)
- driver.rpc.call(mox.IgnoreArg(),
+ driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(ctxt, FLAGS.compute_topic, i_ref['host']),
- {"method": 'confirm_tmpfile', "args":{'path':fpath}}).\
- AndRaise(rpc.RemoteError('','',''))
+ {"method": 'confirm_tmpfile', "args": {'path': fpath}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
self.mox.StubOutWithMock(driver.logging, 'error')
msg = _("Cannot create tmpfile at %s to confirm shared storage.")
driver.logging.error(msg % FLAGS.instances_path)
self.mox.ReplayAll()
- self.assertRaises(rpc.RemoteError,
- driver_i.mounted_on_same_shared_storage,
+ self.assertRaises(rpc.RemoteError,
+ driver_i.mounted_on_same_shared_storage,
ctxt, i_ref, dest)
self.mox.UnsetStubs()
-
def test_mounted_on_same_shared_storage_works_correctly(self):
"""
A testcase of driver.mounted_on_same_shared_storage
@@ -1122,15 +1123,17 @@ class SimpleDriverTestCase(test.TestCase):
ctxt = context.get_admin_context()
topic = FLAGS.compute_topic
fpath = '/test/20110127120000'
- i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+ i_ref = {'id': 1, 'hostname': 'i-01', 'host': 'dummy'}
self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
driver.rpc.call(mox.IgnoreArg(),
db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest),
{"method": 'mktmpfile'}).AndReturn(fpath)
- driver.rpc.call(mox.IgnoreArg(),
- db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, i_ref['host']),
- {"method": 'confirm_tmpfile', "args":{'path':fpath}})
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(),
+ FLAGS.compute_topic,
+ i_ref['host']),
+ {"method": 'confirm_tmpfile', "args": {'path': fpath}})
self.mox.ReplayAll()
ret = self.scheduler.driver.mounted_on_same_shared_storage(ctxt,
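
The scheduler tests above mock the mktmpfile/confirm_tmpfile RPC round trip that proves the source and destination compute hosts mount the same instances directory. A minimal sketch of that handshake, assuming the rpc and db helpers behave as the mocks describe (error logging and tmpfile cleanup are left out):

from nova import db
from nova import flags
from nova import rpc

FLAGS = flags.FLAGS


def mounted_on_same_shared_storage(context, instance_ref, dest):
    """Sketch: prove src and dest hosts mount the same instances_path."""
    # Ask the destination compute host to create a tmpfile under the
    # (hopefully shared) instances directory...
    fpath = rpc.call(context,
                     db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": "mktmpfile"})
    # ...then ask the source host to confirm that very path exists.
    # A RemoteError here means the two hosts do not share storage.
    rpc.call(context,
             db.queue_get_for(context, FLAGS.compute_topic,
                              instance_ref['host']),
             {"method": "confirm_tmpfile", "args": {"path": fpath}})

If the source host cannot see the path, confirm_tmpfile surfaces as a RemoteError, which is exactly the failure the tests above assert.
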
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 60cef9428..cb65584cf 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -42,20 +42,25 @@ class FakeManager(manager.Manager):
def test_method(self):
return 'manager'
-# temporary variable to store host/binary/self.mox from each method to fake class.
+# temporary variable to store host/binary/self.mox
+# from each method to fake class.
global_host = None
global_binary = None
global_mox = None
+
+
class FakeComputeManager(compute_manager.ComputeManager):
"""Fake computemanager manager for tests"""
-
+
def __init__(self, compute_driver=None, *args, **kwargs):
global ghost, gbinary, gmox
self.update_available_resource(mox.IgnoreArg())
gmox.ReplayAll()
- super(FakeComputeManager, self).__init__(compute_driver, *args, **kwargs)
+ super(FakeComputeManager, self).__init__(compute_driver,
+ *args,
+ **kwargs)
+
-
class ExtendedService(service.Service):
def test_method(self):
return 'service'
@@ -299,7 +304,8 @@ class ServiceTestCase(test.TestCase):
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
- self.mox.StubOutWithMock(compute_manager.ComputeManager, 'update_available_resource')
+ self.mox.StubOutWithMock(compute_manager.ComputeManager,
+ 'update_available_resource')
global ghost, gbinary, gmox
ghost = host
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 0e7340462..8ed726c21 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -38,6 +38,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
libvirt = None
libxml2 = None
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
@@ -48,12 +49,12 @@ class LibvirtConnTestCase(test.TestCase):
try:
pjs = self.manager.get_projects()
pjs = [p for p in pjs if p.name == 'fake']
- if 0 != len(pjs):
+ if 0 != len(pjs):
self.manager.delete_project(pjs[0])
users = self.manager.get_users()
users = [u for u in users if u.name == 'fake']
- if 0 != len(users):
+ if 0 != len(users):
self.manager.delete_user(users[0])
except Exception, e:
pass
@@ -75,13 +76,13 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
- def _driver_dependent_test_setup(self):
+ def _driver_dependent_test_setup(self):
"""
Setup method.
- Call this method at the top of each testcase method,
+ Call this method at the top of each testcase method,
if the testcase is necessary libvirt and cheetah.
"""
- try :
+ try:
global libvirt
global libxml2
libvirt_conn.libvirt = __import__('libvirt')
@@ -93,13 +94,14 @@ class LibvirtConnTestCase(test.TestCase):
"""using driver-dependent library Cheetah/libvirt/libxml2.""")
raise e
- # inebitable mocks for calling
+ # inebitable mocks for calling
#nova.virt.libvirt_conn.LibvirtConnection.__init__
obj = utils.import_object(FLAGS.firewall_driver)
fwmock = self.mox.CreateMock(obj)
self.mox.StubOutWithMock(libvirt_conn, 'utils',
use_mock_anything=True)
- libvirt_conn.utils.import_object(FLAGS.firewall_driver).AndReturn(fwmock)
+ libvirt_conn.utils.import_object(FLAGS.firewall_driver).\
+ AndReturn(fwmock)
return fwmock
def test_xml_and_uri_no_ramdisk_no_kernel(self):
@@ -241,8 +243,9 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically set uri
- #
+ # This test is supposed to make sure we don't
+ # override a specifically set uri
+ #
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
@@ -256,27 +259,26 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_vcpu_total(self):
"""
- Check if get_vcpu_total returns appropriate cpu value
+ Check if get_vcpu_total returns appropriate cpu value
Connection/OS/driver differenct does not matter for this method,
everyone can execute for checking.
"""
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
self.assertTrue(0 < conn.get_vcpu_total())
self.mox.UnsetStubs()
-
def test_get_memory_mb_total(self):
"""Check if get_memory_mb returns appropriate memory value"""
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
@@ -285,8 +287,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_local_gb_total(self):
"""Check if get_local_gb_total returns appropriate disk value"""
- # Note(masumotok): cannot test b/c FLAGS.instances_path is
- # inevitable for this test..
+ # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable..
#try:
# self._driver_dependent_test_setup()
#except:
@@ -305,8 +306,9 @@ class LibvirtConnTestCase(test.TestCase):
except:
return
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
- libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1,2])
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ '_conn', use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2])
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "vcpus", use_mock_anything=True)
vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
@@ -318,7 +320,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- self.assertTrue( conn.get_vcpu_used() == 4)
+ self.assertTrue(conn.get_vcpu_used() == 4)
self.mox.UnsetStubs()
def test_get_memory_mb_used(self):
@@ -335,8 +337,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_local_gb_used(self):
"""Check if get_local_gb_total returns appropriate disk value"""
- # Note(masumotok): cannot test b/c FLAGS.instances_path is
- # inevitable for this test..
+ # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable
#try:
# self._driver_dependent_test_setup()
#except:
@@ -353,22 +354,23 @@ class LibvirtConnTestCase(test.TestCase):
Check if get_cpu_info works correctly.
(in case libvirt.getCapabilities() works correctly)
"""
- xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
- """<vendor>Intel</vendor><topology sockets='2' """
- """cores='4' threads='2'/><feature name='rdtscp'/>"""
- """<feature name='dca'/><feature name='xtpr'/>"""
- """<feature name='tm2'/><feature name='est'/>"""
- """<feature name='vmx'/><feature name='ds_cpl'/>"""
- """<feature name='monitor'/><feature name='pbe'/>"""
- """<feature name='tm'/><feature name='ht'/>"""
- """<feature name='ss'/><feature name='acpi'/>"""
- """<feature name='ds'/><feature name='vme'/></cpu>""")
-
- try:
+ xml = ("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology sockets='2' """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cpu>""")
+
+ try:
self._driver_dependent_test_setup()
- except:
- return
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ except:
+ return
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ '_conn', use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
self.mox.ReplayAll()
@@ -382,33 +384,34 @@ class LibvirtConnTestCase(test.TestCase):
in case libvirt.getCapabilities() returns wrong xml
(in case of xml doesnt have <cpu> tag)
"""
- xml=("""<cccccpu><arch>x86_64</arch><model>Nehalem</model>"""
- """<vendor>Intel</vendor><topology sockets='2' """
- """cores='4' threads='2'/><feature name='rdtscp'/>"""
- """<feature name='dca'/><feature name='xtpr'/>"""
- """<feature name='tm2'/><feature name='est'/>"""
- """<feature name='vmx'/><feature name='ds_cpl'/>"""
- """<feature name='monitor'/><feature name='pbe'/>"""
- """<feature name='tm'/><feature name='ht'/>"""
- """<feature name='ss'/><feature name='acpi'/>"""
- """<feature name='ds'/><feature name='vme'/></cccccpu>""")
-
- try:
+ xml = ("""<cccccpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology sockets='2' """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cccccpu>""")
+
+ try:
self._driver_dependent_test_setup()
- except:
- return
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ except:
+ return
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ '_conn', use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- try:
+ try:
conn.get_cpu_info()
except exception.Invalid, e:
- c1 = ( 0 <= e.message.find('Invalid xml') )
+ c1 = (0 <= e.message.find('Invalid xml'))
self.assertTrue(c1)
self.mox.UnsetStubs()
-
+
def test_get_cpu_info_inappropreate_xml2(self):
"""
Check if get_cpu_info raises exception
@@ -416,30 +419,31 @@ class LibvirtConnTestCase(test.TestCase):
(in case of xml doesnt have inproper <topology> tag
meaning missing "socket" attribute)
"""
- xml=("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
- """<vendor>Intel</vendor><topology """
- """cores='4' threads='2'/><feature name='rdtscp'/>"""
- """<feature name='dca'/><feature name='xtpr'/>"""
- """<feature name='tm2'/><feature name='est'/>"""
- """<feature name='vmx'/><feature name='ds_cpl'/>"""
- """<feature name='monitor'/><feature name='pbe'/>"""
- """<feature name='tm'/><feature name='ht'/>"""
- """<feature name='ss'/><feature name='acpi'/>"""
- """<feature name='ds'/><feature name='vme'/></cpu>""")
-
- try:
+ xml = ("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+ """<vendor>Intel</vendor><topology """
+ """cores='4' threads='2'/><feature name='rdtscp'/>"""
+ """<feature name='dca'/><feature name='xtpr'/>"""
+ """<feature name='tm2'/><feature name='est'/>"""
+ """<feature name='vmx'/><feature name='ds_cpl'/>"""
+ """<feature name='monitor'/><feature name='pbe'/>"""
+ """<feature name='tm'/><feature name='ht'/>"""
+ """<feature name='ss'/><feature name='acpi'/>"""
+ """<feature name='ds'/><feature name='vme'/></cpu>""")
+
+ try:
self._driver_dependent_test_setup()
- except:
- return
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+ except:
+ return
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ '_conn', use_mock_anything=True)
libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- try:
+ try:
conn.get_cpu_info()
except exception.Invalid, e:
- c1 = ( 0 <= e.message.find('Invalid xml: topology') )
+ c1 = (0 <= e.message.find('Invalid xml: topology'))
self.assertTrue(c1)
self.mox.UnsetStubs()
@@ -461,10 +465,10 @@ class LibvirtConnTestCase(test.TestCase):
driver(different depends on environment),
only dictionary keys are checked.
"""
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
def dic_key_check(dic):
validkey = ['vcpus', 'memory_mb', 'local_gb',
@@ -474,7 +478,8 @@ class LibvirtConnTestCase(test.TestCase):
host = 'foo'
binary = 'nova-compute'
- service_ref = {'id':1, 'host':host, 'binary':binary, 'topic':'compute'}
+ service_ref = {'id': 1, 'host': host, 'binary': binary,
+ 'topic': 'compute'}
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
db.service_get_all_by_topic(mox.IgnoreMox(), 'compute').\
@@ -493,10 +498,10 @@ class LibvirtConnTestCase(test.TestCase):
This testcase confirms if no record found on Service
table, exception can be raised.
"""
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
host = 'foo'
binary = 'nova-compute'
@@ -511,7 +516,7 @@ class LibvirtConnTestCase(test.TestCase):
conn.update_available_resource(host)
except exception.Invalid, e:
msg = 'Cannot insert compute manager specific info'
- c1 = ( 0 <= e.message.find(msg))
+ c1 = (0 <= e.message.find(msg))
self.assertTrue(c1)
self.mox.ResetAll()
@@ -523,17 +528,20 @@ class LibvirtConnTestCase(test.TestCase):
""""sockets":"%s"}, "features":[%s]}""")
cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
- libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).AndReturn(1)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ '_conn',
+ use_mock_anything=True)
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),
+ 0).AndReturn(1)
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- self.assertTrue( None== conn.compare_cpu(cpu_info))
+ self.assertTrue(None == conn.compare_cpu(cpu_info))
self.mox.UnsetStubs()
def test_compare_cpu_raises_exception(self):
@@ -546,14 +554,14 @@ class LibvirtConnTestCase(test.TestCase):
""""sockets":"%s"}, "features":[%s]}""")
cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
use_mock_anything=True)
- libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
AndRaise(libvirt.libvirtError('ERR'))
self.mox.ReplayAll()
@@ -569,14 +577,14 @@ class LibvirtConnTestCase(test.TestCase):
""""sockets":"%s"}, "features":[%s]}""")
cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
use_mock_anything=True)
- libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(),0).\
+ libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
AndRaise(exception.Invalid('ERR'))
self.mox.ReplayAll()
@@ -590,10 +598,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = models.Instance()
instance_ref.__setitem__('id', 1)
- try:
+ try:
nwmock, fwmock = self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
nwmock.setup_basic_filtering(mox.IgnoreArg())
fwmock.prepare_instance_filter(instance_ref)
@@ -613,10 +621,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = models.Instance()
instance_ref.__setitem__('id', 1)
- try:
+ try:
nwmock, fwmock = self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
nwmock.setup_basic_filtering(mox.IgnoreArg())
fwmock.prepare_instance_filter(instance_ref)
@@ -629,10 +637,10 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- try:
+ try:
conn.ensure_filtering_rules_for_instance(instance_ref)
except exception.Error, e:
- c1 = ( 0<=e.message.find('Timeout migrating for'))
+ c1 = (0 <= e.message.find('Timeout migrating for'))
self.assertTrue(c1)
self.mox.UnsetStubs()
@@ -641,62 +649,57 @@ class LibvirtConnTestCase(test.TestCase):
class dummyCall(object):
f = None
- def start(self, interval=0, now=False):
+
+ def start(self, interval=0, now=False):
pass
- instance_ref = models.Instance()
- instance_ref.__setitem__('id', 1)
- dest = 'desthost'
+ i_ref = models.Instance()
+ i_ref.__setitem__('id', 1)
+ i_ref.__setitem__('host', 'dummy')
ctxt = context.get_admin_context()
- try:
+ try:
self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
use_mock_anything=True)
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI",
use_mock_anything=True)
- vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+ vdmock.migrateToURI(FLAGS.live_migration_uri % i_ref['host'],
+ mox.IgnoreArg(),
None, FLAGS.live_migration_bandwidth).\
AndReturn(None)
- libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+ libvirt_conn.LibvirtConnection._conn.lookupByName(i_ref.name).\
AndReturn(vdmock)
- # below description is also ok.
- #self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn,
- # "lookupByName", use_mock_anything=True)
-
libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
-
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- ret = conn._live_migration(ctxt, instance_ref, dest)
+ # Not setting post_method/recover_method in this testcase.
+ ret = conn._live_migration(ctxt, i_ref, i_ref['host'], '', '')
self.assertTrue(ret == None)
self.mox.UnsetStubs()
def test_live_migration_raises_exception(self):
"""
_live_migration raises exception, then this testcase confirms
- state_description/state for the instances/volumes are recovered.
+ recovered method is called.
"""
- class Instance(models.NovaBase):
- id = 0
- volumes = None
- name = 'name'
-
+ i_ref = models.Instance()
+ i_ref.__setitem__('id', 1)
+ i_ref.__setitem__('host', 'dummy')
ctxt = context.get_admin_context()
- dest = 'desthost'
- instance_ref = Instance()
- instance_ref.__setitem__('id', 1)
- instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}])
- try:
+ def dummy_recover_method(self, c, instance):
+ pass
+
+ try:
nwmock, fwmock = self._driver_dependent_test_setup()
- except:
- return
+ except:
+ return
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
use_mock_anything=True)
@@ -709,167 +712,18 @@ class LibvirtConnTestCase(test.TestCase):
libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
AndReturn(vdmock)
self.mox.StubOutWithMock(db, 'instance_set_state')
- db.instance_set_state(ctxt, instance_ref['id'],
+ db.instance_set_state(ctxt, instance_ref['id'],
power_state.RUNNING, 'running')
self.mox.StubOutWithMock(db, 'volume_update')
for v in instance_ref.volumes:
- db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\
- InAnyOrder('g1')
-
- self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
- self.assertRaises(libvirt.libvirtError,
- conn._live_migration,
- ctxt, instance_ref, dest)
- self.mox.UnsetStubs()
-
- def test_post_live_migration_working_correctly(self):
- """_post_live_migration works as expected correctly """
-
- dest = 'dummydest'
- ctxt = context.get_admin_context()
- instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
- 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
- 'volumes':[{'id':1}, {'id':2} ]}
- network_ref = {'id':1, 'host':dest}
- floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
-
- try:
- nwmock, fwmock = self._driver_dependent_test_setup()
- except:
- return
- fwmock.unfilter_instance(instance_ref)
-
- fixed_ip = instance_ref['fixed_ip']
- self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
- db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
- self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
- db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
-
- fl_ip = instance_ref['floating_ip']
- self.mox.StubOutWithMock(db, 'instance_get_floating_address')
- db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(fl_ip)
- self.mox.StubOutWithMock(db, 'floating_ip_get_by_address')
- db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\
- AndReturn(floating_ip_ref)
- self.mox.StubOutWithMock(db, 'floating_ip_update')
- db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest})
-
- self.mox.StubOutWithMock(db, 'instance_update')
- db.instance_update(ctxt, instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING, 'host': dest})
- self.mox.StubOutWithMock(db, 'volume_update')
- for v in instance_ref['volumes']:
db.volume_update(ctxt, v['id'], {'status': 'in-use'})
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- conn._post_live_migration( ctxt, instance_ref, dest)
- self.mox.UnsetStubs()
-
- def test_post_live_migration_no_floating_ip(self):
- """
- _post_live_migration works as expected correctly
- (in case instance doesnt have floaitng ip)
- """
- dest = 'dummydest'
- ctxt = context.get_admin_context()
- instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
- 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
- 'volumes':[{'id':1}, {'id':2} ]}
- network_ref = {'id':1, 'host':dest}
- floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
-
- try:
- nwmock, fwmock = self._driver_dependent_test_setup()
- except:
- return
- fwmock.unfilter_instance(instance_ref)
-
- fixed_ip = instance_ref['fixed_ip']
- self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
- db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
- self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
- db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
-
- self.mox.StubOutWithMock(db, 'instance_get_floating_address')
- db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None)
- self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
- libvirt_conn.LOG.info(_('post livemigration operation is started..'))
- libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
- instance_ref['hostname'])
- # Checking last messages are ignored. may be no need to check so strictly?
- libvirt_conn.LOG.info(mox.IgnoreArg())
- libvirt_conn.LOG.info(mox.IgnoreArg())
-
- self.mox.StubOutWithMock(db, 'instance_update')
- db.instance_update(ctxt, instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': dest})
- self.mox.StubOutWithMock(db, 'volume_update')
- for v in instance_ref['volumes']:
- db.volume_update(ctxt, v['id'], {'status': 'in-use'})
-
- self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
- conn._post_live_migration( ctxt, instance_ref, dest)
- self.mox.UnsetStubs()
-
- def test_post_live_migration_no_floating_ip_with_exception(self):
- """
- _post_live_migration works as expected correctly
- (in case instance doesnt have floaitng ip, and raise exception)
- """
- dest = 'dummydest'
- ctxt = context.get_admin_context()
- instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
- 'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
- 'volumes':[{'id':1}, {'id':2} ]}
- network_ref = {'id':1, 'host':dest}
- floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
-
- try:
- nwmock, fwmock = self._driver_dependent_test_setup()
- except:
- return
- fwmock.unfilter_instance(instance_ref)
-
- fixed_ip = instance_ref['fixed_ip']
- self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
- db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
- self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
- db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
-
- self.mox.StubOutWithMock(db, 'instance_get_floating_address')
- db.instance_get_floating_address(ctxt, instance_ref['id']).\
- AndRaise(exception.NotFound())
- self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
- libvirt_conn.LOG.info(_('post livemigration operation is started..'))
- libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
- instance_ref['hostname'])
- # the last message is ignored. may be no need to check so strictly?
- libvirt_conn.LOG.info(mox.IgnoreArg())
- libvirt_conn.LOG.info(mox.IgnoreArg())
-
- self.mox.StubOutWithMock(db, 'instance_update')
- db.instance_update(ctxt, instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING, 'host': dest})
- self.mox.StubOutWithMock(db, 'volume_update')
- for v in instance_ref['volumes']:
- db.volume_update(ctxt, v['id'], {'status': 'in-use'})
-
- self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
- conn._post_live_migration( ctxt, instance_ref, dest)
+ self.assertRaises(libvirt.libvirtError,
+ conn._mlive_migration,
+ ctxt, instance_ref, dest,
+ '', dummy_recover_method)
self.mox.UnsetStubs()
def tearDown(self):
@@ -1181,4 +1035,3 @@ class NWFilterTestCase(test.TestCase):
self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
-
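
The compare_cpu tests above feed the driver a serialized cpu_info string built from a JSON template. A hypothetical illustration of that payload, with field names taken from the test template (the authoritative serialization is get_cpu_info in libvirt_conn.py, and the ordering of the topology keys here is approximate):

import json

cpu_info = json.dumps({
    "arch": "x86_64",
    "model": "Nehalem",
    "vendor": "Intel",
    "topology": {"cores": "4", "threads": "2", "sockets": "2"},
    "features": ["rdtscp", "vmx", "ht"],
})

The scheduler ships this string to the destination host, whose driver rebuilds a <cpu> XML fragment and hands it to libvirt via _conn.compareCPU(); per the success and failure cases above, compare_cpu() returns None when the hosts are compatible and raises otherwise.
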
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b35cf4eb5..6ae075caa 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -177,12 +177,13 @@ class VolumeTestCase(test.TestCase):
pass
-class AOETestCase(test.TestCase):
- """Test Case for AOEDriver"""
+class DriverTestCase(test.TestCase):
+ """Base Test class for Drivers."""
+ driver_name = "nova.volume.driver.FakeAOEDriver"
def setUp(self):
- super(AOETestCase, self).setUp()
- self.flags(volume_driver='nova.volume.driver.AOEDriver',
+ super(DriverTestCase, self).setUp()
+ self.flags(volume_driver=self.driver_name,
logging_default_format_string="%(message)s")
self.volume = utils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
@@ -202,8 +203,29 @@ class AOETestCase(test.TestCase):
self.instance_id = db.instance_create(self.context, inst)['id']
def tearDown(self):
+ super(DriverTestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ return []
+
+ def _detach_volume(self, volume_id_list):
+ """Detach volumes from an instance."""
+ for volume_id in volume_id_list:
+ db.volume_detached(self.context, volume_id)
+ self.volume.delete_volume(self.context, volume_id)
+
+
+class AOETestCase(DriverTestCase):
+ """Test Case for AOEDriver"""
+ driver_name = "nova.volume.driver.AOEDriver"
+
+ def setUp(self):
+ super(AOETestCase, self).setUp()
+
+ def tearDown(self):
super(AOETestCase, self).tearDown()
- db.instance_destroy(self.context, self.instance_id)
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
@@ -212,7 +234,7 @@ class AOETestCase(test.TestCase):
for index in xrange(3):
vol = {}
vol['size'] = 0
- volume_id = db.volume_create(context.get_admin_context(),
+ volume_id = db.volume_create(self.context,
vol)['id']
self.volume.create_volume(self.context, volume_id)
@@ -230,12 +252,6 @@ class AOETestCase(test.TestCase):
return volume_id_list
- def _detach_volume(self, volume_id_list):
- """Detach volumes from an instance."""
- for volume_id in volume_id_list:
- db.volume_detached(self.context, volume_id)
- self.volume.delete_volume(self.context, volume_id)
-
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
@@ -262,10 +278,95 @@ class AOETestCase(test.TestCase):
(shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
volume_id_list[0])
+ msg_is_match = False
+ self.stream.truncate(0)
+ try:
+ self.volume.check_for_export(self.context, self.instance_id)
+ except exception.ProcessExecutionError, e:
+ volume_id = volume_id_list[0]
+ msg = _("""Cannot confirm exported volume id:%(volume_id)s."""
+ """vblade process for e%(shelf_id)s.%(blade_id)s """
+ """isn't running.""") % locals()
+ msg_is_match = (0 <= e.message.find(msg))
+
+ self.assertTrue(msg_is_match)
+ self._detach_volume(volume_id_list)
+
+
+class ISCSITestCase(DriverTestCase):
+ """Test Case for ISCSIDriver"""
+ driver_name = "nova.volume.driver.ISCSIDriver"
+
+ def setUp(self):
+ super(ISCSITestCase, self).setUp()
+
+ def tearDown(self):
+ super(ISCSITestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ vol_ref = db.volume_create(self.context, vol)
+ self.volume.create_volume(self.context, vol_ref['id'])
+ vol_ref = db.volume_get(self.context, vol_ref['id'])
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, vol_ref['id'], self.instance_id,
+ mountpoint)
+ #iscsi_target = db.volume_allocate_iscsi_target(self.context,
+ # vol_ref['id'],
+ # vol_ref['host'])
+ volume_id_list.append(vol_ref['id'])
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_volume_exported(self):
+ """No log message when all the vblade processes are running."""
+        """No log message when all the attached volumes are exported."""
+ volume_id_list = self._attach_volume()
+
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ for i in volume_id_list:
+ tid = db.volume_get_iscsi_target_num(self.context, i)
+ self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+ % locals())
+
self.stream.truncate(0)
+ self.mox.ReplayAll()
self.volume.check_for_export(self.context, self.instance_id)
- self.assertEqual(self.stream.getvalue(),
- _("vblade process for e%s.%s isn't running.\n")
- % (shelf_id, blade_id))
+ self.assertEqual(self.stream.getvalue(), '')
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_some_volume_missing(self):
+        """Output a warning message when some volumes are not recognized
+ by ietd."""
+ volume_id_list = self._attach_volume()
+
+ # the first vblade process isn't running
+ tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
+ % locals()).AndRaise(exception.ProcessExecutionError())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.volume.check_for_export,
+ self.context,
+ self.instance_id)
+ msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
+ self.assertTrue(0 <= self.stream.getvalue().find(msg))
+ self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
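
The volume test rework above pulls the shared fixture into DriverTestCase so each backend only declares which driver it exercises. The pattern in a nutshell, trimmed down from the diff (the real base class also wires up the log stream and creates a test instance record):

from nova import flags
from nova import test
from nova import utils

FLAGS = flags.FLAGS


class DriverTestCase(test.TestCase):
    """Shared fixture; subclasses only change driver_name."""
    driver_name = "nova.volume.driver.FakeAOEDriver"

    def setUp(self):
        super(DriverTestCase, self).setUp()
        # Every backend test runs against the same volume manager, loaded
        # with whichever driver the subclass names.
        self.flags(volume_driver=self.driver_name)
        self.volume = utils.import_object(FLAGS.volume_manager)


class ISCSITestCase(DriverTestCase):
    driver_name = "nova.volume.driver.ISCSIDriver"
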
diff --git a/nova/utils.py b/nova/utils.py
index 884221473..966dde667 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -152,6 +152,42 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
return result
+def ssh_execute(ssh, cmd, process_input=None,
+ addl_env=None, check_exit_code=True):
+ LOG.debug(_("Running cmd (SSH): %s"), cmd)
+ if addl_env:
+ raise exception.Error("Environment not supported over SSH")
+
+ if process_input:
+ # This is (probably) fixable if we need it...
+ raise exception.Error("process_input not supported over SSH")
+
+ stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
+ channel = stdout_stream.channel
+
+ #stdin.write('process_input would go here')
+ #stdin.flush()
+
+ # NOTE(justinsb): This seems suspicious...
+ # ...other SSH clients have buffering issues with this approach
+ stdout = stdout_stream.read()
+ stderr = stderr_stream.read()
+ stdin_stream.close()
+
+ exit_status = channel.recv_exit_status()
+
+ # exit_status == -1 if no exit code was returned
+ if exit_status != -1:
+ LOG.debug(_("Result was %s") % exit_status)
+ if check_exit_code and exit_status != 0:
+ raise exception.ProcessExecutionError(exit_code=exit_status,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=cmd)
+
+ return (stdout, stderr)
+
+
def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
@@ -305,7 +341,7 @@ def mktmpfile(dir):
"""create tmpfile under dir, and return filename."""
filename = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
fpath = os.path.join(dir, filename)
- open(fpath, 'a+').write(fpath+'\n')
+ open(fpath, 'a+').write(fpath + '\n')
return fpath
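
The new ssh_execute helper above mirrors utils.execute but runs the command over an already-open paramiko connection, which is what the SAN driver added later in this change needs. A usage sketch; the host name, credentials, and command are placeholders:

import paramiko

from nova import utils

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('san.example.com', username='admin', password='secret')

# Returns (stdout, stderr); raises ProcessExecutionError when the remote
# exit status is non-zero, unless check_exit_code=False is passed.
stdout, stderr = utils.ssh_execute(ssh, 'uname -a')
ssh.close()

As the NOTE in the helper warns, reading the whole stdout/stderr streams up front can be fragile with some SSH servers, so callers should keep remote commands short-lived.
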
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index c5565abfa..ec4acc452 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -112,6 +112,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
+ print '_link_device:0:', nbd, '::', image
if nbd:
device = _allocate_device()
utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index b865f8e5e..b0e0de975 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -353,9 +353,14 @@ class FakeConnection(object):
def ensure_filtering_rules_for_instance(self, instance_ref):
"""This method is supported only libvirt.."""
- raise NotImplementedError('This method is supported only libvirt.')
+ return
- def live_migration(self, context, instance_ref, dest):
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only libvirt.."""
+ return
+
+ def unfilter_instance(self, instance_ref):
"""This method is supported only libvirt.."""
raise NotImplementedError('This method is supported only libvirt.')
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index cbf287907..9b7a9ddbe 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -880,7 +880,7 @@ class LibvirtConnection(object):
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
- avail = (int(m[idx1+1]) + int(m[idx2+1]) + int(m[idx3+1])) / 1024
+ avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
return self.get_memory_mb_total() - avail
def get_local_gb_used(self):
@@ -978,11 +978,11 @@ class LibvirtConnection(object):
Update compute manager resource info on Service table.
This method is called when nova-coompute launches, and
whenever admin executes "nova-manage service updateresource".
-
+
"""
try:
s_refs = db.service_get_all_by_topic(ctxt, 'compute')
- s_refs = [s for s in s_refs if s.host == host]
+ s_refs = [s for s in s_refs if s.host == host]
if 0 == len(s_refs):
raise exception.NotFound('')
service_ref = s_refs[0]
@@ -1007,7 +1007,7 @@ class LibvirtConnection(object):
{'vcpus': vcpu,
'memory_mb': memory_mb,
'local_gb': local_gb,
- 'vcpus_used':vcpu_u,
+ 'vcpus_used': vcpu_u,
'memory_mb_used': memory_mb_u,
'local_gb_used': local_gb_u,
'hypervisor_type': hypervisor,
@@ -1050,7 +1050,7 @@ class LibvirtConnection(object):
return
def ensure_filtering_rules_for_instance(self, instance_ref):
- """
+ """
Setting up inevitable filtering rules on compute node,
and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
@@ -1093,14 +1093,17 @@ class LibvirtConnection(object):
raise exception.Error(msg % locals())
time.sleep(1)
- def live_migration(self, ctxt, instance_ref, dest):
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
"""
Just spawning live_migration operation for
distributing high-load.
"""
- greenthread.spawn(self._live_migration, ctxt, instance_ref, dest)
+ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
+ post_method, recover_method)
- def _live_migration(self, ctxt, instance_ref, dest):
+ def _live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
""" Do live migration."""
# Do live migration.
@@ -1125,15 +1128,7 @@ class LibvirtConnection(object):
FLAGS.live_migration_bandwidth)
except Exception, e:
- db.instance_set_state(ctxt,
- instance_ref['id'],
- power_state.RUNNING,
- 'running')
- for v in instance_ref['volumes']:
- db.volume_update(ctxt,
- v['id'],
- {'status': 'in-use'})
-
+ recover_method(ctxt, instance_ref)
raise e
# Waiting for completion of live_migration.
@@ -1145,82 +1140,14 @@ class LibvirtConnection(object):
self.get_info(instance_ref.name)['state']
except exception.NotFound:
timer.stop()
- self._post_live_migration(ctxt, instance_ref, dest)
+ post_method(ctxt, instance_ref, dest)
timer.f = wait_for_live_migration
timer.start(interval=0.5, now=True)
- def _post_live_migration(self, ctxt, instance_ref, dest):
- """
- Post operations for live migration.
- Mainly, database updating.
- """
- LOG.info('post livemigration operation is started..')
- # Detaching volumes.
- # (not necessary in current version )
-
- # Releasing vlan.
- # (not necessary in current implementation?)
-
- # Releasing security group ingress rule.
- if FLAGS.firewall_driver == \
- 'nova.virt.libvirt_conn.IptablesFirewallDriver':
- try:
- self.firewall_driver.unfilter_instance(instance_ref)
- except KeyError:
- pass
-
- # Database updating.
- ec2_id = instance_ref['hostname']
-
- instance_id = instance_ref['id']
- fixed_ip = db.instance_get_fixed_address(ctxt, instance_id)
- # Not return if fixed_ip is not found, otherwise,
- # instance never be accessible..
- if None == fixed_ip:
- logging.warn('fixed_ip is not found for %s ' % ec2_id)
- db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
- network_ref = db.fixed_ip_get_network(ctxt, fixed_ip)
-
- try:
- floating_ip \
- = db.instance_get_floating_address(ctxt, instance_id)
- # Not return if floating_ip is not found, otherwise,
- # instance never be accessible..
- if None == floating_ip:
- LOG.info(_('floating_ip is not found for %s'), ec2_id)
- else:
- floating_ip_ref = db.floating_ip_get_by_address(ctxt,
- floating_ip)
- db.floating_ip_update(ctxt,
- floating_ip_ref['address'],
- {'host': dest})
- except exception.NotFound:
- LOG.info(_('floating_ip is not found for %s'), ec2_id)
- except:
- msg = ("""Live migration: Unexpected error:"""
- """%s cannot inherit floating ip..""")
- LOG.error(_(msg), ec2_id)
-
- # Restore instance/volume state
- db.instance_update(ctxt,
- instance_id,
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': dest})
-
- for v in instance_ref['volumes']:
- db.volume_update(ctxt,
- v['id'],
- {'status': 'in-use'})
-
- msg = _('Migrating %(ec2_id)s to %(dest)s finishes successfully')
- LOG.info(msg % locals())
- msg = _(("""The below error is normally occurs."""
- """Just check if instance is successfully migrated.\n"""
- """libvir: QEMU error : Domain not found: no domain """
- """with matching name.."""))
- LOG.info(msg)
+ def unfilter_instance(self, instance_ref):
+ """See comments of same method in firewall_driver"""
+ self.firewall_driver.unfilter_instance(instance_ref)
class FirewallDriver(object):
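
With the hunk above, live_migration takes post_method and recover_method callbacks instead of doing the database bookkeeping itself, so the driver no longer touches instance, fixed-ip, or floating-ip records. A hedged sketch of the callbacks a caller might supply (the bodies below are illustrative placeholders, not the real compute-manager code):

from nova import db
from nova.compute import power_state


def post_method(context, instance_ref, dest):
    # Illustrative: mark the instance as running on the destination host.
    db.instance_update(context, instance_ref['id'],
                       {'state_description': 'running',
                        'state': power_state.RUNNING,
                        'host': dest})


def recover_method(context, instance_ref):
    # Illustrative: invoked when migrateToURI raises, so put the instance
    # back into the running state on the source host.
    db.instance_set_state(context, instance_ref['id'],
                          power_state.RUNNING, 'running')


# Given a LibvirtConnection `conn` and an admin `context`:
# conn.live_migration(context, instance_ref, dest, post_method, recover_method)

Per the diff above, recover_method fires only when migrateToURI raises, while post_method runs once the wait_for_live_migration loop stops finding the domain on the source host.
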
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 30a7da1d4..8ebbfdb78 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -230,9 +230,14 @@ class XenAPIConnection(object):
def ensure_filtering_rules_for_instance(self, instance_ref):
"""This method is supported only libvirt.."""
- raise NotImplementedError('This method is supported only libvirt.')
+ return
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only libvirt.."""
+ return
- def live_migration(self, context, instance_ref, dest):
+ def unfilter_instance(self, instance_ref):
"""This method is supported only libvirt.."""
raise NotImplementedError('This method is supported only libvirt.')
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index da7307733..5dd32e3d7 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -138,6 +138,10 @@ class VolumeDriver(object):
"""Undiscover volume on a remote host."""
raise NotImplementedError()
+ def check_for_export(self, context, volume_id):
+ """Make sure whether volume is exported."""
+ return True
+
class AOEDriver(VolumeDriver):
"""Implements AOE specific volume commands."""
@@ -192,15 +196,45 @@ class AOEDriver(VolumeDriver):
self._try_execute("sudo vblade-persist destroy %s %s" %
(shelf_id, blade_id))
- def discover_volume(self, _volume):
+ def discover_volume(self, context, _volume):
"""Discover volume on a remote host."""
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ _volume['id'])
self._execute("sudo aoe-discover")
- self._execute("sudo aoe-stat", check_exit_code=False)
+ out, err = self._execute("sudo aoe-stat", check_exit_code=False)
+ device_path = 'e%(shelf_id)d.%(blade_id)d' % locals()
+ if 0 <= out.find(device_path):
+ return "/dev/etherd/%s" % device_path
+ else:
+ return
def undiscover_volume(self, _volume):
"""Undiscover volume on a remote host."""
pass
+ def check_for_export(self, context, volume_id):
+ """Make sure whether volume is exported."""
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ volume_id)
+ cmd = "sudo vblade-persist ls --no-header"
+ out, _err = self._execute(cmd)
+ exported = False
+ for line in out.split('\n'):
+ param = line.split(' ')
+ if len(param) == 6 and param[0] == str(shelf_id) \
+ and param[1] == str(blade_id) and param[-1] == "run":
+ exported = True
+ break
+ if not exported:
+ # Instance will be terminated in this case.
+ desc = _("""Cannot confirm exported volume id:%(volume_id)s."""
+ """vblade process for e%(shelf_id)s.%(blade_id)s """
+ """isn't running.""") % locals()
+ raise exception.ProcessExecutionError(out, _err, cmd=cmd,
+ description=desc)
+
class FakeAOEDriver(AOEDriver):
"""Logs calls instead of executing."""
@@ -294,8 +328,10 @@ class ISCSIDriver(VolumeDriver):
self._execute("sudo ietadm --op delete --tid=%s" %
iscsi_target)
- def _get_name_and_portal(self, volume_name, host):
+ def _get_name_and_portal(self, volume):
"""Gets iscsi name and portal from volume name and host."""
+ volume_name = volume['name']
+ host = volume['host']
(out, _err) = self._execute("sudo iscsiadm -m discovery -t "
"sendtargets -p %s" % host)
for target in out.splitlines():
@@ -305,10 +341,9 @@ class ISCSIDriver(VolumeDriver):
iscsi_portal = location.split(",")[0]
return (iscsi_name, iscsi_portal)
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
- volume['host'])
+ iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
(iscsi_name, iscsi_portal))
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
@@ -319,8 +354,7 @@ class ISCSIDriver(VolumeDriver):
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
- volume['host'])
+ iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
"-n node.startup -v manual" %
(iscsi_name, iscsi_portal))
@@ -329,6 +363,20 @@ class ISCSIDriver(VolumeDriver):
self._execute("sudo iscsiadm -m node --op delete "
"--targetname %s" % iscsi_name)
+ def check_for_export(self, context, volume_id):
+ """Make sure whether volume is exported."""
+
+ tid = self.db.volume_get_iscsi_target_num(context, volume_id)
+ try:
+ self._execute("sudo ietadm --op show --tid=%(tid)d" % locals())
+ except exception.ProcessExecutionError, e:
+ # Instances remount read-only in this case.
+ # /etc/init.d/iscsitarget restart and rebooting nova-volume
+ # is better since ensure_export() works at boot time.
+ logging.error(_("""Cannot confirm exported volume """
+ """id:%(volume_id)s.""") % locals())
+ raise e
+
class FakeISCSIDriver(ISCSIDriver):
"""Logs calls instead of executing."""
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 8e29a21b2..2ce58bebe 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -87,7 +87,7 @@ class VolumeManager(manager.Manager):
if volume['status'] in ['available', 'in-use']:
self.driver.ensure_export(ctxt, volume)
else:
- LOG.info(_("volume %s: skipping export"), volume_ref['name'])
+ LOG.info(_("volume %s: skipping export"), volume['name'])
def create_volume(self, context, volume_id):
"""Creates and exports the volume."""
@@ -156,7 +156,7 @@ class VolumeManager(manager.Manager):
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
path = self.driver.local_path(volume_ref)
else:
- path = self.driver.discover_volume(volume_ref)
+ path = self.driver.discover_volume(context, volume_ref)
return path
def remove_compute_volume(self, context, volume_id):
@@ -170,7 +170,6 @@ class VolumeManager(manager.Manager):
def check_for_export(self, context, instance_id):
"""Make sure whether volume is exported."""
- if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver':
- instance_ref = self.db.instance_get(context, instance_id)
- for v in instance_ref['volumes']:
- self.driver.check_for_export(context, v['id'])
+ instance_ref = self.db.instance_get(context, instance_id)
+ for v in instance_ref['volumes']:
+ self.driver.check_for_export(context, v['id'])
diff --git a/nova/volume/san.py b/nova/volume/san.py
new file mode 100644
index 000000000..26d6125e7
--- /dev/null
+++ b/nova/volume/san.py
@@ -0,0 +1,335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Drivers for SAN-stored volumes.
+
+The unique thing about a SAN is that we don't expect to be able to run the
+volume controller on the SAN hardware itself, so we access it over SSH or
+some API.
+"""
+
+import os
+import paramiko
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.utils import ssh_execute
+from nova.volume.driver import ISCSIDriver
+
+LOG = logging.getLogger("nova.volume.driver")
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('san_thin_provision', True,
+ 'Use thin provisioning for SAN volumes?')
+flags.DEFINE_string('san_ip', '',
+ 'IP address of SAN controller')
+flags.DEFINE_string('san_login', 'admin',
+ 'Username for SAN controller')
+flags.DEFINE_string('san_password', '',
+ 'Password for SAN controller')
+flags.DEFINE_string('san_privatekey', '',
+ 'Filename of private key to use for SSH authentication')
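+# Example (hypothetical values) for a password-authenticated SAN:
+#   --san_ip=192.168.0.10 --san_login=admin --san_password=secret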
+
+
+class SanISCSIDriver(ISCSIDriver):
+ """ Base class for SAN-style storage volumes
+ (storage providers we access over SSH)"""
+ # Override because the SAN IP is not the same as the volume host IP.
+ def _get_name_and_portal(self, volume):
+ """Gets iscsi name and portal from volume name and host."""
+ volume_name = volume['name']
+
+ # TODO(justinsb): store in volume, remerge with generic iSCSI code
+ host = FLAGS.san_ip
+
+ (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
+ "sendtargets -p %s" % host)
+
+ location = None
+ find_iscsi_name = self._build_iscsi_target_name(volume)
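+ # Each discovery line is expected to look roughly like
+ # "10.0.0.1:3260,1 iqn.2010-10.org.openstack:volume-00000001"
+ # (portal location, then the target name).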
+ for target in out.splitlines():
+ if find_iscsi_name in target:
+ (location, _sep, iscsi_name) = target.partition(" ")
+ break
+ if not location:
+ raise exception.Error(_("Could not find iSCSI export "
+ " for volume %s") %
+ volume_name)
+
+ iscsi_portal = location.split(",")[0]
+ LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
+ (iscsi_name, iscsi_portal))
+ return (iscsi_name, iscsi_portal)
+
+ def _build_iscsi_target_name(self, volume):
+ return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+
+ # discover_volume and undiscover_volume inherited from ISCSIDriver
+ # work unchanged.
+
+ def _connect_to_ssh(self):
+ ssh = paramiko.SSHClient()
+ #TODO(justinsb): We need a better SSH key policy
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ if FLAGS.san_password:
+ ssh.connect(FLAGS.san_ip,
+ username=FLAGS.san_login,
+ password=FLAGS.san_password)
+ elif FLAGS.san_privatekey:
+ privatekeyfile = os.path.expanduser(FLAGS.san_privatekey)
+ # Note: only RSA private keys are handled here.
+ privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
+ ssh.connect(FLAGS.san_ip,
+ username=FLAGS.san_login,
+ pkey=privatekey)
+ else:
+ raise exception.Error("Specify san_password or san_privatekey")
+ return ssh
+
+ def _run_ssh(self, command, check_exit_code=True):
+ #TODO(justinsb): SSH connection caching (?)
+ ssh = self._connect_to_ssh()
+
+ #TODO(justinsb): Reintroduce the retry hack
+ ret = ssh_execute(ssh, command, check_exit_code=check_exit_code)
+
+ ssh.close()
+
+ return ret
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ pass
+
+ def create_export(self, context, volume):
+ """Exports the volume."""
+ pass
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ pass
+
+ def check_for_setup_error(self):
+ """Returns an error if prerequisites aren't met"""
+ if not (FLAGS.san_password or FLAGS.san_privatekey):
+ raise exception.Error("Specify san_password or san_privatekey")
+
+ if not FLAGS.san_ip:
+ raise exception.Error("san_ip must be set")
+
+
+def _collect_lines(data):
+ """ Split lines from data into an array, trimming them """
+ matches = []
+ for line in data.splitlines():
+ match = line.strip()
+ matches.append(match)
+
+ return matches
+
+
+def _get_prefixed_values(data, prefix):
+ """Collect lines which start with prefix; with trimming"""
+ matches = []
+ for line in data.splitlines():
+ line = line.strip()
+ if line.startswith(prefix):
+ match = line[len(prefix):]
+ match = match.strip()
+ matches.append(match)
+
+ return matches
+
+
+class SolarisISCSIDriver(SanISCSIDriver):
+ """Executes commands relating to Solaris-hosted ISCSI volumes.
+ Basic setup for a Solaris iSCSI server:
+ pkg install storage-server SUNWiscsit
+ svcadm enable stmf
+ svcadm enable -r svc:/network/iscsi/target:default
+ pfexec itadm create-tpg e1000g0 ${MYIP}
+ pfexec itadm create-target -t e1000g0
+
+ Then grant the user that will be logging in the permissions it needs
+ (the exact minimal set is unclear; something like the following):
+ zfs allow justinsb create,mount,destroy rpool
+ usermod -P'File System Management' justinsb
+ usermod -P'Primary Administrator' justinsb
+
+ Also make sure you can log in using san_login and san_password/san_privatekey.
+ """
+
+ def _view_exists(self, luid):
+ cmd = "pfexec /usr/sbin/stmfadm list-view -l %s" % (luid)
+ (out, _err) = self._run_ssh(cmd,
+ check_exit_code=False)
+ if "no views found" in out:
+ return False
+
+ if "View Entry:" in out:
+ return True
+
+ raise exception.Error("Cannot parse list-view output: %s" % (out))
+
+ def _get_target_groups(self):
+ """Gets list of target groups from host."""
+ (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg")
+ matches = _get_prefixed_values(out, 'Target group: ')
+ LOG.debug("target_groups=%s" % matches)
+ return matches
+
+ def _target_group_exists(self, target_group_name):
+ return target_group_name in self._get_target_groups()
+
+ def _get_target_group_members(self, target_group_name):
+ (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg -v %s" %
+ (target_group_name))
+ matches = _get_prefixed_values(out, 'Member: ')
+ LOG.debug("members of %s=%s" % (target_group_name, matches))
+ return matches
+
+ def _is_target_group_member(self, target_group_name, iscsi_target_name):
+ return iscsi_target_name in (
+ self._get_target_group_members(target_group_name))
+
+ def _get_iscsi_targets(self):
+ cmd = ("pfexec /usr/sbin/itadm list-target | "
+ "awk '{print $1}' | grep -v ^TARGET")
+ (out, _err) = self._run_ssh(cmd)
+ matches = _collect_lines(out)
+ LOG.debug("_get_iscsi_targets=%s" % (matches))
+ return matches
+
+ def _iscsi_target_exists(self, iscsi_target_name):
+ return iscsi_target_name in self._get_iscsi_targets()
+
+ def _build_zfs_poolname(self, volume):
+ #TODO(justinsb): rpool should be configurable
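+ # e.g. a volume named 'volume-00000001' maps to 'rpool/volume-00000001'.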
+ zfs_poolname = 'rpool/%s' % (volume['name'])
+ return zfs_poolname
+
+ def create_volume(self, volume):
+ """Creates a volume."""
+ if int(volume['size']) == 0:
+ sizestr = '100M'
+ else:
+ sizestr = '%sG' % volume['size']
+
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ thin_provision_arg = '-s' if FLAGS.san_thin_provision else ''
+ # Create a zfs volume
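+ # e.g. with thin provisioning and a 1 GB volume this runs something like
+ # "pfexec /usr/sbin/zfs create -s -V 1G rpool/volume-00000001".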
+ self._run_ssh("pfexec /usr/sbin/zfs create %s -V %s %s" %
+ (thin_provision_arg,
+ sizestr,
+ zfs_poolname))
+
+ def _get_luid(self, volume):
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ cmd = ("pfexec /usr/sbin/sbdadm list-lu | "
+ "grep -w %s | awk '{print $1}'" %
+ (zfs_poolname))
+
+ (stdout, _stderr) = self._run_ssh(cmd)
+
+ luid = stdout.strip()
+ return luid
+
+ def _is_lu_created(self, volume):
+ luid = self._get_luid(volume)
+ return luid
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ zfs_poolname = self._build_zfs_poolname(volume)
+ self._run_ssh("pfexec /usr/sbin/zfs destroy %s" %
+ (zfs_poolname))
+
+ def local_path(self, volume):
+ # TODO(justinsb): Is this needed here?
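+ # e.g. volume_group 'nova-volumes' and volume 'volume-00000001' map to
+ # '/dev/mapper/nova--volumes-volume--00000001'.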
+ escaped_group = FLAGS.volume_group.replace('-', '--')
+ escaped_name = volume['name'].replace('-', '--')
+ return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ #TODO(justinsb): On bootup, this is called for every volume.
+ # It then runs ~5 SSH commands for each volume,
+ # most of which fetch the same info each time
+ # This makes initial start stupid-slow
+ self._do_export(volume, force_create=False)
+
+ def create_export(self, context, volume):
+ self._do_export(volume, force_create=True)
+
+ def _do_export(self, volume, force_create):
+ # Create a Logical Unit (LU) backed by the zfs volume
+ zfs_poolname = self._build_zfs_poolname(volume)
+
+ if force_create or not self._is_lu_created(volume):
+ cmd = ("pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/%s" %
+ (zfs_poolname))
+ self._run_ssh(cmd)
+
+ luid = self._get_luid(volume)
+ iscsi_name = self._build_iscsi_target_name(volume)
+ target_group_name = 'tg-%s' % volume['name']
+
+ # Create a iSCSI target, mapped to just this volume
+ if force_create or not self._target_group_exists(target_group_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm create-tg %s" %
+ (target_group_name))
+
+ # Yes, we add the group member before we create the target!
+ # Otherwise, it complains that the target is already active
+ if force_create or not self._is_target_group_member(target_group_name,
+ iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm add-tg-member -g %s %s" %
+ (target_group_name, iscsi_name))
+ if force_create or not self._iscsi_target_exists(iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/itadm create-target -n %s" %
+ (iscsi_name))
+ if force_create or not self._view_exists(luid):
+ self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
+ (target_group_name, luid))
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+
+ # This is the reverse of _do_export
+ luid = self._get_luid(volume)
+ iscsi_name = self._build_iscsi_target_name(volume)
+ target_group_name = 'tg-%s' % volume['name']
+
+ if self._view_exists(luid):
+ self._run_ssh("pfexec /usr/sbin/stmfadm remove-view -l %s -a" %
+ (luid))
+
+ if self._iscsi_target_exists(iscsi_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm offline-target %s" %
+ (iscsi_name))
+ self._run_ssh("pfexec /usr/sbin/itadm delete-target %s" %
+ (iscsi_name))
+
+ # We don't delete the tg-member; we delete the whole tg!
+
+ if self._target_group_exists(target_group_name):
+ self._run_ssh("pfexec /usr/sbin/stmfadm delete-tg %s" %
+ (target_group_name))
+
+ if self._is_lu_created(volume):
+ self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
+ (luid))