From 2925ca3ac3010b9a65276ad2cfc8118679827da3 Mon Sep 17 00:00:00 2001 From: masumotok Date: Tue, 7 Dec 2010 19:25:43 +0900 Subject: rev439ベースにライブマイグレーションの機能をマージ このバージョンはEBSなし、CPUフラグのチェックなし MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bin/nova-manage | 122 +++++++++++++++++++++++++++++++++++++- nova/api/ec2/cloud.py | 13 +++- nova/compute/api.py | 3 + nova/compute/manager.py | 138 +++++++++++++++++++++++++++++++++++++++++++ nova/db/api.py | 69 ++++++++++++++++++++++ nova/db/sqlalchemy/api.py | 131 ++++++++++++++++++++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++- nova/network/manager.py | 7 +++ nova/scheduler/manager.py | 107 +++++++++++++++++++++++++++++++++ nova/service.py | 22 +++++++ nova/utils.py | 13 +++- nova/virt/libvirt_conn.py | 101 +++++++++++++++++++++++++++++++ setup.py | 5 +- 13 files changed, 745 insertions(+), 10 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index eb7c6b87b..d41ae8600 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -76,6 +76,13 @@ from nova import quota from nova import utils from nova.auth import manager from nova.cloudpipe import pipelib +#added by masumotok +from nova import rpc +# added by masumotok +from nova.api.ec2 import cloud +# added by masumotok +from nova.compute import power_state + FLAGS = flags.FLAGS @@ -424,6 +431,116 @@ class NetworkCommands(object): int(network_size), int(vlan_start), int(vpn_start)) +# this class is added by masumotok +class InstanceCommands(object): + """Class for mangaging VM instances.""" + + def live_migration(self, ec2_id, dest): + """live_migration""" + + logging.basicConfig() + ctxt = context.get_admin_context() + + if 'nova.network.manager.VlanManager' != FLAGS.network_manager : + msg = 'Only nova.network.manager.VlanManager is supported now. Sorry!' + raise Exception(msg) + + # 1. whether destination host exists + host_ref = db.host_get_by_name(ctxt, dest) + + # 2. whether instance exists and running + # try-catch clause is necessary because only internal_id is shown + # when NotFound exception occurs. it isnot understandable to admins. + try : + internal_id = cloud.ec2_id_to_internal_id(ec2_id) + instance_ref = db.instance_get_by_internal_id(ctxt, internal_id) + except exception.NotFound : + print 'Not found instance_id(%s (internal_id:%s))' % ( ec2_id, internal_id) + raise + + if power_state.RUNNING != instance_ref['state'] or \ + 'running' != instance_ref['state_description']: + print 'Instance(%s) is not running' % ec2_id + sys.exit(1) + + # 3. the host where instance is running and dst host is not same + if dest == instance_ref['host'] : + print '%s is where %s is running now. choose different host.' \ + % (dest, ec2_id) + sys.exit(2) + + # 4. live migration + rpc.cast(ctxt, + FLAGS.scheduler_topic, + { "method": "live_migration", + "args": {"ec2_id": ec2_id, + "dest":dest}}) + + print 'Finished all procedure. check instance are migrated successfully' + print 'chech status by using euca-describe-instances.' 
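For orientation, a sketch of the end-to-end call chain this nova-manage command kicks off. The method names are the ones added by this patch series; the outline below is a reading aid under that assumption, not additional code:

    # nova-manage (above)      rpc.cast(scheduler_topic, "live_migration")
    # SchedulerManager         live_migration(): has_enough_resource() check,
    #                          instance_set_state(PAUSED, 'migrating'),
    #                          rpc.cast to compute_topic on the source host
    # ComputeManager (source)  rpc.call "pre_live_migration" on the destination,
    #                          wait for the nova-instance-instance-xxx nwfilter,
    #                          then driver.live_migration(instance_ref, dest)
    # LibvirtConnection        "sudo virsh migrate --live <name> qemu+tcp://<dest>/system",
    #                          then _post_live_migration() re-points the fixed ip,
    #                          floating ip, network and instance rows at dest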
+ + +# this class is created by masumotok +class HostCommands(object): + """Class for mangaging host(physical nodes).""" + + + def list(self): + """describe host list.""" + + # to supress msg: No handlers could be found for logger "amqplib" + logging.basicConfig() + + host_refs = db.host_get_all(context.get_admin_context()) + for host_ref in host_refs: + print host_ref['name'] + + def show(self, host): + """describe cpu/memory/hdd info for host.""" + + # to supress msg: No handlers could be found for logger "amqplib" + logging.basicConfig() + + result = rpc.call(context.get_admin_context(), + FLAGS.scheduler_topic, + {"method": "show_host_resource", + "args": {"host": host}}) + + # checing result msg format is necessary, that will have done + # when this feture is included in API. + if dict != type(result): + print 'Unexpected error occurs' + elif not result['ret'] : + print '%s' % result['msg'] + else : + cpu = result['phy_resource']['cpu'] + mem = result['phy_resource']['memory_mb'] + hdd = result['phy_resource']['hdd_gb'] + + print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)' + print '%s\t\t\t%s\t%s\t%s' % ( host,cpu, mem, hdd) + for p_id, val in result['usage'].items() : + print '%s\t%s\t\t%s\t%s\t%s' % ( host, + p_id, + val['cpu'], + val['memory_mb'], + val['hdd_gb']) + + def has_keys(self, dic, keys): + not_found = [ key for key in keys if not dict.has_key(key) ] + return ( (0 == len(not_found)), not_found ) + + + +# modified by masumotok +#CATEGORIES = [ +# ('user', UserCommands), +# ('project', ProjectCommands), +# ('role', RoleCommands), +# ('shell', ShellCommands), +# ('vpn', VpnCommands), +# ('floating', FloatingIpCommands), +# ('network', NetworkCommands)] CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -431,8 +548,9 @@ CATEGORIES = [ ('shell', ShellCommands), ('vpn', VpnCommands), ('floating', FloatingIpCommands), - ('network', NetworkCommands)] - + ('network', NetworkCommands), + ('instance', InstanceCommands), + ('host',HostCommands)] def lazy_match(name, key_value_tuples): """Finds all objects that have a key that case insensitively contains diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e50906ae1..ebf5bcf0b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -678,13 +678,22 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] + # modified by masumotok + internal_id = \ + floating_ip_ref['fixed_ip']['instance']['internal_id'] ec2_id = internal_id_to_ec2_id(internal_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.user.is_admin(): + # modified by masumotok- b/c proj_id is never inserted + #details = "%s (%s)" % (address_rv['instance_id'], + # floating_ip_ref['project_id']) + if None != address_rv['instance_id']: + status = 'reserved' + else: + status = None details = "%s (%s)" % (address_rv['instance_id'], - floating_ip_ref['project_id']) + status) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} diff --git a/nova/compute/api.py b/nova/compute/api.py index 929342a1e..03922c272 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -84,6 +84,7 @@ class ComputeAPI(base.Base): if not type(security_group) is list: security_group = [security_group] + print '<<<<<<<<<<<<<<<<<<<<<<<<<<1' security_groups = [] self.ensure_default_security_group(context) for security_group_name in security_group: @@ -92,6 +93,7 
@@ class ComputeAPI(base.Base): security_group_name) security_groups.append(group['id']) + print '<<<<<<<<<<<<<<<<<<<<<<<<<<2' if key_data is None and key_name: key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] @@ -115,6 +117,7 @@ class ComputeAPI(base.Base): 'key_name': key_name, 'key_data': key_data} + print '<<<<<<<<<<<<<<<<<<<<<<<<<<3' elevated = context.elevated() instances = [] logging.debug("Going to run %s instances...", num_instances) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index dd8d41129..4c42153b6 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,6 +36,13 @@ terminating it. import datetime import logging +# added by masumotok +import sys +# added by masumotok +import traceback +# added by masumotok +import os + from twisted.internet import defer @@ -44,12 +51,19 @@ from nova import flags from nova import manager from nova import utils from nova.compute import power_state +# added by masumotok +from nova import rpc +# added by masumotok +from nova import db FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') +# created by masumotok +flags.DEFINE_string('live_migration_timeout', 30, + 'Timeout value for pre_live_migration is completed.') class ComputeManager(manager.Manager): @@ -251,3 +265,127 @@ class ComputeManager(manager.Manager): yield self.volume_manager.remove_compute_volume(context, volume_id) self.db.volume_detached(context, volume_id) defer.returnValue(True) + + # created by masumotok + def get_cpu_number(self): + """Get the number of physical computer cpu core .""" + return open('/proc/cpuinfo').read().count('processor') + + # created by masumotok + def get_mem_size(self): + """Get the memory size of physical computer .""" + meminfo = open('/proc/meminfo').read().split() + idx = meminfo.index('MemTotal:') + # transforming kb to mb. + return int(meminfo[idx + 1]) / 1024 + + # created by masumotok + def get_hdd_size(self): + """Get the hdd size of physical computer .""" + hddinfo = os.statvfs(FLAGS.instances_path) + return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024 + + # created by masumotok + def pre_live_migration(self, context, instance_id, dest): + """Any preparation for live migration at dst host.""" + + # 1. getting volume info ( shlf/slot number ) + instance_ref = db.instance_get(context, instance_id) + ec2_id = instance_ref['hostname'] + + volumes = [] + try: + volumes = db.volume_get_by_ec2_id(context, ec2_id) + except exception.NotFound: + logging.debug('%s has no volume.', ec2_id) + + shelf_slots = {} + for vol in volumes: + shelf, slot = db.volume_get_shelf_and_blade(context, vol['id']) + shelf_slots[vol.id] = (shelf, slot) + + # 2. getting fixed ips + fixed_ip = db.instance_get_fixed_address(context, instance_id) + if None == fixed_ip: + logging.error('Not found fixedip for %s\n%s', + ec2_id, + ''.join(traceback.format_tb(sys.exc_info()[2]))) + return + + # 3. getting network refs + network_ref = db.fixed_ip_get_network(context, fixed_ip) + + # 4. security rules (filtering rules) + secgrp_refs = db.security_group_get_by_instance(context, instance_id) + + # 5. if any volume is mounted, prepare here. + if 0 != len(shelf_slots): + pass + + # 6. 
create nova-instance-instance-xxx in hypervisor through libvirt + # (This rule can be seen by executing virsh nwfilter-list) + self.driver.setup_nwfilters_for_instance(instance_ref) + + # 7. insert filtering rule + for secgrp_ref in secgrp_refs: + self.driver.refresh_security_group(secgrp_ref.id) + + # 8. vlan settings + self.network_manager.driver.ensure_vlan_bridge(network_ref['vlan'], + network_ref['bridge']) + + # created by masumotok + def nwfilter_for_instance_exists(self, context, instance_id): + """Check nova-instance-instance-xxx filter exists """ + instance_ref = db.instance_get(context, instance_id) + return self.driver.nwfilter_for_instance_exists(instance_ref) + + # created by masumotok + def live_migration(self, context, instance_id, dest): + """executes live migration.""" + + import time + # 1. ask dest host to preparing live migration. + compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest) + ret = rpc.call(context, + compute_topic, + {"method": "pre_live_migration", + "args": {'instance_id': instance_id, + 'dest': dest}}) + + if rpc.RemoteError == type(ret): + logging.error('Live migration failed(err at %s)', dest) + db.instance_set_state(context, + instance_id, + power_state.RUNNING, + 'running') + return + + # waiting for setting up nwfilter(nova-instance-instance-xxx) + # otherwise, live migration fail. + timeout_count = range(FLAGS.live_migration_timeout) + while 0 != len(timeout_count): + ret = rpc.call(context, + compute_topic, + {"method": "nwfilter_for_instance_exists", + "args": {'instance_id': instance_id}}) + if ret: + break + + timeout_count.pop() + time.sleep(1) + + if not ret: + logging.error('Timeout for pre_live_migration at %s', dest) + return + + # 2. executing live migration + # live_migration might raises ProcessExecution error, but + # nothing must be recovered in this version. 
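+ # (Reading aid: the polling loop above retries the
+ # "nwfilter_for_instance_exists" call once per second, for at most
+ # FLAGS.live_migration_timeout attempts (default 30), because the
+ # migration fails if the nova-instance-instance-xxx filter is not yet
+ # defined on the destination; only then does the driver call below run.)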
+ instance_ref = db.instance_get(context, instance_id) + ret = self.driver.live_migration(instance_ref, dest) + if not ret: + logging.debug('Fail to live migration') + return + + diff --git a/nova/db/api.py b/nova/db/api.py index 8f9dc2443..c62d5d6ef 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -195,6 +195,11 @@ def floating_ip_get_by_address(context, address): return IMPL.floating_ip_get_by_address(context, address) +# this method is created by masumotok +def floating_ip_update(context, address, values): + """update floating ip information.""" + return IMPL.floating_ip_update(context, address, values) + #################### @@ -334,6 +339,36 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) +# created by masumotok +def instance_get_all_by_host(context, hostname): + """Get instances by host""" + return IMPL.instance_get_all_by_host(context, hostname) + + +# created by masumotok +def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): + """Get instances.vcpus by host and project""" + return IMPL.instance_get_vcpu_sum_by_host_and_project(context, + hostname, + proj_id) + + +# created by masumotok +def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): + """Get amount of memory by host and project """ + return IMPL.instance_get_memory_sum_by_host_and_project(context, + hostname, + proj_id) + + +# created by masumotok +def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): + """Get total amount of disk by host and project """ + return IMPL.instance_get_disk_sum_by_host_and_project(context, + hostname, + proj_id) + + ################### @@ -833,3 +868,37 @@ def host_get_networks(context, host): """ return IMPL.host_get_networks(context, host) + + +# below all methods related to host table are created by masumotok +################### + + +def host_create(context, value): + """Create a host from the values dictionary.""" + return IMPL.host_create(context, value) + + +def host_get(context, host_id): + """Get an host or raise if it does not exist.""" + return IMPL.host_get(context, host_id) + + +def host_get_all(context, session=None): + """Get all hosts or raise if it does not exist.""" + return IMPL.host_get_all(context) + + +def host_get_by_name(context, host): + """Get an host or raise if it does not exist.""" + return IMPL.host_get_by_name(context, host) + + +def host_update(context, host, values): + """Set the given properties on an host and update it.""" + return IMPL.host_update(context, host, values) + + +def host_deactivated(context, host): + """set deleted flag to a given host""" + return IMPL.host_deactivated(context, host) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index dd9649054..811575765 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -394,6 +394,17 @@ def floating_ip_get_by_address(context, address, session=None): return result +# created by masumotok +@require_context +def floating_ip_update(context, address, values): + session = get_session() + with session.begin(): + floating_ip_ref = floating_ip_get_by_address(context, address, session) + for (key, value) in values.iteritems(): + floating_ip_ref[key] = value + floating_ip_ref.save(session=session) + + ################### @@ -746,6 +757,52 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) +# created by masumotok +def instance_get_all_by_host(context, hostname): + session = get_session() + if not session: 
+ session = get_session() + + result = session.query(models.Instance + ).filter_by(host=hostname + ).filter_by(deleted=can_read_deleted(context) + ).all() + if None == result: + return [] + return result + + +# created by masumotok +def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): + session = get_session() + + result = session.query(models.Instance + ).filter_by(host=hostname + ).filter_by(project_id=proj_id + ).filter_by(deleted=can_read_deleted(context) + ).value(column) + if None == result: + return 0 + return result + + +# created by masumotok +def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): + return _instance_get_sum_by_host_and_project(context, 'vcpus', hostname, + proj_id) + + +# created by masumotok +def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): + return _instance_get_sum_by_host_and_project(context, 'memory_mb', + hostname, proj_id) + + +# created by masumotok +def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): + return _instance_get_sum_by_host_and_project(context, 'local_gb', + hostname, proj_id) + ################### @@ -1746,3 +1803,77 @@ def host_get_networks(context, host): filter_by(deleted=False).\ filter_by(host=host).\ all() + + +#below all methods related to host table are created by masumotok +################### + +@require_admin_context +def host_create(context, values): + host_ref = models.Host() + for (key, value) in values.iteritems(): + host_ref[key] = value + host_ref.save() + return host_ref + + +@require_admin_context +def host_get(context, host_id, session=None): + if not session: + session = get_session() + + result = session.query(models.Host + ).filter_by(deleted=False + ).filter_by(id=host_id + ).first() + + if not result: + raise exception.NotFound('No host for id %s' % host_id) + + return result + + +@require_admin_context +def host_get_all(context, session=None): + if not session: + session = get_session() + + result = session.query(models.Host + ).filter_by(deleted=False + ).all() + + if not result: + raise exception.NotFound('No host record found .') + + return result + + +@require_admin_context +def host_get_by_name(context, host, session=None): + if not session: + session = get_session() + + result = session.query(models.Host + ).filter_by(deleted=False + ).filter_by(name=host + ).first() + + if not result: + raise exception.NotFound('No host for name %s' % host) + + return result + + +@require_admin_context +def host_update(context, host_id, values): + session = get_session() + with session.begin(): + host_ref = host_get(context, host_id, session=session) + for (key, value) in values.iteritems(): + host_ref[key] = value + host_ref.save(session=session) + + +@require_admin_context +def host_deactivated(context, host): + host_update(context, host, {'deleted': True}) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index fe0a9a921..16406f79a 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -138,6 +138,24 @@ class NovaBase(object): # __tablename__ = 'hosts' # id = Column(String(255), primary_key=True) +# this class is created by masumotok +class Host(BASE, NovaBase): + """Represents a host where services are running""" + __tablename__ = 'hosts' + id = Column(Integer, primary_key=True) + name = Column(String(255)) + cpu = Column(Integer, nullable=False, default=-1) + memory_mb = Column(Integer, nullable=False, default=-1) + hdd_gb = Column(Integer, nullable=False, default=-1) + #cpuid = 
Column(Integer, nullable=False) + deleted = Column(Boolean, default=False) + # C: when calling service_create() + # D: never deleted. instead of deleting cloumn "deleted" is true + # when host is down + # b/c Host.id is foreign key of service, and records + # of the "service" table are not deleted. + # R: Column "deleted" is true when calling hosts_up() and host is down. + class Service(BASE, NovaBase): """Represents a running service on a host.""" @@ -526,10 +544,14 @@ def register_models(): it will never need to be called explicitly elsewhere. """ from sqlalchemy import create_engine + #models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp, + # FloatingIp, Network, SecurityGroup, + # SecurityGroupIngressRule, SecurityGroupInstanceAssociation, + # AuthToken, User, Project) # , Image, Host models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, - AuthToken, User, Project) # , Image, Host + AuthToken, User, Project, Host) # , Image engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/network/manager.py b/nova/network/manager.py index a7298b47f..e84b8a8f9 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -112,10 +112,14 @@ class NetworkManager(manager.Manager): # the designated network host. ctxt = context.get_admin_context() for network in self.db.host_get_networks(ctxt, self.host): + print '<<<<<< nova.network.manager.init_host <<<<' + print '<<<<<< nova.network.manager.init_host (%s)<<<<' % network['id'] self._on_set_network_host(ctxt, network['id']) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" + print '<<<<<< nova.network.manager.set_network_host <<<<' + print '<<<<<< nova.network.manager.set_network_host (%s)<<<<' % network_id logging.debug("setting network host") host = self.db.network_set_host(context, network_id, @@ -452,6 +456,9 @@ class VlanManager(NetworkManager): self.driver.ensure_vlan_forward(network_ref['vpn_public_address'], network_ref['vpn_public_port'], network_ref['vpn_private_address']) + print '--------------------' + print 'UUUUUUPdate dhcp!' + print '--------------------' self.driver.update_dhcp(context, network_ref['id']) def setup_compute_network(self, context, instance_id): diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 60a3d2b4b..db8c3c30c 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -29,6 +29,10 @@ from nova import flags from nova import manager from nova import rpc from nova import utils +# 3 modules are added by masumotok +from nova import exception +from nova.api.ec2 import cloud +from nova.compute import power_state FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', @@ -66,3 +70,106 @@ class SchedulerManager(manager.Manager): {"method": method, "args": kwargs}) logging.debug("Casting to %s %s for %s", topic, host, method) + + # created by masumotok + def live_migration(self, context, ec2_id, dest): + """ live migration method""" + + # 1. get instance id + internal_id = cloud.ec2_id_to_internal_id(ec2_id) + instance_ref = db.instance_get_by_internal_id(context, internal_id) + instance_id = instance_ref['id'] + + # 2. check dst host still has enough capacities + if not self.has_enough_resource(context, instance_id, dest): + return False + + # 3. 
change instance_state + db.instance_set_state(context, + instance_id, + power_state.PAUSED, + 'migrating') + + # 4. request live migration + host = instance_ref['host'] + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": 'live_migration', + "args": {'instance_id': instance_id, + 'dest': dest}}) + return True + + # this method is created by masumotok + def has_enough_resource(self, context, instance_id, dest): + + # get instance information + instance_ref = db.instance_get(context, instance_id) + ec2_id = instance_ref['hostname'] + vcpus = instance_ref['vcpus'] + mem = instance_ref['memory_mb'] + hdd = instance_ref['local_gb'] + + # get host information + host_ref = db.host_get_by_name(context, dest) + total_cpu = int(host_ref['cpu']) + total_mem = int(host_ref['memory_mb']) + total_hdd = int(host_ref['hdd_gb']) + + instances_ref = db.instance_get_all_by_host(context, dest) + for i_ref in instances_ref: + total_cpu -= int(i_ref['vcpus']) + total_mem -= int(i_ref['memory_mb']) + total_hdd -= int(i_ref['local_gb']) + + # check host has enough information + logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' % + (dest, total_cpu, total_mem, total_hdd)) + logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' % + (ec2_id, total_cpu, total_mem, total_hdd)) + + if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd: + logging.debug('%s doesnt have enough resource for %s' % + (dest, ec2_id)) + return False + + logging.debug('%s has enough resource for %s' % (dest, ec2_id)) + return True + + # this method is created by masumotok + def show_host_resource(self, context, host, *args): + """ show the physical/usage resource given by hosts.""" + + try: + host_ref = db.host_get_by_name(context, host) + except exception.NotFound: + return {'ret': False, 'msg': 'No such Host'} + except: + raise + + # get physical resource information + h_resource = {'cpu': host_ref['cpu'], + 'memory_mb': host_ref['memory_mb'], + 'hdd_gb': host_ref['hdd_gb']} + + # get usage resource information + u_resource = {} + instances_ref = db.instance_get_all_by_host(context, host_ref['name']) + + if 0 == len(instances_ref): + return {'ret': True, 'phy_resource': h_resource, 'usage': {}} + + project_ids = [i['project_id'] for i in instances_ref] + project_ids = list(set(project_ids)) + for p_id in project_ids: + cpu = db.instance_get_vcpu_sum_by_host_and_project(context, + host, + p_id) + mem = db.instance_get_memory_sum_by_host_and_project(context, + host, + p_id) + hdd = db.instance_get_disk_sum_by_host_and_project(context, + host, + p_id) + u_resource[p_id] = {'cpu': cpu, 'memory_mb': mem, 'hdd_gb': hdd} + + return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource} diff --git a/nova/service.py b/nova/service.py index 9454d4049..4b97062b4 100644 --- a/nova/service.py +++ b/nova/service.py @@ -72,6 +72,14 @@ class Service(object, service.Service): self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() + + # this try-except operations are added by masumotok + try: + host_ref = db.host_get_by_name(ctxt, self.host) + except exception.NotFound: + host_ref = db.host_create(ctxt, {'name': self.host}) + host_ref = self._update_host_ref(ctxt, host_ref) + try: service_ref = db.service_get_by_args(ctxt, self.host, @@ -109,6 +117,20 @@ class Service(object, service.Service): 'report_count': 0}) self.service_id = service_ref['id'] + # created by masumotok + def _update_host_ref(self, context, host_ref): + + if 0 <= 
self.manager_class_name.find('ComputeManager'): + cpu = self.manager.get_cpu_number() + memory_mb = self.manager.get_mem_size() + hdd_gb = self.manager.get_hdd_size() + db.host_update(context, + host_ref['id'], + {'cpu': cpu, + 'memory_mb': memory_mb, + 'hdd_gb': hdd_gb}) + return host_ref + def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) diff --git a/nova/utils.py b/nova/utils.py index 142584df8..9e4ba6bc2 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -133,9 +133,16 @@ def runthis(prompt, cmd, check_exit_code=True): def generate_uid(topic, size=8): - characters = '01234567890abcdefghijklmnopqrstuvwxyz' - choices = [random.choice(characters) for x in xrange(size)] - return '%s-%s' % (topic, ''.join(choices)) + #modified by masumotok + #characters = '01234567890abcdefghijklmnopqrstuvwxyz' + #choices = [random.choice(characters) for x in xrange(size)] + #return '%s-%s' % (topic, ''.join(choices)) + if topic == "i": + return random.randint(0, 2 ** 28 - 1) + else: + characters = '01234567890abcdefghijklmnopqrstuvwxyz' + choices = [random.choice(characters) for x in xrange(size)] + return '%s-%s' % (topic, ''.join(choices)) def generate_mac(): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18085089f..4ed791130 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,6 +44,8 @@ Supports KVM, QEMU, UML, and XEN. import logging import os import shutil +# appended by masumotok +#import libvirt import IPy from twisted.internet import defer @@ -101,6 +103,10 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') +# added by masumotok +flags.DEFINE_string('live_migration_uri', + "qemu+tcp://%s/system", + 'Define protocol used by live_migration feature') flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') @@ -648,6 +654,101 @@ class LibvirtConnection(object): fw = NWFilterFirewall(self._conn) fw.ensure_security_group_filter(security_group_id) + # created by masumotok + def setup_nwfilters_for_instance(self, instance): + nwfilter = NWFilterFirewall(self._conn) + return nwfilter.setup_nwfilters_for_instance(instance) + + # created by masumotok + def nwfilter_for_instance_exists(self, instance_ref): + try: + filter = 'nova-instance-%s' % instance_ref.name + self._conn.nwfilterLookupByName(filter) + return True + except libvirt.libvirtError: + return False + + # created by masumotok + def live_migration(self, instance_ref, dest): + uri = FLAGS.live_migration_uri % dest + out, err = utils.execute("sudo virsh migrate --live %s %s" + % (instance_ref.name, uri)) + + # wait for completion of live_migration + d = defer.Deferred() + d.addCallback(lambda _: self._post_live_migration(instance_ref, dest)) + timer = task.LoopingCall(f=None) + + def _wait_for_live_migration(): + try: + state = self.get_info(instance_ref.name)['state'] + #except libvirt.libvirtError, e: + except exception.NotFound: + timer.stop() + d.callback(None) + timer.f = _wait_for_live_migration + timer.start(interval=0.5, now=True) + return d + + # created by masumotok + def _post_live_migration(self, instance_ref, dest): + + # 1. 
detaching volumes + # (not necessary in current version ) + #try : + # ec2_id = instance_ref['ec2_id'] + # volumes = db.volume_get_by_ec2_id(context, ec2_id) + # for volume in volumes : + # self.detach_volume(context, instance_id, volume.id) + #except exception.NotFound: + # logging.debug('%s doesnt mount any volumes.. ' % ec2_id) + + # 2. releasing vlan + # (not necessary in current implementation?) + + # 3. releasing security group ingress rule + # (not necessary in current implementation?) + + # 4. database updating + ec2_id = instance_ref['hostname'] + ctxt = context.get_admin_context() + + instance_id = instance_ref['id'] + fixed_ip = db.instance_get_fixed_address(ctxt, instance_id) + # not return if fixed_ip is not found, otherwise, + # instance never be accessible.. + if None == fixed_ip: + logging.error('fixed_ip is not found for %s ' % ec2_id) + db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) + network_ref = db.fixed_ip_get_network(ctxt, fixed_ip) + db.network_update(ctxt, network_ref['id'], {'host': dest}) + + try: + floating_ip = db.instance_get_floating_address(ctxt, instance_id) + # not return if floating_ip is not found, otherwise, + # instance never be accessible.. + if None == floating_ip: + logging.error('floating_ip is not found for %s ' % ec2_id) + floating_ip_ref = db.floating_ip_get_by_address(ctxt, floating_ip) + db.floating_ip_update(ctxt, + floating_ip_ref['address'], + {'host': dest}) + except exception.NotFound: + logging.debug('%s doesnt have floating_ip.. ' % ec2_id) + except: + msg = 'Live migration: Unexpected error:' + msg += '%s cannot inherit floating ip.. ' % ec2_id + logging.error(msg) + + db.instance_update(ctxt, + instance_id, + {'state_description': 'running', + 'state': power_state.RUNNING, + 'host': dest}) + + logging.info('Live migrating %s to %s finishes successfully' + % (ec2_id, dest)) + class NWFilterFirewall(object): """ diff --git a/setup.py b/setup.py index d88bc1e6f..938043d21 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,7 @@ from sphinx.setup_command import BuildDoc from nova.utils import parse_mailmap, str_dict_replace + class local_BuildDoc(BuildDoc): def run(self): for builder in ['html', 'man']: @@ -54,8 +55,8 @@ setup(name='nova', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', - cmdclass={ 'sdist': local_sdist, - 'build_sphinx' : local_BuildDoc }, + cmdclass={'sdist': local_sdist, + 'build_sphinx': local_BuildDoc}, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, scripts=['bin/nova-api', -- cgit From 096a06c26ea64af36b38bbeb1c99e2eeaa48aec5 Mon Sep 17 00:00:00 2001 From: masumotok Date: Tue, 7 Dec 2010 19:38:19 +0900 Subject: adding README.livemigration.txt --- README.livemigration.txt | 136 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 README.livemigration.txt diff --git a/README.livemigration.txt b/README.livemigration.txt new file mode 100644 index 000000000..3b403edf1 --- /dev/null +++ b/README.livemigration.txt @@ -0,0 +1,136 @@ +# +# Live migration feature usage: +# +# @auther Kei Masumoto +# @date 2010.12.01 +# +# @history ver.1 2010.12.01 ( masumotok ) +# initial version +# + +0. pre-requisit settings + OS: Ubuntu lucid 10.04 for both instances and host. + NFS: nova-install-dir/instances has to be mounted by shared storage. + ( this version is tested using NFS) + Network manager: Only VlanManager can be used in this version. 
+ instances : Instance must keep running without any EBS volume. + + +1. pre-requisite settings. + + (a) shared storage + As mentioned above, shared storage is inevitable for the live_migration functionality. + An example is NFS( my test environment ), and example setting is as follows. + + Prepare NFS server machine( nova-api server is OK), and add below line /etc/exports: + + > nova-install-dir/instances a.b.c.d/255.255.0.0(rw,sync,fsid=0,no_root_squash) + + where "nova-install-dir" is the directory which openstack is installed, and + add appropriate ip address and netmask for "a.b.c.d/255.255.0.0" , which should include + compute nodes which try to mount this directory. + + Then restart nfs server. + + > /etc/init.d/nfs-kernel-server restart + + Also, at any compute nodes, add below line to /etc/fstab: + + >172.19.0.131:/ DIR nfs4 defaults 0 0 + + where "DIR" must be same as 'instance_path'( see nova.compute.manager for the default value) + + Then try to mount, + + > mount -a -v + + Check exported directory is successfully mounted. if fail, try this at any hosts, + + > iptables -F + + + (b) libvirt settings + In default configuration, this feature use simple tcp protocol(qemu+tcp://). + To use this protocol, below configuration is necessary. + + a. modify /etc/libvirt/libvirt.conf + + before : #listen_tls = 0 + after : listen_tls = 0 + + before : #listen_tcp = 0 + after : listen_tcp = 0 + + append : auth_tcp = "none" + + b. modify /etc/init/libvirt-bin.conf + + before : exec /usr/sbin/libvirtd -d + after : exec /usr/sbin/libvirtd -d -l + + c. modify /etc/default/libvirt-bin + + before :libvirtd_opts=" -d" + after :libvirtd_opts=" -d -l" + + then, restart libvirt + stop libvirt-bin && start libvirt-bin + ps -ef | grep libvirt + + make sure you get the below result. + > root@openstack2:/opt/nova-2010.2# ps -ef | grep libvirt + > root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l + + if you would like to use qemu+ssh or other protocol, change "live_migration_uri" flag. + by adding "--live_migration_uri" to /etc/nova/nova.conf (Note that file name may be + changed depends on version). + + +2. command usage + + To get a list of physical hosts, + nova-manage host list + + To get a available pysical resource of each host, + nova-manage host show hostname + + an example result is below: + > HOST PROJECT cpu mem(mb) disk(gb) + > openstack2-c2 16 32232 878 + > openstack2-c2 admin 1 2048 20 + + The 1st line shows total amount of resource that the specified host has. + The 2nd and latter lines show usage resource per project. + This command is created because admins can decide which host should be + a destination of live migration. + + For live migration, + nova-manage instances ec2-id(i-xxxx) destination-host-name. + + once this command is executed, admins will check the status through + euca-describe-instances. The status is changed from 'running' to 'migrating', + and changed to 'running' when live migration finishes. + Note that it depends on an environment how long it takes to live migration finishes. + If it finishes too fast, admins cannot see 'migrating' status. + + > root@openstack2:/opt/nova-2010.2# euca-describe-instances + > Reservation:r-qlg3favp + > RESERVATION r-qlg3favp admin + > INSTANCE i-2ah453 ami-tiny 172.19.0.134 10.0.0.3 + > migrating testkey (admin, openstack2-c2) 0 m1.small + > 2010-11-28 16:09:16 openstack2-c2 + + When live migration finishes successfully, admin can check the last part of + euca-describe-instances which shows physical node information. 
+ ( only when euca-describe-instances is executed by admin user ) + Admins also can check live migration source compute node logfile which may + show a log. + > Live migration i-xxxx to DESTHOST finishes successfully. + + +3. error checking + When live migration fails somehow, error message shows at: + a. scheduler logfile + b. source compute node logfile + c. dest compute node logfile + -- cgit From 3313a5170a83feb6e571faa6296ffea7f065ec25 Mon Sep 17 00:00:00 2001 From: masumotok Date: Wed, 8 Dec 2010 17:21:04 +0900 Subject: コメントを除去 README.live_migration.txtのレビュー結果を修正 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.livemigration.txt | 25 +++++++++++++++++++++---- nova/compute/api.py | 3 --- nova/compute/manager.py | 12 ------------ nova/db/api.py | 6 ------ nova/db/sqlalchemy/api.py | 7 ------- nova/network/manager.py | 7 ------- nova/scheduler/manager.py | 5 +---- nova/service.py | 2 -- nova/virt/libvirt_conn.py | 7 ------- 9 files changed, 22 insertions(+), 52 deletions(-) diff --git a/README.livemigration.txt b/README.livemigration.txt index 3b403edf1..2a2f7fbe8 100644 --- a/README.livemigration.txt +++ b/README.livemigration.txt @@ -33,12 +33,13 @@ Then restart nfs server. > /etc/init.d/nfs-kernel-server restart + > /etc/init.d/idmapd restart Also, at any compute nodes, add below line to /etc/fstab: >172.19.0.131:/ DIR nfs4 defaults 0 0 - where "DIR" must be same as 'instance_path'( see nova.compute.manager for the default value) + where "DIR" must be same as 'instances_path'( see nova.compute.manager for the default value) Then try to mount, @@ -48,6 +49,22 @@ > iptables -F + Also, check file/daemon permissions. + we expect any nova daemons are running as root. + > root@openstack2-api:/opt/nova-2010.4# ps -ef | grep nova + > root 5948 5904 9 11:29 pts/4 00:00:00 python /opt/nova-2010.4//bin/nova-api + > root 5952 5908 6 11:29 pts/5 00:00:00 python /opt/nova-2010.4//bin/nova-objectstore + > ... (snip) + + "instances/" directory can be seen from server side: + > root@openstack:~# ls -ld nova-install-dir/instances/ + > drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/ + + also, client side: + > root@openstack-client:~# ls -ld nova-install-dir/instances/ + > drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/ + + (b) libvirt settings In default configuration, this feature use simple tcp protocol(qemu+tcp://). @@ -58,8 +75,8 @@ before : #listen_tls = 0 after : listen_tls = 0 - before : #listen_tcp = 0 - after : listen_tcp = 0 + before : #listen_tcp = 1 + after : listen_tcp = 1 append : auth_tcp = "none" @@ -105,7 +122,7 @@ a destination of live migration. For live migration, - nova-manage instances ec2-id(i-xxxx) destination-host-name. + nova-manage instances live_migration ec2-id(i-xxxx) destination-host-name. once this command is executed, admins will check the status through euca-describe-instances. 
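A worked example, for reference, of the capacity check that gates this command. The numbers are taken from the 'nova-manage host show' sample earlier in this README, and the check itself is SchedulerManager.has_enough_resource() added in this series:

    # Destination totals in the sample : 16 vcpus, 32232 MB memory, 878 GB disk
    # Already used there (project admin): 1 vcpu,  2048 MB,          20 GB
    # Remaining                          : 15 vcpus, 30184 MB,        858 GB
    # Migrating another instance of the same size (1 vcpu / 2048 MB / 20 GB)
    # is accepted, since every remaining value is still greater than the
    # requested one; otherwise the scheduler logs
    # "... doesnt have enough resource ..." and the migration is refused.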
The status is changed from 'running' to 'migrating', diff --git a/nova/compute/api.py b/nova/compute/api.py index 03922c272..929342a1e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -84,7 +84,6 @@ class ComputeAPI(base.Base): if not type(security_group) is list: security_group = [security_group] - print '<<<<<<<<<<<<<<<<<<<<<<<<<<1' security_groups = [] self.ensure_default_security_group(context) for security_group_name in security_group: @@ -93,7 +92,6 @@ class ComputeAPI(base.Base): security_group_name) security_groups.append(group['id']) - print '<<<<<<<<<<<<<<<<<<<<<<<<<<2' if key_data is None and key_name: key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] @@ -117,7 +115,6 @@ class ComputeAPI(base.Base): 'key_name': key_name, 'key_data': key_data} - print '<<<<<<<<<<<<<<<<<<<<<<<<<<3' elevated = context.elevated() instances = [] logging.debug("Going to run %s instances...", num_instances) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4c42153b6..d271d17a4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,11 +36,8 @@ terminating it. import datetime import logging -# added by masumotok import sys -# added by masumotok import traceback -# added by masumotok import os @@ -51,9 +48,7 @@ from nova import flags from nova import manager from nova import utils from nova.compute import power_state -# added by masumotok from nova import rpc -# added by masumotok from nova import db FLAGS = flags.FLAGS @@ -61,7 +56,6 @@ flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') -# created by masumotok flags.DEFINE_string('live_migration_timeout', 30, 'Timeout value for pre_live_migration is completed.') @@ -266,12 +260,10 @@ class ComputeManager(manager.Manager): self.db.volume_detached(context, volume_id) defer.returnValue(True) - # created by masumotok def get_cpu_number(self): """Get the number of physical computer cpu core .""" return open('/proc/cpuinfo').read().count('processor') - # created by masumotok def get_mem_size(self): """Get the memory size of physical computer .""" meminfo = open('/proc/meminfo').read().split() @@ -279,13 +271,11 @@ class ComputeManager(manager.Manager): # transforming kb to mb. 
return int(meminfo[idx + 1]) / 1024 - # created by masumotok def get_hdd_size(self): """Get the hdd size of physical computer .""" hddinfo = os.statvfs(FLAGS.instances_path) return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024 - # created by masumotok def pre_live_migration(self, context, instance_id, dest): """Any preparation for live migration at dst host.""" @@ -334,13 +324,11 @@ class ComputeManager(manager.Manager): self.network_manager.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge']) - # created by masumotok def nwfilter_for_instance_exists(self, context, instance_id): """Check nova-instance-instance-xxx filter exists """ instance_ref = db.instance_get(context, instance_id) return self.driver.nwfilter_for_instance_exists(instance_ref) - # created by masumotok def live_migration(self, context, instance_id, dest): """executes live migration.""" diff --git a/nova/db/api.py b/nova/db/api.py index c62d5d6ef..71e8151e7 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -195,7 +195,6 @@ def floating_ip_get_by_address(context, address): return IMPL.floating_ip_get_by_address(context, address) -# this method is created by masumotok def floating_ip_update(context, address, values): """update floating ip information.""" return IMPL.floating_ip_update(context, address, values) @@ -339,13 +338,11 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -# created by masumotok def instance_get_all_by_host(context, hostname): """Get instances by host""" return IMPL.instance_get_all_by_host(context, hostname) -# created by masumotok def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): """Get instances.vcpus by host and project""" return IMPL.instance_get_vcpu_sum_by_host_and_project(context, @@ -353,7 +350,6 @@ def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): proj_id) -# created by masumotok def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): """Get amount of memory by host and project """ return IMPL.instance_get_memory_sum_by_host_and_project(context, @@ -361,7 +357,6 @@ def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): proj_id) -# created by masumotok def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): """Get total amount of disk by host and project """ return IMPL.instance_get_disk_sum_by_host_and_project(context, @@ -870,7 +865,6 @@ def host_get_networks(context, host): return IMPL.host_get_networks(context, host) -# below all methods related to host table are created by masumotok ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 811575765..45a10bc22 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -394,7 +394,6 @@ def floating_ip_get_by_address(context, address, session=None): return result -# created by masumotok @require_context def floating_ip_update(context, address, values): session = get_session() @@ -757,7 +756,6 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) -# created by masumotok def instance_get_all_by_host(context, hostname): session = get_session() if not session: @@ -772,7 +770,6 @@ def instance_get_all_by_host(context, hostname): return result -# created by masumotok def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): session = get_session() @@ -786,19 +783,16 @@ def _instance_get_sum_by_host_and_project(context, column, hostname, 
proj_id): return result -# created by masumotok def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'vcpus', hostname, proj_id) -# created by masumotok def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'memory_mb', hostname, proj_id) -# created by masumotok def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'local_gb', hostname, proj_id) @@ -1805,7 +1799,6 @@ def host_get_networks(context, host): all() -#below all methods related to host table are created by masumotok ################### @require_admin_context diff --git a/nova/network/manager.py b/nova/network/manager.py index e84b8a8f9..a7298b47f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -112,14 +112,10 @@ class NetworkManager(manager.Manager): # the designated network host. ctxt = context.get_admin_context() for network in self.db.host_get_networks(ctxt, self.host): - print '<<<<<< nova.network.manager.init_host <<<<' - print '<<<<<< nova.network.manager.init_host (%s)<<<<' % network['id'] self._on_set_network_host(ctxt, network['id']) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - print '<<<<<< nova.network.manager.set_network_host <<<<' - print '<<<<<< nova.network.manager.set_network_host (%s)<<<<' % network_id logging.debug("setting network host") host = self.db.network_set_host(context, network_id, @@ -456,9 +452,6 @@ class VlanManager(NetworkManager): self.driver.ensure_vlan_forward(network_ref['vpn_public_address'], network_ref['vpn_public_port'], network_ref['vpn_private_address']) - print '--------------------' - print 'UUUUUUPdate dhcp!' 
- print '--------------------' self.driver.update_dhcp(context, network_ref['id']) def setup_compute_network(self, context, instance_id): diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index db8c3c30c..4345cfb0a 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -29,7 +29,6 @@ from nova import flags from nova import manager from nova import rpc from nova import utils -# 3 modules are added by masumotok from nova import exception from nova.api.ec2 import cloud from nova.compute import power_state @@ -71,7 +70,6 @@ class SchedulerManager(manager.Manager): "args": kwargs}) logging.debug("Casting to %s %s for %s", topic, host, method) - # created by masumotok def live_migration(self, context, ec2_id, dest): """ live migration method""" @@ -99,8 +97,8 @@ class SchedulerManager(manager.Manager): 'dest': dest}}) return True - # this method is created by masumotok def has_enough_resource(self, context, instance_id, dest): + """ check if destination host has enough resource for live migration""" # get instance information instance_ref = db.instance_get(context, instance_id) @@ -135,7 +133,6 @@ class SchedulerManager(manager.Manager): logging.debug('%s has enough resource for %s' % (dest, ec2_id)) return True - # this method is created by masumotok def show_host_resource(self, context, host, *args): """ show the physical/usage resource given by hosts.""" diff --git a/nova/service.py b/nova/service.py index 4b97062b4..648293cea 100644 --- a/nova/service.py +++ b/nova/service.py @@ -73,7 +73,6 @@ class Service(object, service.Service): self.model_disconnected = False ctxt = context.get_admin_context() - # this try-except operations are added by masumotok try: host_ref = db.host_get_by_name(ctxt, self.host) except exception.NotFound: @@ -117,7 +116,6 @@ class Service(object, service.Service): 'report_count': 0}) self.service_id = service_ref['id'] - # created by masumotok def _update_host_ref(self, context, host_ref): if 0 <= self.manager_class_name.find('ComputeManager'): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4ed791130..783f2409e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,8 +44,6 @@ Supports KVM, QEMU, UML, and XEN. import logging import os import shutil -# appended by masumotok -#import libvirt import IPy from twisted.internet import defer @@ -103,7 +101,6 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -# added by masumotok flags.DEFINE_string('live_migration_uri', "qemu+tcp://%s/system", 'Define protocol used by live_migration feature') @@ -654,12 +651,10 @@ class LibvirtConnection(object): fw = NWFilterFirewall(self._conn) fw.ensure_security_group_filter(security_group_id) - # created by masumotok def setup_nwfilters_for_instance(self, instance): nwfilter = NWFilterFirewall(self._conn) return nwfilter.setup_nwfilters_for_instance(instance) - # created by masumotok def nwfilter_for_instance_exists(self, instance_ref): try: filter = 'nova-instance-%s' % instance_ref.name @@ -668,7 +663,6 @@ class LibvirtConnection(object): except libvirt.libvirtError: return False - # created by masumotok def live_migration(self, instance_ref, dest): uri = FLAGS.live_migration_uri % dest out, err = utils.execute("sudo virsh migrate --live %s %s" @@ -690,7 +684,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) return d - # created by masumotok def _post_live_migration(self, instance_ref, dest): # 1. 
detaching volumes -- cgit From 64b2ada5efe69709aeca54768130fb0fa01987e7 Mon Sep 17 00:00:00 2001 From: masumotok Date: Wed, 8 Dec 2010 17:23:51 +0900 Subject: test commit --- README.livemigration.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/README.livemigration.txt b/README.livemigration.txt index 2a2f7fbe8..9229bafa6 100644 --- a/README.livemigration.txt +++ b/README.livemigration.txt @@ -8,6 +8,7 @@ # initial version # + 0. pre-requisit settings OS: Ubuntu lucid 10.04 for both instances and host. NFS: nova-install-dir/instances has to be mounted by shared storage. -- cgit From 4809c1bf82130f969614a8f0458636a462b81a88 Mon Sep 17 00:00:00 2001 From: masumotok Date: Thu, 16 Dec 2010 18:20:04 +0900 Subject: Hostテーブルのカラム名を修正 FlatManager, FlatDHCPManagerに対応 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bin/nova-manage | 12 ++++-------- nova/compute/manager.py | 32 +++++++++++--------------------- nova/db/sqlalchemy/api.py | 6 ++++++ nova/db/sqlalchemy/models.py | 4 ++-- nova/network/manager.py | 15 +++++++++------ nova/scheduler/manager.py | 14 ++++++++------ nova/service.py | 8 ++++---- nova/virt/libvirt_conn.py | 4 ++++ 8 files changed, 48 insertions(+), 47 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index d41ae8600..d1cda72b7 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -441,10 +441,6 @@ class InstanceCommands(object): logging.basicConfig() ctxt = context.get_admin_context() - if 'nova.network.manager.VlanManager' != FLAGS.network_manager : - msg = 'Only nova.network.manager.VlanManager is supported now. Sorry!' - raise Exception(msg) - # 1. whether destination host exists host_ref = db.host_get_by_name(ctxt, dest) @@ -513,18 +509,18 @@ class HostCommands(object): elif not result['ret'] : print '%s' % result['msg'] else : - cpu = result['phy_resource']['cpu'] + cpu = result['phy_resource']['vcpus'] mem = result['phy_resource']['memory_mb'] - hdd = result['phy_resource']['hdd_gb'] + hdd = result['phy_resource']['local_gb'] print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)' print '%s\t\t\t%s\t%s\t%s' % ( host,cpu, mem, hdd) for p_id, val in result['usage'].items() : print '%s\t%s\t\t%s\t%s\t%s' % ( host, p_id, - val['cpu'], + val['vcpus'], val['memory_mb'], - val['hdd_gb']) + val['local_gb']) def has_keys(self, dic, keys): not_found = [ key for key in keys if not dict.has_key(key) ] diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d271d17a4..81cca7770 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -260,9 +260,9 @@ class ComputeManager(manager.Manager): self.db.volume_detached(context, volume_id) defer.returnValue(True) - def get_cpu_number(self): - """Get the number of physical computer cpu core .""" - return open('/proc/cpuinfo').read().count('processor') + def get_vcpu_number(self): + """Get the number of vcpu on physical computer.""" + return self.driver.get_vcpu_number() def get_mem_size(self): """Get the memory size of physical computer .""" @@ -302,27 +302,19 @@ class ComputeManager(manager.Manager): ''.join(traceback.format_tb(sys.exc_info()[2]))) return - # 3. getting network refs - network_ref = db.fixed_ip_get_network(context, fixed_ip) - - # 4. security rules (filtering rules) - secgrp_refs = db.security_group_get_by_instance(context, instance_id) - - # 5. if any volume is mounted, prepare here. + # 3. if any volume is mounted, prepare here. if 0 != len(shelf_slots): pass - # 6. 
create nova-instance-instance-xxx in hypervisor through libvirt - # (This rule can be seen by executing virsh nwfilter-list) + # 4. Creating nova-instance-instance-xxx, this is written to libvirt.xml, + # and can be seen when executin "virsh nwfiter-list" On destination host, + # this nwfilter is necessary. + # In addition this method is creating security rule ingress rule onto + # destination host. self.driver.setup_nwfilters_for_instance(instance_ref) - # 7. insert filtering rule - for secgrp_ref in secgrp_refs: - self.driver.refresh_security_group(secgrp_ref.id) - - # 8. vlan settings - self.network_manager.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge']) + # 5. bridge settings + self.network_manager.setup_compute_network(instance_id) def nwfilter_for_instance_exists(self, context, instance_id): """Check nova-instance-instance-xxx filter exists """ @@ -375,5 +367,3 @@ class ComputeManager(manager.Manager): if not ret: logging.debug('Fail to live migration') return - - diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 45a10bc22..e4792fe23 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -744,6 +744,7 @@ def instance_update(context, instance_id, values): instance_ref.save(session=session) +@require_context def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance""" session = get_session() @@ -756,6 +757,7 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) +@require_context def instance_get_all_by_host(context, hostname): session = get_session() if not session: @@ -770,6 +772,7 @@ def instance_get_all_by_host(context, hostname): return result +@require_context def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): session = get_session() @@ -783,16 +786,19 @@ def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): return result +@require_context def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'vcpus', hostname, proj_id) +@require_context def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'memory_mb', hostname, proj_id) +@require_context def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'local_gb', hostname, proj_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 16406f79a..db6f51948 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -144,9 +144,9 @@ class Host(BASE, NovaBase): __tablename__ = 'hosts' id = Column(Integer, primary_key=True) name = Column(String(255)) - cpu = Column(Integer, nullable=False, default=-1) + vcpus = Column(Integer, nullable=False, default=-1) memory_mb = Column(Integer, nullable=False, default=-1) - hdd_gb = Column(Integer, nullable=False, default=-1) + local_gb = Column(Integer, nullable=False, default=-1) #cpuid = Column(Integer, nullable=False) deleted = Column(Boolean, default=False) # C: when calling service_create() diff --git a/nova/network/manager.py b/nova/network/manager.py index a7298b47f..a08b6094d 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -139,7 +139,7 @@ class NetworkManager(manager.Manager): """Called when this host becomes the host for a network.""" raise 
NotImplementedError() - def setup_compute_network(self, context, instance_id): + def setup_compute_network(self, context, instance_id, network_ref=None): """Sets up matching network for compute hosts.""" raise NotImplementedError() @@ -298,7 +298,7 @@ class FlatManager(NetworkManager): self.db.fixed_ip_update(context, address, {'allocated': False}) self.db.fixed_ip_disassociate(context.elevated(), address) - def setup_compute_network(self, context, instance_id): + def setup_compute_network(self, context, instance_id, network_ref=None): """Network is created manually.""" pass @@ -358,9 +358,10 @@ class FlatDHCPManager(FlatManager): super(FlatDHCPManager, self).init_host() self.driver.metadata_forward() - def setup_compute_network(self, context, instance_id): + def setup_compute_network(self, context, instance_id, network_ref=None): """Sets up matching network for compute hosts.""" - network_ref = db.network_get_by_instance(context, instance_id) + if network_ref is None: + network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_bridge(network_ref['bridge'], FLAGS.flat_interface, network_ref) @@ -454,9 +455,11 @@ class VlanManager(NetworkManager): network_ref['vpn_private_address']) self.driver.update_dhcp(context, network_ref['id']) - def setup_compute_network(self, context, instance_id): + + def setup_compute_network(self, context, instance_id, network_ref=None): """Sets up matching network for compute hosts.""" - network_ref = db.network_get_by_instance(context, instance_id) + if network_ref is None: + network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge']) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 4345cfb0a..d36525506 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -109,9 +109,9 @@ class SchedulerManager(manager.Manager): # get host information host_ref = db.host_get_by_name(context, dest) - total_cpu = int(host_ref['cpu']) + total_cpu = int(host_ref['vcpus']) total_mem = int(host_ref['memory_mb']) - total_hdd = int(host_ref['hdd_gb']) + total_hdd = int(host_ref['local_gb']) instances_ref = db.instance_get_all_by_host(context, dest) for i_ref in instances_ref: @@ -144,9 +144,9 @@ class SchedulerManager(manager.Manager): raise # get physical resource information - h_resource = {'cpu': host_ref['cpu'], + h_resource = {'vcpus': host_ref['vcpus'], 'memory_mb': host_ref['memory_mb'], - 'hdd_gb': host_ref['hdd_gb']} + 'local_gb': host_ref['local_gb']} # get usage resource information u_resource = {} @@ -158,7 +158,7 @@ class SchedulerManager(manager.Manager): project_ids = [i['project_id'] for i in instances_ref] project_ids = list(set(project_ids)) for p_id in project_ids: - cpu = db.instance_get_vcpu_sum_by_host_and_project(context, + vcpus = db.instance_get_vcpu_sum_by_host_and_project(context, host, p_id) mem = db.instance_get_memory_sum_by_host_and_project(context, @@ -167,6 +167,8 @@ class SchedulerManager(manager.Manager): hdd = db.instance_get_disk_sum_by_host_and_project(context, host, p_id) - u_resource[p_id] = {'cpu': cpu, 'memory_mb': mem, 'hdd_gb': hdd} + u_resource[p_id] = {'vcpus': vcpus, + 'memory_mb': mem, + 'local_gb': hdd} return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource} diff --git a/nova/service.py b/nova/service.py index 648293cea..3ce07a3e0 100644 --- a/nova/service.py +++ b/nova/service.py @@ -119,14 +119,14 @@ class Service(object, service.Service): def _update_host_ref(self, context, 
host_ref):
         if 0 <= self.manager_class_name.find('ComputeManager'):
-            cpu = self.manager.get_cpu_number()
+            cpu = self.manager.get_vcpu_number()
             memory_mb = self.manager.get_mem_size()
-            hdd_gb = self.manager.get_hdd_size()
+            local_gb = self.manager.get_hdd_size()
             db.host_update(context,
                            host_ref['id'],
-                           {'cpu': cpu,
+                           {'vcpus': cpu,
                             'memory_mb': memory_mb,
-                            'hdd_gb': hdd_gb})
+                            'local_gb': local_gb})
         return host_ref

     def __getattr__(self, key):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 783f2409e..f2b5cf794 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -631,6 +631,10 @@ class LibvirtConnection(object):

         return interfaces

+    def get_vcpu_number(self):
+        """ get vcpu number of physical computer """
+        return self._conn.getMaxVcpus(None)
+
     def block_stats(self, instance_name, disk):
         """
         Note that this function takes an instance name, not an Instance, so
-- cgit

From 70f1f0d8c7a7214c5b6683c0be863cdbf0f060af Mon Sep 17 00:00:00 2001
From: masumotok
Date: Mon, 20 Dec 2010 08:03:25 +0900
Subject: Add test code to the repository
Changed nova.compute.manager.pre_live_migration() because it could return a
success value even though it had actually failed:
- change the success return value to True
- raise RemoteError when no fixed_ip is found
- update nova.compute.manager.live_migration accordingly
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 nova/compute/manager.py                            |  13 +-
 nova/livemigration_test/SI/picture.pptx            | Bin 0 -> 137730 bytes
 nova/livemigration_test/SI/testCase_SI.xls         | Bin 0 -> 35840 bytes
 .../SI/testParameterSheet_SI.xls                   | Bin 0 -> 464384 bytes
 nova/livemigration_test/SI/utils/demo-firstboot.sh |  39 ++
 .../SI/utils/demo-runInstance.sh                   |  57 +++
 nova/livemigration_test/SI/utils/nova-manage.conf  |  18 +
 nova/livemigration_test/SI/utils/nova.conf         |  10 +
 nova/livemigration_test/SI/utils/nova.sh           | 180 +++++++++
 nova/livemigration_test/SI/utils/nova.sh.compute   |  37 ++
 nova/livemigration_test/UT/computeManager.test.py  | 407 +++++++++++++++++++++
 .../UT/libvirtConnection.test.py                   | 366 ++++++++++++++++++
 nova/livemigration_test/UT/nova-manage.test.py     | 318 ++++++++++++++++
 .../livemigration_test/UT/schedulerManager.test.py | 360 ++++++++++++++++++
 nova/livemigration_test/UT/testCase_UT.xls         | Bin 0 -> 195072 bytes
 15 files changed, 1799 insertions(+), 6 deletions(-)
 create mode 100644 nova/livemigration_test/SI/picture.pptx
 create mode 100644 nova/livemigration_test/SI/testCase_SI.xls
 create mode 100644 nova/livemigration_test/SI/testParameterSheet_SI.xls
 create mode 100755 nova/livemigration_test/SI/utils/demo-firstboot.sh
 create mode 100755 nova/livemigration_test/SI/utils/demo-runInstance.sh
 create mode 100644 nova/livemigration_test/SI/utils/nova-manage.conf
 create mode 100644 nova/livemigration_test/SI/utils/nova.conf
 create mode 100755 nova/livemigration_test/SI/utils/nova.sh
 create mode 100755 nova/livemigration_test/SI/utils/nova.sh.compute
 create mode 100644 nova/livemigration_test/UT/computeManager.test.py
 create mode 100644 nova/livemigration_test/UT/libvirtConnection.test.py
 create mode 100644 nova/livemigration_test/UT/nova-manage.test.py
 create mode 100644 nova/livemigration_test/UT/schedulerManager.test.py
 create mode 100644 nova/livemigration_test/UT/testCase_UT.xls

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 81cca7770..bad525115 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -39,6 +39,7 @@ import logging
 import sys
 import traceback
 import os
+import time

 from twisted.internet import defer

@@ -297,10 +298,10 @@ class
ComputeManager(manager.Manager): # 2. getting fixed ips fixed_ip = db.instance_get_fixed_address(context, instance_id) if None == fixed_ip: - logging.error('Not found fixedip for %s\n%s', - ec2_id, - ''.join(traceback.format_tb(sys.exc_info()[2]))) - return + exc_type = 'NotFoundError' + val = '%s(%s) doesnt have fixed_ip ' % (instance_id, ec2_id) + tb = ''.join(traceback.format_tb(sys.exc_info()[2])) + raise rpc.RemoteError(exc_type, val, tb) # 3. if any volume is mounted, prepare here. if 0 != len(shelf_slots): @@ -315,6 +316,7 @@ class ComputeManager(manager.Manager): # 5. bridge settings self.network_manager.setup_compute_network(instance_id) + return True def nwfilter_for_instance_exists(self, context, instance_id): """Check nova-instance-instance-xxx filter exists """ @@ -324,7 +326,6 @@ class ComputeManager(manager.Manager): def live_migration(self, context, instance_id, dest): """executes live migration.""" - import time # 1. ask dest host to preparing live migration. compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest) ret = rpc.call(context, @@ -333,7 +334,7 @@ class ComputeManager(manager.Manager): "args": {'instance_id': instance_id, 'dest': dest}}) - if rpc.RemoteError == type(ret): + if True != ret: logging.error('Live migration failed(err at %s)', dest) db.instance_set_state(context, instance_id, diff --git a/nova/livemigration_test/SI/picture.pptx b/nova/livemigration_test/SI/picture.pptx new file mode 100644 index 000000000..b47bec9b5 Binary files /dev/null and b/nova/livemigration_test/SI/picture.pptx differ diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls new file mode 100644 index 000000000..723363c1e Binary files /dev/null and b/nova/livemigration_test/SI/testCase_SI.xls differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls new file mode 100644 index 000000000..192d9705b Binary files /dev/null and b/nova/livemigration_test/SI/testParameterSheet_SI.xls differ diff --git a/nova/livemigration_test/SI/utils/demo-firstboot.sh b/nova/livemigration_test/SI/utils/demo-firstboot.sh new file mode 100755 index 000000000..3a6f7fb0b --- /dev/null +++ b/nova/livemigration_test/SI/utils/demo-firstboot.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +DIR=/opt/nova-2010.1 + +# 1. 管理者ユーザを作成する +# nova-manage user admin ユーザ名 access-key secret-key +# +#$DIR/bin/nova-manage user admin admin admin admin + +# 2. プロジェクトを作成する +# nova-manage create project プロジェクト名 プロジェクトに属するユーザ名 +# +#$DIR/bin/nova-manage project create admin admin + +# 3. クラウドを使うための認証情報を生成する +# nova-manage project environment プロジェクト名 ユーザ名 認証情報を格納するファイル +# +#$DIR/bin/nova-manage project environment admin admin $DIR/novarc + +# 4. 認証情報の読み込み +. $DIR/novarc + +# 5. プロジェクト用仮想マシンネットワークの作成を行う +# nova-manage user admin ユーザ名 access-key secret-key +# +$DIR/bin/nova-manage network create 10.0.0.0/8 3 16 + +# 6. 初回ログインにはSSHの公開鍵認証が必要 +# +if [ "" == "`euca-describe-keypairs | grep testkey`" ]; then + euca-add-keypair testkey > testkey.pem +fi + +# 7. 
+for i in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do + sudo ip addr del $i dev eth0 2> /dev/null +done + + diff --git a/nova/livemigration_test/SI/utils/demo-runInstance.sh b/nova/livemigration_test/SI/utils/demo-runInstance.sh new file mode 100755 index 000000000..171291262 --- /dev/null +++ b/nova/livemigration_test/SI/utils/demo-runInstance.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +DIR=/opt/nova-2010.1 + +function inc_assigned(){ + assigned=`expr $assigned + 1` +} + + +# 1. 認証情報の読み込み +. $DIR/novarc + +# 3. 仮想マシンの起動 +# +ret=`euca-run-instances -t m1.small -k testkey ami-centos` +#ret=`euca-run-instances -t m1.small -k testkey ami-tiny` + +# 4. 仮想マシン用IPの確保 +# 未登録なら登録しておく +registered=`euca-describe-addresses` +for ip in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do + + not_registered=`echo $registered | grep $ip` + if [ "" == "$not_registered" ]; then + echo "[INFO] registed $ip" + $DIR/bin/nova-manage floating create `hostname` $ip + fi +done + +# 5. IPの割当 +echo 0 > /tmp/demo-runinstance +euca-describe-addresses | grep -v reserved | while read line; do + # 割り当てられてないものを仮想マシンに割り当てる + ip=`echo $line | cut -d ' ' -f 2` + id=`echo $ret | cut -d ' ' -f 5` + if [ "" == "`echo $id | grep i- `" ] ; then + echo "[INFO] try again" $ret + break + fi + echo "[INFO] assigned to ipaddr($ip) to instance($id) " + euca-associate-address -i $id $ip + echo 1 > /tmp/demo-runinstance + break +done + +echo $assigned +if [ 0 -eq "`cat /tmp/demo-runinstance`" ] ; then + echo "[INFO] address is full." +fi +rm -rf /tmp/demo-runinstance + + +# 6. FWの設定 +euca-authorize -P tcp -p 22 default 2> /dev/null > /dev/null +euca-authorize -P tcp -p 80 default 2> /dev/null > /dev/null +euca-authorize -P tcp -p 5555 default 2> /dev/null > /dev/null + diff --git a/nova/livemigration_test/SI/utils/nova-manage.conf b/nova/livemigration_test/SI/utils/nova-manage.conf new file mode 100644 index 000000000..9f8a02b96 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova-manage.conf @@ -0,0 +1,18 @@ +--verbose +--nodaemon +--dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--FAKE_subdomain=ec2 +--libvirt_type=qemu +--no_internet_conn=True +--public_netif=eth0 +--public_interface=eth0 + +--cc-host=172.19.0.131 +--routing_source_ip=172.19.0.131 +--sql_connection=mysql://root:nova@172.19.0.131/nova +--rabbit_host=172.19.0.131 +--redis_host=172.19.0.131 +--s3_host=172.19.0.131 +--auth_driver=nova.auth.ldapdriver.LdapDriver +--ldap_url=ldap://172.19.0.131 + diff --git a/nova/livemigration_test/SI/utils/nova.conf b/nova/livemigration_test/SI/utils/nova.conf new file mode 100644 index 000000000..c66bfbc53 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova.conf @@ -0,0 +1,10 @@ +--verbose +--nodaemon +--dhcpbridge_flagfile=/opt/nova-2010.4//bin/nova.conf +--network_manager=nova.network.manager.VlanManager +--cc_host=172.19.0.131 +--routing_source_ip=172.19.0.131 +--sql_connection=mysql://root:nova@localhost/nova +--auth_driver=nova.auth.ldapdriver.LdapDriver +--libvirt_type=qemu +--public_interface=eth0 diff --git a/nova/livemigration_test/SI/utils/nova.sh b/nova/livemigration_test/SI/utils/nova.sh new file mode 100755 index 000000000..b8e2e9f26 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash +DIR=`pwd` +CMD=$1 +SOURCE_BRANCH=lp:nova +if [ -n "$2" ]; then + SOURCE_BRANCH=$2 +fi +#DIRNAME=nova +DIRNAME="" +NOVA_DIR=$DIR/$DIRNAME +if [ -n "$3" ]; then + NOVA_DIR=$DIR/$3 +fi + +if [ ! 
-n "$HOST_IP" ]; then + # NOTE(vish): This will just get the first ip in the list, so if you + # have more than one eth device set up, this will fail, and + # you should explicitly set HOST_IP in your environment + HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` +fi + +#USE_MYSQL=${USE_MYSQL:-0} +USE_MYSQL=1 +MYSQL_PASS=${MYSQL_PASS:-nova} +TEST=${TEST:-0} +#USE_LDAP=${USE_LDAP:-0} +USE_LDAP=1 +LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} +NET_MAN=${NET_MAN:-VlanManager} +# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface +# below but make sure that the interface doesn't already have an +# ip or you risk breaking things. +# FLAT_INTERFACE=eth0 + +if [ "$USE_MYSQL" == 1 ]; then + SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova +else + SQL_CONN=sqlite:///$NOVA_DIR/nova.sqlite +fi + +if [ "$USE_LDAP" == 1 ]; then + AUTH=ldapdriver.LdapDriver +else + AUTH=dbdriver.DbDriver +fi + +mkdir -p /etc/nova +cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf +--network_manager=nova.network.manager.$NET_MAN +--cc_host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=$SQL_CONN +--auth_driver=nova.auth.$AUTH +--libvirt_type=$LIBVIRT_TYPE +--public_interface=eth0 +NOVA_CONF_EOF + +if [ -n "$FLAT_INTERFACE" ]; then + echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf +fi + +if [ "$CMD" == "branch" ]; then + sudo apt-get install -y bzr + rm -rf $NOVA_DIR + bzr branch $SOURCE_BRANCH $NOVA_DIR + cd $NOVA_DIR + mkdir -p $NOVA_DIR/instances + mkdir -p $NOVA_DIR/networks +fi + +# You should only have to run this once +if [ "$CMD" == "install" ]; then + sudo apt-get install -y python-software-properties + sudo add-apt-repository ppa:nova-core/ppa + sudo apt-get update + sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables + sudo apt-get install -y user-mode-linux kvm libvirt-bin + sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server + sudo apt-get install -y lvm2 iscsitarget open-iscsi + echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget + sudo /etc/init.d/iscsitarget restart + sudo modprobe kvm + sudo /etc/init.d/libvirt-bin restart + sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot + sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy + sudo apt-get install -y python-libvirt python-libxml2 python-routes + if [ "$USE_MYSQL" == 1 ]; then + cat </etc/nova/nova-manage.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--FAKE_subdomain=ec2 +--libvirt_type=qemu +--no_internet_conn=True +--public_netif=eth0 +--public_interface=eth0 + +--cc-host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=mysql://root:nova@$HOST_IP/nova +--rabbit_host=$HOST_IP +--redis_host=$HOST_IP +--s3_host=$HOST_IP +--auth_driver=nova.auth.ldapdriver.LdapDriver +--ldap_url=ldap://$HOST_IP + +NOVA_CONF_EOF + +$DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf + diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py new file mode 100644 index 000000000..d28d3ccb6 --- /dev/null +++ b/nova/livemigration_test/UT/computeManager.test.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR = '/opt/openstack/nova' +#NOVA_DIR = '/opt/nova-2010.4' + +import sys +import unittest +import commands +import re +import logging + +from mock import Mock +import 
twisted + +try: + print + print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' \ + % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.compute.manager import ComputeManager + from nova.virt.libvirt_conn import LibvirtConnection + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + + def write(self, arg): + self.buffer += arg + + def writelines(self, arg): + self.buffer += arg + + def flush(self): + print 'flush' + self.buffer = '' + + +class tmpStderr(tmpStdout): + def write(self, arg): + self.buffer += arg + + def flush(self): + pass + + def realFlush(self): + self.buffer = '' + +dummyCallReturnValue={ 0:True } +dummyCallCount=0 +def dummyCall(context, topic, method): + global dummyCallReturnValue, dummyCallCount + if dummyCallCount in dummyCallReturnValue.keys() : + ret = dummyCallReturnValue[ dummyCallCount ] + dummyCallCount += 1 + return ret + else : + dummyCallCount += 1 + return False + + +class ComputeTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + manager = None + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + #if self.stdout is None: + # self.__class__.stdout = tmpStdout() + #self.stdoutBak = sys.stdout + #sys.stdout = self.stdout + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + self.host = 'openstack2-api' + if self.manager is None: + self.__class__.manager = ComputeManager(host=self.host) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + for key, val in [('name', 'host1'), ('cpu', 5), + ('memory_mb', 20480), ('hdd_gb', 876)]: + self.host1.__setitem__(key, val) + + self.host2 = Host() + for key, val in [('name', 'host2'), ('cpu', 5), + ('memory_mb', 20480), ('hdd_gb', 876)]: + self.host2.__setitem__(key, val) + + self.instance1 = Instance() + for key, val in [('id', 1), ('host', 'host1'), + ('hostname', 'i-12345'), ('state', power_state.RUNNING), + ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), + ('hdd_gb', 5), ('internal_id', 12345)]: + self.instance1.__setitem__(key, val) + + self.instance2 = Instance() + for key, val in [('id', 2), ('host', 'host1'), + ('hostname', 'i-12345'), ('state', power_state.RUNNING), + ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), + ('hdd_gb', 5)]: + self.instance2.__setitem__(key, val) + + self.fixed_ip1 = FixedIp() + for key, val in [('id', 1), ('address', '1.1.1.1'), + ('network_id', '1'), ('instance_id', 1)]: + self.fixed_ip1.__setitem__(key, val) + + self.vol1 = Volume() + for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'), + ('availability_zone', 'nova'), ('host', 'host1')]: + self.vol1.__setitem__(key, val) + + self.vol2 = Volume() + for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'), + ('availability_zone', 'nova'), ('host', 'host1')]: + self.vol2.__setitem__(key, val) + + self.secgrp1 = Volume() + for key, val in [('id', 1), ('ec2_id', 'default')]: + self.secgrp1.__setitem__(key, val) + + self.secgrp2 = Volume() + for key, val in [('id', 2), ('ec2_id', 
'def2')]: + self.secgrp2.__setitem__(key, val) + + self.netref1 = Network() + + def setMocks(self): + + # mocks for pre_live_migration + self.ctxt = context.get_admin_context() + db.instance_get = Mock(return_value=self.instance1) + db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) + db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) + db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) + db.security_group_get_by_instance \ + = Mock(return_value=[self.secgrp1, self.secgrp2]) + self.manager.driver.setup_nwfilters_for_instance \ + = Mock(return_value=None) + self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None) + self.manager.network_manager.setup_compute_network \ + = Mock(return_value=None) + # mocks for live_migration_ + rpc.call = Mock(return_value=True) + db.instance_set_state = Mock(return_value=True) + + # ---> test for nova.compute.manager.pre_live_migration() + def test01(self): + """01: NotFound error occurs on finding instance on DB. """ + + db.instance_get = Mock(side_effect=exception.NotFound('ERR')) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test02(self): + """02: NotAuthrized occurs on finding volume on DB. """ + + db.volume_get_by_ec2_id \ + = Mock(side_effect=exception.NotAuthorized('ERR')) + + self.assertRaises(exception.NotAuthorized, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test03(self): + """03: Unexpected exception occurs on finding volume on DB. """ + + db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) + + self.assertRaises(TypeError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test04(self): + """04: no volume and fixed ip found on DB, """ + + db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) + db.instance_get_fixed_address = Mock(return_value=None) + + self.assertRaises(rpc.RemoteError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + c1 = (0 <= sys.stderr.buffer.find('has no volume')) + + self.assertEqual(c1, True) + + def test05(self): + """05: volume found and no fixed_ip found on DB. """ + + db.instance_get_fixed_address \ + = Mock(side_effect=exception.NotFound('ERR')) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test06(self): + """06: self.driver.setup_nwfilters_for_instance causes NotFound. """ + self.manager.driver.setup_nwfilters_for_instance \ + = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test07(self): + """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """ + self.manager.network_manager.setup_compute_network \ + = Mock(side_effect=exception.ProcessExecutionError("ERR")) + + self.assertRaises(exception.ProcessExecutionError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + + def test08(self): + """08: self.manager.network_manager.setup_compute_network + exception.NotFound. 
""" + self.manager.network_manager.setup_compute_network \ + = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + # those 2 cases are omitted : + # self.driver.setup_nwfilters_for_instance causes + # twisted.python.failure.Failure. + # self.driver.refresh_security_group causes twisted.python.failure.Failure. + # + # twisted.python.failure.Failure can not be used with assertRaises, + # it doesnt have __call___ + # + + def test09(self): + """09: volume/fixed_ip found on DB, all procedure finish + successfully.. """ + + result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + self.assertEqual(result, True) + + # ---> test for nova.compute.manager.live_migration() + + def test10(self): + """10: rpc.call(pre_live_migration returns Error(Not None). """ + rpc.call = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test11(self): + """11: if rpc.call returns rpc.RemoteError. """ + + rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) + db.instance_set_state = Mock(return_value=True) + result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + c1 = (None == result) + c2 = (0 <= sys.stderr.buffer.find('err at')) + self.assertEqual(c1 and c2, True) + + def test12(self): + """12: if rpc.call returns rpc.RemoteError and instance_set_state + also ends up err. (then , unexpected err occurs, in this case + TypeError) + """ + rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) + db.instance_set_state = Mock(side_effect=TypeError("ERR")) + self.assertRaises(TypeError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test13(self): + """13: if wait for pre_live_migration, but timeout. """ + rpc.call = dummyCall + + db.instance_get = Mock(return_value=self.instance1) + + result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + c1 = (None == result) + c2 = (0 <= sys.stderr.buffer.find('Timeout for')) + self.assertEqual(c1 and c2, True) + + def test14(self): + """14: if db_instance_get issues NotFound. + """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(side_effect=exception.NotFound("ERR")) + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test15(self): + """15: if rpc.call returns True, and instance_get() cause other + exception. (Unexpected case - b/c it already checked by + nova-manage) + """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(side_effect=TypeError("ERR")) + + self.assertRaises(TypeError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test16(self): + """16: if rpc.call returns True, and live_migration issues + ProcessExecutionError. """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(return_value=self.instance1) + ret = self.manager.driver.live_migration \ + = Mock(side_effect=utils.ProcessExecutionError("ERR")) + + self.assertRaises(utils.ProcessExecutionError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test17(self): + """17: everything goes well. """ + self.manager.driver.live_migration = Mock(return_value=True) + ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + self.assertEqual(True, True) + + def tearDown(self): + """common terminating method. 
""" + self.stderr.realFlush() + sys.stderr = self.stderrBak + #sys.stdout = self.stdoutBak + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + #unittest.main() + + suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions) + unittest.TextTestRunner(verbosity=2).run(suite) + + #suite = unittest.TestSuite() + #suite.addTest(ComputeTestFunctions("test15")) + #suite.addTest(ComputeTestFunctions("test16")) + #unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py new file mode 100644 index 000000000..6a353508d --- /dev/null +++ b/nova/livemigration_test/UT/libvirtConnection.test.py @@ -0,0 +1,366 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.4' + +import sys +import unittest +import commands +import re +import logging +import libvirt + +from mock import Mock +import twisted + +try : + print + print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.compute.manager import ComputeManager + from nova.virt import libvirt_conn + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova import process + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def writelines(self, arg): + self.buffer += arg + def flush(self): + print 'flush' + self.buffer = '' + +class tmpStderr(tmpStdout): + def write(self,arg): + self.buffer += arg + def flush(self): + pass + def realFlush(self): + self.buffer = '' + +class DummyLibvirtConn(object): + nwfilterLookupByName = None + def __init__(self): + pass + + +class LibvirtConnectionTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + manager = None + + # 共通の初期化処理 + def setUp(self): + """common init method. 
""" + + #if self.stdout is None: + # self.__class__.stdout = tmpStdout() + #self.stdoutBak = sys.stdout + #sys.stdout = self.stdout + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + self.host = 'openstack2-api' + if self.manager is None: + self.__class__.manager = libvirt_conn.get_connection(False) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]: + self.host1.__setitem__(key, val) + + self.instance1 = Instance() + for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]: + self.instance1.__setitem__(key, val) + + + self.instance2 = Instance() + for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance2.__setitem__(key, val) + + + self.fixed_ip1 = FixedIp() + for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'), + ('instance_id', 1)]: + self.fixed_ip1.__setitem__(key, val) + + self.floating_ip1 = FloatingIp() + for key, val in [ ('id', 1), ('address', '1.1.1.200') ]: + self.floating_ip1.__setitem__(key, val) + + self.netref1 = Network() + for key, val in [ ('id', 1) ]: + self.netref1.__setitem__(key, val) + + + def setMocks(self): + + self.ctxt = context.get_admin_context() + db.instance_get_fixed_address = Mock(return_value = '1.1.1.1') + db.fixed_ip_update = Mock(return_value = None) + db.fixed_ip_get_network = Mock(return_value = self.netref1) + db.network_update = Mock(return_value = None) + db.instance_get_floating_address = Mock(return_value = '1.1.1.200') + db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1) + db.floating_ip_update = Mock(return_value = None) + db.instance_update = Mock(return_value = None) + + + # ---> test for nova.virt.libvirt_conn.nwfilter_for_instance_exists() + + def test01(self): + """01: libvirt.libvirtError occurs. """ + + self.manager._wrapped_conn = DummyLibvirtConn() + self.manager._test_connection = Mock(return_value=True) + self.manager._conn.nwfilterLookupByName = \ + Mock(side_effect=libvirt.libvirtError("ERR")) + ret = self.manager.nwfilter_for_instance_exists(self.instance1) + self.assertEqual(ret, False) + + def test02(self): + """02: libvirt.libvirtError not occurs. """ + + self.manager._wrapped_conn = DummyLibvirtConn() + self.manager._test_connection = Mock(return_value=True) + self.manager._conn.nwfilterLookupByName = \ + Mock(return_value=True) + ret = self.manager.nwfilter_for_instance_exists(self.instance1) + self.assertEqual(ret, True) + + # ---> test for nova.virt.libvirt_conn.live_migraiton() + + def test03(self): + """03: Unexpected exception occurs on finding volume on DB. 
""" + + utils.execute = Mock( side_effect=process.ProcessExecutionError('ERR') ) + + self.assertRaises(process.ProcessExecutionError, + self.manager.live_migration, + self.instance1, + 'host2') + + # ---> other case cannot be tested because live_migraiton + # is synchronized/asynchronized method are mixed together + + + # ---> test for nova.virt.libvirt_conn._post_live_migraiton + + def test04(self): + """04: instance_ref is not nova.db.sqlalchemy.models.Instances""" + + self.assertRaises(TypeError, + self.manager._post_live_migration, + "dummy string", + 'host2') + + def test05(self): + """05: db.instance_get_fixed_address return None""" + + db.instance_get_fixed_address = Mock( return_value=None ) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found')) + self.assertEqual(c1 and c2, True) + + def test06(self): + """06: db.instance_get_fixed_address raises NotFound""" + + db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host2') + + def test07(self): + """07: db.instance_get_fixed_address raises Unknown exception""" + + db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test08(self): + """08: db.fixed_ip_update return NotFound. """ + + db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test09(self): + """09: db.fixed_ip_update return NotAuthorized. """ + db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') ) + self.assertRaises(exception.NotAuthorized, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test10(self): + """10: db.fixed_ip_update return Unknown exception. """ + db.fixed_ip_update = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test11(self): + """11: db.fixed_ip_get_network causes NotFound. """ + + db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host1') + + # not tested db.fixed_ip_get_network raises NotAuthorized + # because same test has been done at previous test. + + def test12(self): + """12: db.fixed_ip_get_network causes Unknown exception. """ + + db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test13(self): + """13: db.network_update raises Unknown exception. """ + db.network_update = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test14(self): + """14: db.instance_get_floating_address raises NotFound. """ + db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + + def test15(self): + """15: db.instance_get_floating_address returns None. 
""" + + db.instance_get_floating_address = Mock( return_value=None ) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found')) + self.assertEqual(c1 and c2, True) + + def test16(self): + """16: db.instance_get_floating_address raises NotFound. """ + + db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + def test17(self): + """17: db.instance_get_floating_address raises Unknown exception. """ + db.instance_get_floating_address = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + + def test18(self): + """18: db.floating_ip_get_by_address raises NotFound """ + + db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + def test19(self): + """19: db.floating_ip_get_by_address raises Unknown exception. """ + db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + + def test20(self): + """20: db.floating_ip_update raises Unknown exception. + """ + db.floating_ip_update = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + def test21(self): + """21: db.instance_update raises unknown exception. """ + + db.instance_update = Mock(side_effect=TypeError("ERR")) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def tearDown(self): + """common terminating method. """ + self.stderr.realFlush() + sys.stderr = self.stderrBak + #sys.stdout = self.stdoutBak + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + #unittest.main() + + suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions) + unittest.TextTestRunner(verbosity=2).run(suite) + + #suite = unittest.TestSuite() + #suite.addTest(LibvirtConnectionTestFunctions("test14")) + #suite.addTest(LibvirtConnectionTestFunctions("test16")) + #unittest.TextTestRunner(verbosity=2).run(suite) + + diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py new file mode 100644 index 000000000..dabdba001 --- /dev/null +++ b/nova/livemigration_test/UT/nova-manage.test.py @@ -0,0 +1,318 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.2' + +import sys +import unittest +import commands +import re + +from mock import Mock + +try : + print + print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' 
% NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def flush(self): + self.buffer = '' + + +class NovaManageTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + + hostCmds = None + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() )) + commands.getstatusoutput('touch %s' % self.getInitpyPath() ) + try : + import bin.novamanagetest + except: + print 'Fail to import nova-manage . check bin/nova-manage exists' + raise + + # replace stdout for checking nova-manage output + if self.stdout is None : + self.__class__.stdout = tmpStdout() + self.stdoutBak = sys.stdout + sys.stdout = self.stdout + + # prepare test data + self.setTestData() + + + def setTestData(self): + import bin.novamanagetest + + if self.hostCmds is None : + self.__class__.hostCmds = bin.novamanagetest.HostCommands() + self.instanceCmds = bin.novamanagetest.InstanceCommands() + + self.host1 = Host() + self.host1.__setitem__('name', 'host1') + + self.host2 = Host() + self.host2.__setitem__('name', 'host2') + + self.instance1 = Instance() + self.instance1.__setitem__('id', 1) + self.instance1.__setitem__('host', 'host1') + self.instance1.__setitem__('hostname', 'i-12345') + self.instance1.__setitem__('state', power_state.NOSTATE) + self.instance1.__setitem__('state_description', 'running') + + self.instance2 = Instance() + self.instance2.__setitem__('id', 2) + self.instance2.__setitem__('host', 'host1') + self.instance2.__setitem__('hostname', 'i-12345') + self.instance2.__setitem__('state', power_state.RUNNING) + self.instance2.__setitem__('state_description', 'pending') + + self.instance3 = Instance() + self.instance3.__setitem__('id', 3) + self.instance3.__setitem__('host', 'host1') + self.instance3.__setitem__('hostname', 'i-12345') + self.instance3.__setitem__('state', power_state.RUNNING) + self.instance3.__setitem__('state_description', 'running') + + db.host_get_all = Mock(return_value=[self.host1, self.host2]) + + def getInitpyPath(self): + return '%s/bin/__init__.py' % NOVA_DIR + + def getNovaManageCopyPath(self): + return '%s/bin/novamanagetest.py' % NOVA_DIR + + # -----> Test for nova-manage host list + + def test01(self): + """01: Got some host lists. """ + + self.hostCmds.list() + + c1 = (2 == self.stdout.buffer.count('\n')) + c2 = (0 <= self.stdout.buffer.find('host1')) + c3 = (0 <= self.stdout.buffer.find('host2')) + self.assertEqual(c1 and c2 and c3, True) + + def test02(self): + """02: Got empty lsit. 
""" + + db.host_get_all = Mock(return_value=[]) + self.hostCmds.list() + + # result should be empty + c = (0 == len(self.stdout.buffer) ) + self.assertEqual(c, True) + + def test03(self): + """03: Got notFound """ + + db.host_get_all = Mock(side_effect=exception.NotFound("ERR")) + self.assertRaises(exception.NotFound, self.hostCmds.list) + + # --------> Test For nova-manage host show + + def test04(self): + """04: args are not enough(nova-manage host show) """ + self.assertRaises(TypeError, self.hostCmds.show ) + + + def test05(self): + """05: nova-manage host show not-registered-host, and got an error""" + + rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} ) + self.hostCmds.show('host1') + self.assertEqual( self.stdout.buffer[:3]=='ERR', True ) + + + def test06(self): + """06: nova-manage host show registerd-host, and no project uses the host""" + + dic = {'ret': True, + 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'usage': {}} + + rpc.call = Mock(return_value=dic ) + self.hostCmds.show('host1') + + # result should be : + # HOST PROJECT cpu mem(mb) disk(gb) + # host1 1 2 3 + line = self.stdout.buffer.split('\n')[1] + line = re.compile('\t+').sub(' ', line).strip() + c1 = ( 'host1 1 2 3' == line ) + c2 = ( self.stdout.buffer.count('\n') == 2 ) + + self.assertEqual( c1 and c2, True ) + + def test07(self): + """07: nova-manage host show registerd-host, + and some projects use the host + """ + dic = {'ret': True, + 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'usage': {'p1': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'p2': {'cpu':1, 'memory_mb':2, 'hdd_gb':3} }} + + rpc.call = Mock(return_value=dic ) + self.hostCmds.show('host1') + + # result should be : + # HOST PROJECT cpu mem(mb) disk(gb) + # host1 1 2 3 + # host1 p1 1 2 3 + # host1 p2 4 5 6 + line = self.stdout.buffer.split('\n')[1] + ret = re.compile('\t+').sub(' ', line).strip() + c1 = ( 'host1 1 2 3' == ret ) + + line = self.stdout.buffer.split('\n')[2] + line = re.compile('\t+').sub(' ', line).strip() + c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line ) + + line = self.stdout.buffer.split('\n')[3] + ret = re.compile('\t+').sub(' ', line).strip() + c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret ) + + self.assertEqual( c1 and c2 and c3, True ) + + def test08(self): + """08: nova-manage host show registerd-host, and rpc.call returns None + (unexpected error) + """ + rpc.call = Mock(return_value=None ) + self.hostCmds.show('host1') + c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') ) + self.assertEqual( c1, True ) + + # ----------> Test for bin/nova-manage instance live_migration + + def test09(self): + """09: arguments are not enough(nova-manage instances live_migration) + """ + self.assertRaises(TypeError, self.instanceCmds.live_migration ) + + def test10(self): + """10: arguments are not enough(nova-manage instances live_migration ec2_id) + """ + self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) + + def test11(self): + """11: nova-manage instances live_migration ec2_id host, + where hostname is invalid + """ + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) + + def test12(self): + """12: nova-manage instances live_migration ec2_id(invalid id) host""" + + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') ) + + self.assertRaises(exception.NotFound, 
self.instanceCmds.live_migration, 'i-xxx', 'host1' ) + + def test13(self): + """13: nova-manage instances live_migration ec2_id host, + but instance specifed by ec2 id is not running (state is not power_state.RUNNING) + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) + c1 = c2 = False + try : + self.instanceCmds.live_migration('i-12345', 'host1') + except SystemExit, e: + c1 = (1 == e.code) + c2 = (0 < self.stdout.buffer.find('is not running') ) + self.assertEqual( c1 and c2 , True ) + + + def test14(self): + """14: nova-manage instances live_migration ec2_id host, + but instance specifed by ec2 id is not running (state_description is not running) + """ + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) + c1 = c2 = False + try : + self.instanceCmds.live_migration('i-12345', 'host2') + except SystemExit, e: + c1 = (1 == e.code) + c2 = (0 < self.stdout.buffer.find('is not running') ) + self.assertEqual( c1 and c2 , True ) + + def test15(self): + """15: nova-manage instances live_migration ec2_id host, + but instance is running at the same host specifed above, so err should be occured. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + c1 = c2 = False + try : + self.instanceCmds.live_migration('i-12345', 'host1') + except SystemExit, e: + c1 = (2 == e.code) + c2 = (0 < self.stdout.buffer.find('is running now') ) + self.assertEqual( c1 and c2 , True ) + + def test16(self): + """16: nova-manage instances live_migration ec2_id host, + everything goes well, ang gets success messages. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + rpc.cast = Mock(return_value = None) + + self.instanceCmds.live_migration('i-12345', 'host2') + c1 = (0 <= self.stdout.buffer.find('Finished all procedure') ) + self.assertEqual( c1, True ) + + + def tearDown(self): + """common terminating method. """ + commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() ) + commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() ) + sys.stdout.flush() + sys.stdout = self.stdoutBak + +if __name__ == '__main__': + #unittest.main() + suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions) + unittest.TextTestRunner(verbosity=3).run(suite) + + diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py new file mode 100644 index 000000000..2fe4d0994 --- /dev/null +++ b/nova/livemigration_test/UT/schedulerManager.test.py @@ -0,0 +1,360 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.2' + +import sys +import unittest +import commands +import re + +from mock import Mock + +try : + print + print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' 
% NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.scheduler.manager import SchedulerManager + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def flush(self): + self.buffer = '' + + +class SchedulerTestFunctions(unittest.TestCase): + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + self.host = 'openstack2-api' + self.manager = SchedulerManager(host=self.host) + + self.setTestData() + + def setTestData(self): + + self.host1 = Host() + self.host1.__setitem__('name', 'host1') + self.host1.__setitem__('cpu', 5) + self.host1.__setitem__('memory_mb', 20480) + self.host1.__setitem__('hdd_gb', 876) + + self.host2 = Host() + self.host2.__setitem__('name', 'host2') + self.host2.__setitem__('cpu', 5) + self.host2.__setitem__('memory_mb', 20480) + self.host2.__setitem__('hdd_gb', 876) + + self.instance1 = Instance() + for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance1.__setitem__(key, val) + + + self.instance2 = Instance() + for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance2.__setitem__(key, val) + + + self.instance3 = Instance() + for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance3.__setitem__(key, val) + + self.instance4 = Instance() + for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance4.__setitem__(key, val) + + self.instance5 = Instance() + for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance5.__setitem__(key, val) + + self.instance6 = Instance() + for key, val in [ ('id', 6), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance6.__setitem__(key, val) + + self.instance7 = Instance() + for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]: + self.instance7.__setitem__(key, val) + + self.instance8 = Instance() + for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: + self.instance8.__setitem__(key, val) + + + + def check_format(self, val): + """check result format of show_host_resource """ + + if dict != type(val) : + sys.stderr.write('return value is 
not dict') + return False + + if not val.has_key('ret'): + sys.stderr.write('invalid format(missing "ret"). ') + return False + + if not val['ret'] : + if not val.has_key('msg') : + sys.stderr.write( 'invalid format(missing "msg").' ) + return False + + else : + if not val.has_key('phy_resource') : + sys.stderr.write('invalid format(missing "phy_resource"). ') + return False + + if not val.has_key('usage'): + sys.stderr.write('invalid format(missing "usage"). ') + return False + + if not self._check_format(val['phy_resource']): + return False + + for key, dic in val['usage'].items() : + if not self._check_format(dic): + return False + return True + + def _check_format(self, val): + if dict != type(val) : + sys.stderr.write('return value is not dict') + return False + + for key in ['cpu', 'memory_mb', 'hdd_gb']: + if not val.has_key(key) : + sys.stderr.write('invalid format(missing "%s"). ' % key ) + return False + + return True + + # ---> test for nova.scheduler.manager.show_host_resource() + + def test01(self): + """01: get NotFound exception when dest host not found on DB """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + result = self.manager.show_host_resource(ctxt, 'not-registered-host') + c1 = ( not result['ret'] ) + c2 = ( 0 == result['msg'].find('No such') ) + self.assertEqual(c1 and c2, True) + + def test02(self): + """02: get other exception if unexpected err. """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, self.manager.show_host_resource, ctxt, 'host1' ) + + def test03(self): + """03: no instance found on dest host. """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( return_value = self.host1 ) + db.instance_get_all_by_host = Mock( return_value=[]) + ret= self.manager.show_host_resource(ctxt, 'host1') + + c1 = self.check_format(ret) + v = ret['phy_resource'] + c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) + c3 = ( 0 == len(ret['usage']) ) + + self.assertEqual(c1 and c2 and c3, True) + + def test04(self): + """04: some instance found on dest host. """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( return_value = self.host1 ) + db.instance_get_all_by_host = Mock( return_value=[ self.instance1, + self.instance2, + self.instance3] ) + + db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3) + db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) + db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) + + ret= self.manager.show_host_resource(ctxt, 'host1') + + c1 = self.check_format(ret) + v = ret['phy_resource'] + c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) + c3 = ( 2 == len(ret['usage']) ) + c4 = ( self.instance1['project_id'] in ret['usage'].keys()) + c5 = ( self.instance3['project_id'] in ret['usage'].keys()) + + self.assertEqual(c1 and c2 and c3 and c4 and c5, True) + + + # ---> test for nova.scheduler.manager.has_enough_resource() + def test05(self): + """05: when cpu is exccded some instance found on dest host. 
""" + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance6) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + def test06(self): + """06: when memory is exccded some instance found on dest host. """ + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance7) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + def test07(self): + """07: when hdd is exccded some instance found on dest host. """ + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance8) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + def test08(self): + """08: everything goes well. (instance_get_all_by_host returns list)""" + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, True) + + + def test09(self): + """09: everything goes well(instance_get_all_by_host returns[]). """ + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, True) + + + # ---> test for nova.scheduler.manager.live_migration() + + + def test10(self): + """10: instance_get_by_internal_id issue NotFound. """ + # Mocks for has_enough_resource() + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance8) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.live_migration, + ctxt, + 'i-12345', + 'host1') + + + def test11(self): + """11: return False if host doesnt have enough resource. """ + + # Mocks for has_enough_resource() + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance8) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(return_value = self.instance8) + db.instance_set_state = Mock(return_value = True) + rpc_cast = Mock(return_value = True) + + ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + + + def test12(self): + """12: everything goes well. 
""" + + # Mocks for has_enough_resource() + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(return_value = self.instance8) + db.instance_set_state = Mock(return_value = True) + rpc.cast = Mock(return_value = True) + + ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, True) + + + def tearDown(self): + """common terminating method. """ + #sys.stdout = self.stdoutBak + pass + +if __name__ == '__main__': + #unittest.main() + suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions) + unittest.TextTestRunner(verbosity=3).run(suite) + + diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls new file mode 100644 index 000000000..0524526b6 Binary files /dev/null and b/nova/livemigration_test/UT/testCase_UT.xls differ -- cgit From df045f9252f6a50171d477c265564f062294e47a Mon Sep 17 00:00:00 2001 From: masumotok Date: Mon, 20 Dec 2010 08:06:11 +0900 Subject: テストコードをレポジトリに追加 nova.compute.manager.pre_live_migration()について、異常終了しているのに正常終了の戻り値を返すことがあったため変更 - 正常終了の戻り値をTrueに変更 - fixed_ipが見つからないときにはRemoteErrorをraiseする - それに合わせてnova.compute.manager.live_migrationも変更 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/compute/manager.py | 13 +- nova/livemigration_test/SI/picture.pptx | Bin 0 -> 137730 bytes nova/livemigration_test/SI/testCase_SI.xls | Bin 0 -> 35840 bytes .../SI/testParameterSheet_SI.xls | Bin 0 -> 464384 bytes nova/livemigration_test/SI/utils/demo-firstboot.sh | 39 ++ .../SI/utils/demo-runInstance.sh | 57 +++ nova/livemigration_test/SI/utils/nova-manage.conf | 18 + nova/livemigration_test/SI/utils/nova.conf | 10 + nova/livemigration_test/SI/utils/nova.sh | 180 +++++++++ nova/livemigration_test/SI/utils/nova.sh.compute | 37 ++ nova/livemigration_test/UT/computeManager.test.py | 407 +++++++++++++++++++++ .../UT/libvirtConnection.test.py | 366 ++++++++++++++++++ nova/livemigration_test/UT/nova-manage.test.py | 318 ++++++++++++++++ .../livemigration_test/UT/schedulerManager.test.py | 360 ++++++++++++++++++ nova/livemigration_test/UT/testCase_UT.xls | Bin 0 -> 195072 bytes 15 files changed, 1799 insertions(+), 6 deletions(-) create mode 100644 nova/livemigration_test/SI/picture.pptx create mode 100644 nova/livemigration_test/SI/testCase_SI.xls create mode 100644 nova/livemigration_test/SI/testParameterSheet_SI.xls create mode 100755 nova/livemigration_test/SI/utils/demo-firstboot.sh create mode 100755 nova/livemigration_test/SI/utils/demo-runInstance.sh create mode 100644 nova/livemigration_test/SI/utils/nova-manage.conf create mode 100644 nova/livemigration_test/SI/utils/nova.conf create mode 100755 nova/livemigration_test/SI/utils/nova.sh create mode 100755 nova/livemigration_test/SI/utils/nova.sh.compute create mode 100644 nova/livemigration_test/UT/computeManager.test.py create mode 100644 nova/livemigration_test/UT/libvirtConnection.test.py create mode 100644 nova/livemigration_test/UT/nova-manage.test.py create mode 100644 nova/livemigration_test/UT/schedulerManager.test.py create mode 100644 nova/livemigration_test/UT/testCase_UT.xls diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 
81cca7770..bad525115 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -39,6 +39,7 @@ import logging import sys import traceback import os +import time from twisted.internet import defer @@ -297,10 +298,10 @@ class ComputeManager(manager.Manager): # 2. getting fixed ips fixed_ip = db.instance_get_fixed_address(context, instance_id) if None == fixed_ip: - logging.error('Not found fixedip for %s\n%s', - ec2_id, - ''.join(traceback.format_tb(sys.exc_info()[2]))) - return + exc_type = 'NotFoundError' + val = '%s(%s) doesnt have fixed_ip ' % (instance_id, ec2_id) + tb = ''.join(traceback.format_tb(sys.exc_info()[2])) + raise rpc.RemoteError(exc_type, val, tb) # 3. if any volume is mounted, prepare here. if 0 != len(shelf_slots): @@ -315,6 +316,7 @@ class ComputeManager(manager.Manager): # 5. bridge settings self.network_manager.setup_compute_network(instance_id) + return True def nwfilter_for_instance_exists(self, context, instance_id): """Check nova-instance-instance-xxx filter exists """ @@ -324,7 +326,6 @@ class ComputeManager(manager.Manager): def live_migration(self, context, instance_id, dest): """executes live migration.""" - import time # 1. ask dest host to preparing live migration. compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest) ret = rpc.call(context, @@ -333,7 +334,7 @@ class ComputeManager(manager.Manager): "args": {'instance_id': instance_id, 'dest': dest}}) - if rpc.RemoteError == type(ret): + if True != ret: logging.error('Live migration failed(err at %s)', dest) db.instance_set_state(context, instance_id, diff --git a/nova/livemigration_test/SI/picture.pptx b/nova/livemigration_test/SI/picture.pptx new file mode 100644 index 000000000..b47bec9b5 Binary files /dev/null and b/nova/livemigration_test/SI/picture.pptx differ diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls new file mode 100644 index 000000000..723363c1e Binary files /dev/null and b/nova/livemigration_test/SI/testCase_SI.xls differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls new file mode 100644 index 000000000..192d9705b Binary files /dev/null and b/nova/livemigration_test/SI/testParameterSheet_SI.xls differ diff --git a/nova/livemigration_test/SI/utils/demo-firstboot.sh b/nova/livemigration_test/SI/utils/demo-firstboot.sh new file mode 100755 index 000000000..3a6f7fb0b --- /dev/null +++ b/nova/livemigration_test/SI/utils/demo-firstboot.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +DIR=/opt/nova-2010.1 + +# 1. 管理者ユーザを作成する +# nova-manage user admin ユーザ名 access-key secret-key +# +#$DIR/bin/nova-manage user admin admin admin admin + +# 2. プロジェクトを作成する +# nova-manage create project プロジェクト名 プロジェクトに属するユーザ名 +# +#$DIR/bin/nova-manage project create admin admin + +# 3. クラウドを使うための認証情報を生成する +# nova-manage project environment プロジェクト名 ユーザ名 認証情報を格納するファイル +# +#$DIR/bin/nova-manage project environment admin admin $DIR/novarc + +# 4. 認証情報の読み込み +. $DIR/novarc + +# 5. プロジェクト用仮想マシンネットワークの作成を行う +# nova-manage user admin ユーザ名 access-key secret-key +# +$DIR/bin/nova-manage network create 10.0.0.0/8 3 16 + +# 6. 初回ログインにはSSHの公開鍵認証が必要 +# +if [ "" == "`euca-describe-keypairs | grep testkey`" ]; then + euca-add-keypair testkey > testkey.pem +fi + +# 7. 
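+#    delete the demo floating IP addresses (172.19.0.134-172.19.0.137) from eth0,
+#    ignoring errors when an address is not currently assigned (2> /dev/null)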
+for i in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do + sudo ip addr del $i dev eth0 2> /dev/null +done + + diff --git a/nova/livemigration_test/SI/utils/demo-runInstance.sh b/nova/livemigration_test/SI/utils/demo-runInstance.sh new file mode 100755 index 000000000..171291262 --- /dev/null +++ b/nova/livemigration_test/SI/utils/demo-runInstance.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +DIR=/opt/nova-2010.1 + +function inc_assigned(){ + assigned=`expr $assigned + 1` +} + + +# 1. 認証情報の読み込み +. $DIR/novarc + +# 3. 仮想マシンの起動 +# +ret=`euca-run-instances -t m1.small -k testkey ami-centos` +#ret=`euca-run-instances -t m1.small -k testkey ami-tiny` + +# 4. 仮想マシン用IPの確保 +# 未登録なら登録しておく +registered=`euca-describe-addresses` +for ip in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do + + not_registered=`echo $registered | grep $ip` + if [ "" == "$not_registered" ]; then + echo "[INFO] registed $ip" + $DIR/bin/nova-manage floating create `hostname` $ip + fi +done + +# 5. IPの割当 +echo 0 > /tmp/demo-runinstance +euca-describe-addresses | grep -v reserved | while read line; do + # 割り当てられてないものを仮想マシンに割り当てる + ip=`echo $line | cut -d ' ' -f 2` + id=`echo $ret | cut -d ' ' -f 5` + if [ "" == "`echo $id | grep i- `" ] ; then + echo "[INFO] try again" $ret + break + fi + echo "[INFO] assigned to ipaddr($ip) to instance($id) " + euca-associate-address -i $id $ip + echo 1 > /tmp/demo-runinstance + break +done + +echo $assigned +if [ 0 -eq "`cat /tmp/demo-runinstance`" ] ; then + echo "[INFO] address is full." +fi +rm -rf /tmp/demo-runinstance + + +# 6. FWの設定 +euca-authorize -P tcp -p 22 default 2> /dev/null > /dev/null +euca-authorize -P tcp -p 80 default 2> /dev/null > /dev/null +euca-authorize -P tcp -p 5555 default 2> /dev/null > /dev/null + diff --git a/nova/livemigration_test/SI/utils/nova-manage.conf b/nova/livemigration_test/SI/utils/nova-manage.conf new file mode 100644 index 000000000..9f8a02b96 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova-manage.conf @@ -0,0 +1,18 @@ +--verbose +--nodaemon +--dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--FAKE_subdomain=ec2 +--libvirt_type=qemu +--no_internet_conn=True +--public_netif=eth0 +--public_interface=eth0 + +--cc-host=172.19.0.131 +--routing_source_ip=172.19.0.131 +--sql_connection=mysql://root:nova@172.19.0.131/nova +--rabbit_host=172.19.0.131 +--redis_host=172.19.0.131 +--s3_host=172.19.0.131 +--auth_driver=nova.auth.ldapdriver.LdapDriver +--ldap_url=ldap://172.19.0.131 + diff --git a/nova/livemigration_test/SI/utils/nova.conf b/nova/livemigration_test/SI/utils/nova.conf new file mode 100644 index 000000000..c66bfbc53 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova.conf @@ -0,0 +1,10 @@ +--verbose +--nodaemon +--dhcpbridge_flagfile=/opt/nova-2010.4//bin/nova.conf +--network_manager=nova.network.manager.VlanManager +--cc_host=172.19.0.131 +--routing_source_ip=172.19.0.131 +--sql_connection=mysql://root:nova@localhost/nova +--auth_driver=nova.auth.ldapdriver.LdapDriver +--libvirt_type=qemu +--public_interface=eth0 diff --git a/nova/livemigration_test/SI/utils/nova.sh b/nova/livemigration_test/SI/utils/nova.sh new file mode 100755 index 000000000..b8e2e9f26 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash +DIR=`pwd` +CMD=$1 +SOURCE_BRANCH=lp:nova +if [ -n "$2" ]; then + SOURCE_BRANCH=$2 +fi +#DIRNAME=nova +DIRNAME="" +NOVA_DIR=$DIR/$DIRNAME +if [ -n "$3" ]; then + NOVA_DIR=$DIR/$3 +fi + +if [ ! 
-n "$HOST_IP" ]; then + # NOTE(vish): This will just get the first ip in the list, so if you + # have more than one eth device set up, this will fail, and + # you should explicitly set HOST_IP in your environment + HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` +fi + +#USE_MYSQL=${USE_MYSQL:-0} +USE_MYSQL=1 +MYSQL_PASS=${MYSQL_PASS:-nova} +TEST=${TEST:-0} +#USE_LDAP=${USE_LDAP:-0} +USE_LDAP=1 +LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} +NET_MAN=${NET_MAN:-VlanManager} +# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface +# below but make sure that the interface doesn't already have an +# ip or you risk breaking things. +# FLAT_INTERFACE=eth0 + +if [ "$USE_MYSQL" == 1 ]; then + SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova +else + SQL_CONN=sqlite:///$NOVA_DIR/nova.sqlite +fi + +if [ "$USE_LDAP" == 1 ]; then + AUTH=ldapdriver.LdapDriver +else + AUTH=dbdriver.DbDriver +fi + +mkdir -p /etc/nova +cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf +--network_manager=nova.network.manager.$NET_MAN +--cc_host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=$SQL_CONN +--auth_driver=nova.auth.$AUTH +--libvirt_type=$LIBVIRT_TYPE +--public_interface=eth0 +NOVA_CONF_EOF + +if [ -n "$FLAT_INTERFACE" ]; then + echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf +fi + +if [ "$CMD" == "branch" ]; then + sudo apt-get install -y bzr + rm -rf $NOVA_DIR + bzr branch $SOURCE_BRANCH $NOVA_DIR + cd $NOVA_DIR + mkdir -p $NOVA_DIR/instances + mkdir -p $NOVA_DIR/networks +fi + +# You should only have to run this once +if [ "$CMD" == "install" ]; then + sudo apt-get install -y python-software-properties + sudo add-apt-repository ppa:nova-core/ppa + sudo apt-get update + sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables + sudo apt-get install -y user-mode-linux kvm libvirt-bin + sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server + sudo apt-get install -y lvm2 iscsitarget open-iscsi + echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget + sudo /etc/init.d/iscsitarget restart + sudo modprobe kvm + sudo /etc/init.d/libvirt-bin restart + sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot + sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy + sudo apt-get install -y python-libvirt python-libxml2 python-routes + if [ "$USE_MYSQL" == 1 ]; then + cat </etc/nova/nova-manage.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--FAKE_subdomain=ec2 +--libvirt_type=qemu +--no_internet_conn=True +--public_netif=eth0 +--public_interface=eth0 + +--cc-host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=mysql://root:nova@$HOST_IP/nova +--rabbit_host=$HOST_IP +--redis_host=$HOST_IP +--s3_host=$HOST_IP +--auth_driver=nova.auth.ldapdriver.LdapDriver +--ldap_url=ldap://$HOST_IP + +NOVA_CONF_EOF + +$DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf + diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py new file mode 100644 index 000000000..d28d3ccb6 --- /dev/null +++ b/nova/livemigration_test/UT/computeManager.test.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR = '/opt/openstack/nova' +#NOVA_DIR = '/opt/nova-2010.4' + +import sys +import unittest +import commands +import re +import logging + +from mock import Mock +import 
twisted + +try: + print + print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' \ + % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.compute.manager import ComputeManager + from nova.virt.libvirt_conn import LibvirtConnection + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + + def write(self, arg): + self.buffer += arg + + def writelines(self, arg): + self.buffer += arg + + def flush(self): + print 'flush' + self.buffer = '' + + +class tmpStderr(tmpStdout): + def write(self, arg): + self.buffer += arg + + def flush(self): + pass + + def realFlush(self): + self.buffer = '' + +dummyCallReturnValue={ 0:True } +dummyCallCount=0 +def dummyCall(context, topic, method): + global dummyCallReturnValue, dummyCallCount + if dummyCallCount in dummyCallReturnValue.keys() : + ret = dummyCallReturnValue[ dummyCallCount ] + dummyCallCount += 1 + return ret + else : + dummyCallCount += 1 + return False + + +class ComputeTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + manager = None + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + #if self.stdout is None: + # self.__class__.stdout = tmpStdout() + #self.stdoutBak = sys.stdout + #sys.stdout = self.stdout + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + self.host = 'openstack2-api' + if self.manager is None: + self.__class__.manager = ComputeManager(host=self.host) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + for key, val in [('name', 'host1'), ('cpu', 5), + ('memory_mb', 20480), ('hdd_gb', 876)]: + self.host1.__setitem__(key, val) + + self.host2 = Host() + for key, val in [('name', 'host2'), ('cpu', 5), + ('memory_mb', 20480), ('hdd_gb', 876)]: + self.host2.__setitem__(key, val) + + self.instance1 = Instance() + for key, val in [('id', 1), ('host', 'host1'), + ('hostname', 'i-12345'), ('state', power_state.RUNNING), + ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), + ('hdd_gb', 5), ('internal_id', 12345)]: + self.instance1.__setitem__(key, val) + + self.instance2 = Instance() + for key, val in [('id', 2), ('host', 'host1'), + ('hostname', 'i-12345'), ('state', power_state.RUNNING), + ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), + ('hdd_gb', 5)]: + self.instance2.__setitem__(key, val) + + self.fixed_ip1 = FixedIp() + for key, val in [('id', 1), ('address', '1.1.1.1'), + ('network_id', '1'), ('instance_id', 1)]: + self.fixed_ip1.__setitem__(key, val) + + self.vol1 = Volume() + for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'), + ('availability_zone', 'nova'), ('host', 'host1')]: + self.vol1.__setitem__(key, val) + + self.vol2 = Volume() + for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'), + ('availability_zone', 'nova'), ('host', 'host1')]: + self.vol2.__setitem__(key, val) + + self.secgrp1 = Volume() + for key, val in [('id', 1), ('ec2_id', 'default')]: + self.secgrp1.__setitem__(key, val) + + self.secgrp2 = Volume() + for key, val in [('id', 2), ('ec2_id', 
'def2')]: + self.secgrp2.__setitem__(key, val) + + self.netref1 = Network() + + def setMocks(self): + + # mocks for pre_live_migration + self.ctxt = context.get_admin_context() + db.instance_get = Mock(return_value=self.instance1) + db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) + db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) + db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) + db.security_group_get_by_instance \ + = Mock(return_value=[self.secgrp1, self.secgrp2]) + self.manager.driver.setup_nwfilters_for_instance \ + = Mock(return_value=None) + self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None) + self.manager.network_manager.setup_compute_network \ + = Mock(return_value=None) + # mocks for live_migration_ + rpc.call = Mock(return_value=True) + db.instance_set_state = Mock(return_value=True) + + # ---> test for nova.compute.manager.pre_live_migration() + def test01(self): + """01: NotFound error occurs on finding instance on DB. """ + + db.instance_get = Mock(side_effect=exception.NotFound('ERR')) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test02(self): + """02: NotAuthrized occurs on finding volume on DB. """ + + db.volume_get_by_ec2_id \ + = Mock(side_effect=exception.NotAuthorized('ERR')) + + self.assertRaises(exception.NotAuthorized, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test03(self): + """03: Unexpected exception occurs on finding volume on DB. """ + + db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) + + self.assertRaises(TypeError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test04(self): + """04: no volume and fixed ip found on DB, """ + + db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) + db.instance_get_fixed_address = Mock(return_value=None) + + self.assertRaises(rpc.RemoteError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + c1 = (0 <= sys.stderr.buffer.find('has no volume')) + + self.assertEqual(c1, True) + + def test05(self): + """05: volume found and no fixed_ip found on DB. """ + + db.instance_get_fixed_address \ + = Mock(side_effect=exception.NotFound('ERR')) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test06(self): + """06: self.driver.setup_nwfilters_for_instance causes NotFound. """ + self.manager.driver.setup_nwfilters_for_instance \ + = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test07(self): + """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """ + self.manager.network_manager.setup_compute_network \ + = Mock(side_effect=exception.ProcessExecutionError("ERR")) + + self.assertRaises(exception.ProcessExecutionError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + + def test08(self): + """08: self.manager.network_manager.setup_compute_network + exception.NotFound. 
""" + self.manager.network_manager.setup_compute_network \ + = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + # those 2 cases are omitted : + # self.driver.setup_nwfilters_for_instance causes + # twisted.python.failure.Failure. + # self.driver.refresh_security_group causes twisted.python.failure.Failure. + # + # twisted.python.failure.Failure can not be used with assertRaises, + # it doesnt have __call___ + # + + def test09(self): + """09: volume/fixed_ip found on DB, all procedure finish + successfully.. """ + + result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + self.assertEqual(result, True) + + # ---> test for nova.compute.manager.live_migration() + + def test10(self): + """10: rpc.call(pre_live_migration returns Error(Not None). """ + rpc.call = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test11(self): + """11: if rpc.call returns rpc.RemoteError. """ + + rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) + db.instance_set_state = Mock(return_value=True) + result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + c1 = (None == result) + c2 = (0 <= sys.stderr.buffer.find('err at')) + self.assertEqual(c1 and c2, True) + + def test12(self): + """12: if rpc.call returns rpc.RemoteError and instance_set_state + also ends up err. (then , unexpected err occurs, in this case + TypeError) + """ + rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) + db.instance_set_state = Mock(side_effect=TypeError("ERR")) + self.assertRaises(TypeError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test13(self): + """13: if wait for pre_live_migration, but timeout. """ + rpc.call = dummyCall + + db.instance_get = Mock(return_value=self.instance1) + + result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + c1 = (None == result) + c2 = (0 <= sys.stderr.buffer.find('Timeout for')) + self.assertEqual(c1 and c2, True) + + def test14(self): + """14: if db_instance_get issues NotFound. + """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(side_effect=exception.NotFound("ERR")) + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test15(self): + """15: if rpc.call returns True, and instance_get() cause other + exception. (Unexpected case - b/c it already checked by + nova-manage) + """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(side_effect=TypeError("ERR")) + + self.assertRaises(TypeError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test16(self): + """16: if rpc.call returns True, and live_migration issues + ProcessExecutionError. """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(return_value=self.instance1) + ret = self.manager.driver.live_migration \ + = Mock(side_effect=utils.ProcessExecutionError("ERR")) + + self.assertRaises(utils.ProcessExecutionError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test17(self): + """17: everything goes well. """ + self.manager.driver.live_migration = Mock(return_value=True) + ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + self.assertEqual(True, True) + + def tearDown(self): + """common terminating method. 
""" + self.stderr.realFlush() + sys.stderr = self.stderrBak + #sys.stdout = self.stdoutBak + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + #unittest.main() + + suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions) + unittest.TextTestRunner(verbosity=2).run(suite) + + #suite = unittest.TestSuite() + #suite.addTest(ComputeTestFunctions("test15")) + #suite.addTest(ComputeTestFunctions("test16")) + #unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py new file mode 100644 index 000000000..6a353508d --- /dev/null +++ b/nova/livemigration_test/UT/libvirtConnection.test.py @@ -0,0 +1,366 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.4' + +import sys +import unittest +import commands +import re +import logging +import libvirt + +from mock import Mock +import twisted + +try : + print + print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.compute.manager import ComputeManager + from nova.virt import libvirt_conn + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova import process + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def writelines(self, arg): + self.buffer += arg + def flush(self): + print 'flush' + self.buffer = '' + +class tmpStderr(tmpStdout): + def write(self,arg): + self.buffer += arg + def flush(self): + pass + def realFlush(self): + self.buffer = '' + +class DummyLibvirtConn(object): + nwfilterLookupByName = None + def __init__(self): + pass + + +class LibvirtConnectionTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + manager = None + + # 共通の初期化処理 + def setUp(self): + """common init method. 
""" + + #if self.stdout is None: + # self.__class__.stdout = tmpStdout() + #self.stdoutBak = sys.stdout + #sys.stdout = self.stdout + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + self.host = 'openstack2-api' + if self.manager is None: + self.__class__.manager = libvirt_conn.get_connection(False) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]: + self.host1.__setitem__(key, val) + + self.instance1 = Instance() + for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]: + self.instance1.__setitem__(key, val) + + + self.instance2 = Instance() + for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance2.__setitem__(key, val) + + + self.fixed_ip1 = FixedIp() + for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'), + ('instance_id', 1)]: + self.fixed_ip1.__setitem__(key, val) + + self.floating_ip1 = FloatingIp() + for key, val in [ ('id', 1), ('address', '1.1.1.200') ]: + self.floating_ip1.__setitem__(key, val) + + self.netref1 = Network() + for key, val in [ ('id', 1) ]: + self.netref1.__setitem__(key, val) + + + def setMocks(self): + + self.ctxt = context.get_admin_context() + db.instance_get_fixed_address = Mock(return_value = '1.1.1.1') + db.fixed_ip_update = Mock(return_value = None) + db.fixed_ip_get_network = Mock(return_value = self.netref1) + db.network_update = Mock(return_value = None) + db.instance_get_floating_address = Mock(return_value = '1.1.1.200') + db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1) + db.floating_ip_update = Mock(return_value = None) + db.instance_update = Mock(return_value = None) + + + # ---> test for nova.virt.libvirt_conn.nwfilter_for_instance_exists() + + def test01(self): + """01: libvirt.libvirtError occurs. """ + + self.manager._wrapped_conn = DummyLibvirtConn() + self.manager._test_connection = Mock(return_value=True) + self.manager._conn.nwfilterLookupByName = \ + Mock(side_effect=libvirt.libvirtError("ERR")) + ret = self.manager.nwfilter_for_instance_exists(self.instance1) + self.assertEqual(ret, False) + + def test02(self): + """02: libvirt.libvirtError not occurs. """ + + self.manager._wrapped_conn = DummyLibvirtConn() + self.manager._test_connection = Mock(return_value=True) + self.manager._conn.nwfilterLookupByName = \ + Mock(return_value=True) + ret = self.manager.nwfilter_for_instance_exists(self.instance1) + self.assertEqual(ret, True) + + # ---> test for nova.virt.libvirt_conn.live_migraiton() + + def test03(self): + """03: Unexpected exception occurs on finding volume on DB. 
""" + + utils.execute = Mock( side_effect=process.ProcessExecutionError('ERR') ) + + self.assertRaises(process.ProcessExecutionError, + self.manager.live_migration, + self.instance1, + 'host2') + + # ---> other case cannot be tested because live_migraiton + # is synchronized/asynchronized method are mixed together + + + # ---> test for nova.virt.libvirt_conn._post_live_migraiton + + def test04(self): + """04: instance_ref is not nova.db.sqlalchemy.models.Instances""" + + self.assertRaises(TypeError, + self.manager._post_live_migration, + "dummy string", + 'host2') + + def test05(self): + """05: db.instance_get_fixed_address return None""" + + db.instance_get_fixed_address = Mock( return_value=None ) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found')) + self.assertEqual(c1 and c2, True) + + def test06(self): + """06: db.instance_get_fixed_address raises NotFound""" + + db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host2') + + def test07(self): + """07: db.instance_get_fixed_address raises Unknown exception""" + + db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test08(self): + """08: db.fixed_ip_update return NotFound. """ + + db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test09(self): + """09: db.fixed_ip_update return NotAuthorized. """ + db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') ) + self.assertRaises(exception.NotAuthorized, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test10(self): + """10: db.fixed_ip_update return Unknown exception. """ + db.fixed_ip_update = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test11(self): + """11: db.fixed_ip_get_network causes NotFound. """ + + db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host1') + + # not tested db.fixed_ip_get_network raises NotAuthorized + # because same test has been done at previous test. + + def test12(self): + """12: db.fixed_ip_get_network causes Unknown exception. """ + + db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test13(self): + """13: db.network_update raises Unknown exception. """ + db.network_update = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test14(self): + """14: db.instance_get_floating_address raises NotFound. """ + db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + + def test15(self): + """15: db.instance_get_floating_address returns None. 
""" + + db.instance_get_floating_address = Mock( return_value=None ) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found')) + self.assertEqual(c1 and c2, True) + + def test16(self): + """16: db.instance_get_floating_address raises NotFound. """ + + db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + def test17(self): + """17: db.instance_get_floating_address raises Unknown exception. """ + db.instance_get_floating_address = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + + def test18(self): + """18: db.floating_ip_get_by_address raises NotFound """ + + db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + def test19(self): + """19: db.floating_ip_get_by_address raises Unknown exception. """ + db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + + def test20(self): + """20: db.floating_ip_update raises Unknown exception. + """ + db.floating_ip_update = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + def test21(self): + """21: db.instance_update raises unknown exception. """ + + db.instance_update = Mock(side_effect=TypeError("ERR")) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def tearDown(self): + """common terminating method. """ + self.stderr.realFlush() + sys.stderr = self.stderrBak + #sys.stdout = self.stdoutBak + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + #unittest.main() + + suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions) + unittest.TextTestRunner(verbosity=2).run(suite) + + #suite = unittest.TestSuite() + #suite.addTest(LibvirtConnectionTestFunctions("test14")) + #suite.addTest(LibvirtConnectionTestFunctions("test16")) + #unittest.TextTestRunner(verbosity=2).run(suite) + + diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py new file mode 100644 index 000000000..dabdba001 --- /dev/null +++ b/nova/livemigration_test/UT/nova-manage.test.py @@ -0,0 +1,318 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.2' + +import sys +import unittest +import commands +import re + +from mock import Mock + +try : + print + print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' 
% NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def flush(self): + self.buffer = '' + + +class NovaManageTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + + hostCmds = None + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() )) + commands.getstatusoutput('touch %s' % self.getInitpyPath() ) + try : + import bin.novamanagetest + except: + print 'Fail to import nova-manage . check bin/nova-manage exists' + raise + + # replace stdout for checking nova-manage output + if self.stdout is None : + self.__class__.stdout = tmpStdout() + self.stdoutBak = sys.stdout + sys.stdout = self.stdout + + # prepare test data + self.setTestData() + + + def setTestData(self): + import bin.novamanagetest + + if self.hostCmds is None : + self.__class__.hostCmds = bin.novamanagetest.HostCommands() + self.instanceCmds = bin.novamanagetest.InstanceCommands() + + self.host1 = Host() + self.host1.__setitem__('name', 'host1') + + self.host2 = Host() + self.host2.__setitem__('name', 'host2') + + self.instance1 = Instance() + self.instance1.__setitem__('id', 1) + self.instance1.__setitem__('host', 'host1') + self.instance1.__setitem__('hostname', 'i-12345') + self.instance1.__setitem__('state', power_state.NOSTATE) + self.instance1.__setitem__('state_description', 'running') + + self.instance2 = Instance() + self.instance2.__setitem__('id', 2) + self.instance2.__setitem__('host', 'host1') + self.instance2.__setitem__('hostname', 'i-12345') + self.instance2.__setitem__('state', power_state.RUNNING) + self.instance2.__setitem__('state_description', 'pending') + + self.instance3 = Instance() + self.instance3.__setitem__('id', 3) + self.instance3.__setitem__('host', 'host1') + self.instance3.__setitem__('hostname', 'i-12345') + self.instance3.__setitem__('state', power_state.RUNNING) + self.instance3.__setitem__('state_description', 'running') + + db.host_get_all = Mock(return_value=[self.host1, self.host2]) + + def getInitpyPath(self): + return '%s/bin/__init__.py' % NOVA_DIR + + def getNovaManageCopyPath(self): + return '%s/bin/novamanagetest.py' % NOVA_DIR + + # -----> Test for nova-manage host list + + def test01(self): + """01: Got some host lists. """ + + self.hostCmds.list() + + c1 = (2 == self.stdout.buffer.count('\n')) + c2 = (0 <= self.stdout.buffer.find('host1')) + c3 = (0 <= self.stdout.buffer.find('host2')) + self.assertEqual(c1 and c2 and c3, True) + + def test02(self): + """02: Got empty lsit. 
""" + + db.host_get_all = Mock(return_value=[]) + self.hostCmds.list() + + # result should be empty + c = (0 == len(self.stdout.buffer) ) + self.assertEqual(c, True) + + def test03(self): + """03: Got notFound """ + + db.host_get_all = Mock(side_effect=exception.NotFound("ERR")) + self.assertRaises(exception.NotFound, self.hostCmds.list) + + # --------> Test For nova-manage host show + + def test04(self): + """04: args are not enough(nova-manage host show) """ + self.assertRaises(TypeError, self.hostCmds.show ) + + + def test05(self): + """05: nova-manage host show not-registered-host, and got an error""" + + rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} ) + self.hostCmds.show('host1') + self.assertEqual( self.stdout.buffer[:3]=='ERR', True ) + + + def test06(self): + """06: nova-manage host show registerd-host, and no project uses the host""" + + dic = {'ret': True, + 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'usage': {}} + + rpc.call = Mock(return_value=dic ) + self.hostCmds.show('host1') + + # result should be : + # HOST PROJECT cpu mem(mb) disk(gb) + # host1 1 2 3 + line = self.stdout.buffer.split('\n')[1] + line = re.compile('\t+').sub(' ', line).strip() + c1 = ( 'host1 1 2 3' == line ) + c2 = ( self.stdout.buffer.count('\n') == 2 ) + + self.assertEqual( c1 and c2, True ) + + def test07(self): + """07: nova-manage host show registerd-host, + and some projects use the host + """ + dic = {'ret': True, + 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'usage': {'p1': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'p2': {'cpu':1, 'memory_mb':2, 'hdd_gb':3} }} + + rpc.call = Mock(return_value=dic ) + self.hostCmds.show('host1') + + # result should be : + # HOST PROJECT cpu mem(mb) disk(gb) + # host1 1 2 3 + # host1 p1 1 2 3 + # host1 p2 4 5 6 + line = self.stdout.buffer.split('\n')[1] + ret = re.compile('\t+').sub(' ', line).strip() + c1 = ( 'host1 1 2 3' == ret ) + + line = self.stdout.buffer.split('\n')[2] + line = re.compile('\t+').sub(' ', line).strip() + c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line ) + + line = self.stdout.buffer.split('\n')[3] + ret = re.compile('\t+').sub(' ', line).strip() + c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret ) + + self.assertEqual( c1 and c2 and c3, True ) + + def test08(self): + """08: nova-manage host show registerd-host, and rpc.call returns None + (unexpected error) + """ + rpc.call = Mock(return_value=None ) + self.hostCmds.show('host1') + c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') ) + self.assertEqual( c1, True ) + + # ----------> Test for bin/nova-manage instance live_migration + + def test09(self): + """09: arguments are not enough(nova-manage instances live_migration) + """ + self.assertRaises(TypeError, self.instanceCmds.live_migration ) + + def test10(self): + """10: arguments are not enough(nova-manage instances live_migration ec2_id) + """ + self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) + + def test11(self): + """11: nova-manage instances live_migration ec2_id host, + where hostname is invalid + """ + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) + + def test12(self): + """12: nova-manage instances live_migration ec2_id(invalid id) host""" + + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') ) + + self.assertRaises(exception.NotFound, 
self.instanceCmds.live_migration, 'i-xxx', 'host1' ) + + def test13(self): + """13: nova-manage instances live_migration ec2_id host, + but the instance specified by the ec2 id is not running (state is not power_state.RUNNING) + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) + c1 = c2 = False + try : + self.instanceCmds.live_migration('i-12345', 'host1') + except SystemExit, e: + c1 = (1 == e.code) + c2 = (0 < self.stdout.buffer.find('is not running') ) + self.assertEqual( c1 and c2 , True ) + + + def test14(self): + """14: nova-manage instances live_migration ec2_id host, + but the instance specified by the ec2 id is not running (state_description is not running) + """ + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) + c1 = c2 = False + try : + self.instanceCmds.live_migration('i-12345', 'host2') + except SystemExit, e: + c1 = (1 == e.code) + c2 = (0 < self.stdout.buffer.find('is not running') ) + self.assertEqual( c1 and c2 , True ) + + def test15(self): + """15: nova-manage instances live_migration ec2_id host, + but the instance is already running on the host specified as the destination, so an error should occur. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + c1 = c2 = False + try : + self.instanceCmds.live_migration('i-12345', 'host1') + except SystemExit, e: + c1 = (2 == e.code) + c2 = (0 < self.stdout.buffer.find('is running now') ) + self.assertEqual( c1 and c2 , True ) + + def test16(self): + """16: nova-manage instances live_migration ec2_id host, + everything goes well, and gets success messages. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + rpc.cast = Mock(return_value = None) + + self.instanceCmds.live_migration('i-12345', 'host2') + c1 = (0 <= self.stdout.buffer.find('Finished all procedure') ) + self.assertEqual( c1, True ) + + + def tearDown(self): + """common terminating method. """ + commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() ) + commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() ) + sys.stdout.flush() + sys.stdout = self.stdoutBak + +if __name__ == '__main__': + #unittest.main() + suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions) + unittest.TextTestRunner(verbosity=3).run(suite) + + diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py new file mode 100644 index 000000000..2fe4d0994 --- /dev/null +++ b/nova/livemigration_test/UT/schedulerManager.test.py @@ -0,0 +1,360 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.2' + +import sys +import unittest +import commands +import re + +from mock import Mock + +try : + print + print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' 
% NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.scheduler.manager import SchedulerManager + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + +except: + print 'set PYTHONPATH to nova-install-dir' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def flush(self): + self.buffer = '' + + +class SchedulerTestFunctions(unittest.TestCase): + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + self.host = 'openstack2-api' + self.manager = SchedulerManager(host=self.host) + + self.setTestData() + + def setTestData(self): + + self.host1 = Host() + self.host1.__setitem__('name', 'host1') + self.host1.__setitem__('cpu', 5) + self.host1.__setitem__('memory_mb', 20480) + self.host1.__setitem__('hdd_gb', 876) + + self.host2 = Host() + self.host2.__setitem__('name', 'host2') + self.host2.__setitem__('cpu', 5) + self.host2.__setitem__('memory_mb', 20480) + self.host2.__setitem__('hdd_gb', 876) + + self.instance1 = Instance() + for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance1.__setitem__(key, val) + + + self.instance2 = Instance() + for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance2.__setitem__(key, val) + + + self.instance3 = Instance() + for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance3.__setitem__(key, val) + + self.instance4 = Instance() + for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance4.__setitem__(key, val) + + self.instance5 = Instance() + for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance5.__setitem__(key, val) + + self.instance6 = Instance() + for key, val in [ ('id', 6), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance6.__setitem__(key, val) + + self.instance7 = Instance() + for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]: + self.instance7.__setitem__(key, val) + + self.instance8 = Instance() + for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: + self.instance8.__setitem__(key, val) + + + + def check_format(self, val): + """check result format of show_host_resource """ + + if dict != type(val) : + sys.stderr.write('return value is 
not dict') + return False + + if not val.has_key('ret'): + sys.stderr.write('invalid format(missing "ret"). ') + return False + + if not val['ret'] : + if not val.has_key('msg') : + sys.stderr.write( 'invalid format(missing "msg").' ) + return False + + else : + if not val.has_key('phy_resource') : + sys.stderr.write('invalid format(missing "phy_resource"). ') + return False + + if not val.has_key('usage'): + sys.stderr.write('invalid format(missing "usage"). ') + return False + + if not self._check_format(val['phy_resource']): + return False + + for key, dic in val['usage'].items() : + if not self._check_format(dic): + return False + return True + + def _check_format(self, val): + if dict != type(val) : + sys.stderr.write('return value is not dict') + return False + + for key in ['cpu', 'memory_mb', 'hdd_gb']: + if not val.has_key(key) : + sys.stderr.write('invalid format(missing "%s"). ' % key ) + return False + + return True + + # ---> test for nova.scheduler.manager.show_host_resource() + + def test01(self): + """01: get NotFound exception when dest host not found on DB """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + result = self.manager.show_host_resource(ctxt, 'not-registered-host') + c1 = ( not result['ret'] ) + c2 = ( 0 == result['msg'].find('No such') ) + self.assertEqual(c1 and c2, True) + + def test02(self): + """02: get other exception if unexpected err. """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, self.manager.show_host_resource, ctxt, 'host1' ) + + def test03(self): + """03: no instance found on dest host. """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( return_value = self.host1 ) + db.instance_get_all_by_host = Mock( return_value=[]) + ret= self.manager.show_host_resource(ctxt, 'host1') + + c1 = self.check_format(ret) + v = ret['phy_resource'] + c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) + c3 = ( 0 == len(ret['usage']) ) + + self.assertEqual(c1 and c2 and c3, True) + + def test04(self): + """04: some instance found on dest host. """ + + ctxt = context.get_admin_context() + db.host_get_by_name = Mock( return_value = self.host1 ) + db.instance_get_all_by_host = Mock( return_value=[ self.instance1, + self.instance2, + self.instance3] ) + + db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3) + db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) + db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) + + ret= self.manager.show_host_resource(ctxt, 'host1') + + c1 = self.check_format(ret) + v = ret['phy_resource'] + c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) + c3 = ( 2 == len(ret['usage']) ) + c4 = ( self.instance1['project_id'] in ret['usage'].keys()) + c5 = ( self.instance3['project_id'] in ret['usage'].keys()) + + self.assertEqual(c1 and c2 and c3 and c4 and c5, True) + + + # ---> test for nova.scheduler.manager.has_enough_resource() + def test05(self): + """05: when cpu is exccded some instance found on dest host. 
""" + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance6) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + def test06(self): + """06: when memory is exccded some instance found on dest host. """ + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance7) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + def test07(self): + """07: when hdd is exccded some instance found on dest host. """ + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance8) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + def test08(self): + """08: everything goes well. (instance_get_all_by_host returns list)""" + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, True) + + + def test09(self): + """09: everything goes well(instance_get_all_by_host returns[]). """ + + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [] ) + + ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, True) + + + # ---> test for nova.scheduler.manager.live_migration() + + + def test10(self): + """10: instance_get_by_internal_id issue NotFound. """ + # Mocks for has_enough_resource() + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance8) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.live_migration, + ctxt, + 'i-12345', + 'host1') + + + def test11(self): + """11: return False if host doesnt have enough resource. """ + + # Mocks for has_enough_resource() + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance8) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(return_value = self.instance8) + db.instance_set_state = Mock(return_value = True) + rpc_cast = Mock(return_value = True) + + ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, False) + + + + def test12(self): + """12: everything goes well. 
""" + + # Mocks for has_enough_resource() + ctxt = context.get_admin_context() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(return_value = self.instance8) + db.instance_set_state = Mock(return_value = True) + rpc.cast = Mock(return_value = True) + + ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') + self.assertEqual(ret, True) + + + def tearDown(self): + """common terminating method. """ + #sys.stdout = self.stdoutBak + pass + +if __name__ == '__main__': + #unittest.main() + suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions) + unittest.TextTestRunner(verbosity=3).run(suite) + + diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls new file mode 100644 index 000000000..0524526b6 Binary files /dev/null and b/nova/livemigration_test/UT/testCase_UT.xls differ -- cgit From f983884dd262f46907f80a04121d957347881240 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 24 Dec 2010 15:09:05 +0900 Subject: nova.compute.managerがこれまでの修正でデグレしていたので修正 CPUID, その他のチェックルーチンをnova.scheduler.manager.live_migrationに追加 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bin/nova-manage | 23 +-- nova/compute/manager.py | 15 +- nova/db/sqlalchemy/models.py | 8 +- nova/livemigration_test/SI/testCase_SI.xls | Bin 35840 -> 43520 bytes .../SI/testParameterSheet_SI.xls | Bin 464384 -> 464384 bytes nova/livemigration_test/UT/computeManager.test.py | 10 +- .../UT/libvirtConnection.test.py | 10 +- nova/livemigration_test/UT/nova-manage.test.py | 77 ++++++--- .../livemigration_test/UT/schedulerManager.test.py | 192 +++++++++++++-------- nova/livemigration_test/UT/testCase_UT.xls | Bin 195072 -> 202752 bytes nova/scheduler/manager.py | 70 ++++++-- nova/service.py | 17 +- nova/virt/libvirt_conn.py | 40 ++++- 13 files changed, 327 insertions(+), 135 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index d1cda72b7..d6aa29679 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -456,24 +456,25 @@ class InstanceCommands(object): if power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']: - print 'Instance(%s) is not running' % ec2_id - sys.exit(1) + raise exception.Invalid('Instance(%s) is not running' % ec2_id) # 3. the host where instance is running and dst host is not same if dest == instance_ref['host'] : - print '%s is where %s is running now. choose different host.' \ - % (dest, ec2_id) - sys.exit(2) + msg = '%s is where %s is running now. choose other host.' % (dest, ec2_id) + raise exception.Invalid(msg) # 4. live migration - rpc.cast(ctxt, - FLAGS.scheduler_topic, - { "method": "live_migration", - "args": {"ec2_id": ec2_id, - "dest":dest}}) + ret = rpc.call(ctxt, + FLAGS.scheduler_topic, + { "method": "live_migration", + "args": {"ec2_id": ec2_id, + "dest":dest}}) + + if None != ret : + raise ret print 'Finished all procedure. check instance are migrated successfully' - print 'chech status by using euca-describe-instances.' + print 'check status by using euca-describe-instances.' 
# this class is created by masumotok diff --git a/nova/compute/manager.py b/nova/compute/manager.py index bad525115..c2c532c6d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -105,7 +105,7 @@ class ComputeManager(manager.Manager): self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, - {'host': self.host}) + {'host': self.host, 'launch_at':self.host}) # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, @@ -261,18 +261,19 @@ class ComputeManager(manager.Manager): self.db.volume_detached(context, volume_id) defer.returnValue(True) - def get_vcpu_number(self): - """Get the number of vcpu on physical computer.""" - return self.driver.get_vcpu_number() + def compareCPU(self, context, xml): + """ Check the host cpu is compatible to a cpu given by xml.""" + logging.warn('good!') + return self.driver.compareCPU(xml) - def get_mem_size(self): + def get_memory_mb(self): """Get the memory size of physical computer .""" meminfo = open('/proc/meminfo').read().split() idx = meminfo.index('MemTotal:') # transforming kb to mb. return int(meminfo[idx + 1]) / 1024 - def get_hdd_size(self): + def get_local_gb(self): """Get the hdd size of physical computer .""" hddinfo = os.statvfs(FLAGS.instances_path) return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024 @@ -315,7 +316,7 @@ class ComputeManager(manager.Manager): self.driver.setup_nwfilters_for_instance(instance_ref) # 5. bridge settings - self.network_manager.setup_compute_network(instance_id) + self.network_manager.setup_compute_network(context, instance_id) return True def nwfilter_for_instance_exists(self, context, instance_id): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index db6f51948..7f3a58bcb 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -147,7 +147,9 @@ class Host(BASE, NovaBase): vcpus = Column(Integer, nullable=False, default=-1) memory_mb = Column(Integer, nullable=False, default=-1) local_gb = Column(Integer, nullable=False, default=-1) - #cpuid = Column(Integer, nullable=False) + hypervisor_type = Column(String(128)) + hypervisor_version = Column(Integer, nullable=False, default=-1) + cpu_info = Column(String(1024)) deleted = Column(Boolean, default=False) # C: when calling service_create() # D: never deleted. instead of deleting cloumn "deleted" is true @@ -232,6 +234,10 @@ class Instance(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + # To remember at which host a instance booted. + # An instance may moved to other host by live migraiton. 
+ launch_at = Column(String(255)) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls index 723363c1e..65cf96fd7 100644 Binary files a/nova/livemigration_test/SI/testCase_SI.xls and b/nova/livemigration_test/SI/testCase_SI.xls differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls index 192d9705b..400b43b43 100644 Binary files a/nova/livemigration_test/SI/testParameterSheet_SI.xls and b/nova/livemigration_test/SI/testParameterSheet_SI.xls differ diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py index d28d3ccb6..69ee876d1 100644 --- a/nova/livemigration_test/UT/computeManager.test.py +++ b/nova/livemigration_test/UT/computeManager.test.py @@ -1,10 +1,9 @@ #!/usr/bin/python # -*- coding: UTF-8 -*- -NOVA_DIR = '/opt/openstack/nova' -#NOVA_DIR = '/opt/nova-2010.4' import sys +import os import unittest import commands import re @@ -13,6 +12,11 @@ import logging from mock import Mock import twisted +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + try: print print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' \ @@ -40,7 +44,7 @@ try: except: - print 'set PYTHONPATH to nova-install-dir' + print 'set correct NOVA_DIR in this script. ' raise diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py index 6a353508d..5dfe8702c 100644 --- a/nova/livemigration_test/UT/libvirtConnection.test.py +++ b/nova/livemigration_test/UT/libvirtConnection.test.py @@ -1,9 +1,9 @@ #!/usr/bin/python # -*- coding: UTF-8 -*- -NOVA_DIR='/opt/nova-2010.4' import sys +import os import unittest import commands import re @@ -13,6 +13,12 @@ import libvirt from mock import Mock import twisted +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + + try : print print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR @@ -40,7 +46,7 @@ try : except: - print 'set PYTHONPATH to nova-install-dir' + print 'set correct NOVA_DIR in this script. ' raise diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py index dabdba001..f1653d21a 100644 --- a/nova/livemigration_test/UT/nova-manage.test.py +++ b/nova/livemigration_test/UT/nova-manage.test.py @@ -1,15 +1,22 @@ #!/usr/bin/python # -*- coding: UTF-8 -*- -NOVA_DIR='/opt/nova-2010.2' +NOVA_DIR='/opt/nova-2010.4' import sys +import os import unittest import commands import re from mock import Mock +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + + try : print print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' % NOVA_DIR @@ -33,7 +40,7 @@ try : except: - print 'set PYTHONPATH to nova-install-dir' + print 'set correct NOVA_DIR in this script. 
' raise @@ -42,7 +49,15 @@ class tmpStdout: self.buffer = "" def write(self,arg): self.buffer += arg + def flush(self): + self.buffer = '' + +class tmpStderr(tmpStdout): + def write(self, arg): + self.buffer += arg def flush(self): + pass + def realFlush(self): self.buffer = '' @@ -50,6 +65,8 @@ class NovaManageTestFunctions(unittest.TestCase): stdout = None stdoutBak = None + stderr = None + stderrBak = None hostCmds = None @@ -71,6 +88,12 @@ class NovaManageTestFunctions(unittest.TestCase): self.stdoutBak = sys.stdout sys.stdout = self.stdout + # replace stderr for checking nova-manage output + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + # prepare test data self.setTestData() @@ -164,7 +187,7 @@ class NovaManageTestFunctions(unittest.TestCase): """06: nova-manage host show registerd-host, and no project uses the host""" dic = {'ret': True, - 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, + 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, 'usage': {}} rpc.call = Mock(return_value=dic ) @@ -185,9 +208,9 @@ class NovaManageTestFunctions(unittest.TestCase): and some projects use the host """ dic = {'ret': True, - 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, - 'usage': {'p1': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, - 'p2': {'cpu':1, 'memory_mb':2, 'hdd_gb':3} }} + 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, + 'usage': {'p1': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, + 'p2': {'vcpus':1, 'memory_mb':2, 'local_gb':3} }} rpc.call = Mock(return_value=dic ) self.hostCmds.show('host1') @@ -253,13 +276,12 @@ class NovaManageTestFunctions(unittest.TestCase): """ db.host_get_by_name = Mock(return_value = self.host1) db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) - c1 = c2 = False try : self.instanceCmds.live_migration('i-12345', 'host1') - except SystemExit, e: - c1 = (1 == e.code) - c2 = (0 < self.stdout.buffer.find('is not running') ) - self.assertEqual( c1 and c2 , True ) + except exception.Invalid, e: + c1 = (0 < e.message.find('is not running') ) + self.assertTrue(c1, True) + return False def test14(self): @@ -268,13 +290,12 @@ class NovaManageTestFunctions(unittest.TestCase): """ db.host_get_by_name = Mock(return_value = self.host2) db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) - c1 = c2 = False try : self.instanceCmds.live_migration('i-12345', 'host2') - except SystemExit, e: - c1 = (1 == e.code) - c2 = (0 < self.stdout.buffer.find('is not running') ) - self.assertEqual( c1 and c2 , True ) + except exception.Invalid, e: + c1 = (0 < e.message.find('is not running') ) + self.assertTrue(c1, True) + return False def test15(self): """15: nova-manage instances live_migration ec2_id host, @@ -282,21 +303,31 @@ class NovaManageTestFunctions(unittest.TestCase): """ db.host_get_by_name = Mock(return_value = self.host1) db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - c1 = c2 = False try : self.instanceCmds.live_migration('i-12345', 'host1') - except SystemExit, e: - c1 = (2 == e.code) - c2 = (0 < self.stdout.buffer.find('is running now') ) - self.assertEqual( c1 and c2 , True ) + except exception.Invalid, e: + c1 = ( 0 <= e.message.find('is running now') ) + self.assertTrue(c1, True) + return False + def test16(self): """16: nova-manage instances live_migration ec2_id host, + rpc.call raises RemoteError because destination doesnt have enough resource. 
+ """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + rpc.call = Mock(return_value = rpc.RemoteError(TypeError, 'val', 'traceback')) + self.assertRaises(rpc.RemoteError, self.instanceCmds.live_migration, 'i-xxx', 'host2' ) + + + def test17(self): + """17: nova-manage instances live_migration ec2_id host, everything goes well, ang gets success messages. """ db.host_get_by_name = Mock(return_value = self.host1) db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - rpc.cast = Mock(return_value = None) + rpc.call = Mock(return_value = None) self.instanceCmds.live_migration('i-12345', 'host2') c1 = (0 <= self.stdout.buffer.find('Finished all procedure') ) @@ -309,6 +340,8 @@ class NovaManageTestFunctions(unittest.TestCase): commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() ) sys.stdout.flush() sys.stdout = self.stdoutBak + self.stderr.realFlush() + sys.stderr = self.stderrBak if __name__ == '__main__': #unittest.main() diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py index 2fe4d0994..a0b76c918 100644 --- a/nova/livemigration_test/UT/schedulerManager.test.py +++ b/nova/livemigration_test/UT/schedulerManager.test.py @@ -1,15 +1,21 @@ #!/usr/bin/python # -*- coding: UTF-8 -*- -NOVA_DIR='/opt/nova-2010.2' import sys +import os import unittest import commands import re +import libvirt from mock import Mock +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + try : print print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR @@ -34,7 +40,7 @@ try : from nova.db.sqlalchemy.models import * except: - print 'set PYTHONPATH to nova-install-dir' + print 'set correct NOVA_DIR in this script. 
' raise @@ -57,20 +63,25 @@ class SchedulerTestFunctions(unittest.TestCase): self.manager = SchedulerManager(host=self.host) self.setTestData() + self.setMocks() def setTestData(self): self.host1 = Host() self.host1.__setitem__('name', 'host1') - self.host1.__setitem__('cpu', 5) + self.host1.__setitem__('vcpus', 5) self.host1.__setitem__('memory_mb', 20480) - self.host1.__setitem__('hdd_gb', 876) + self.host1.__setitem__('local_gb', 876) self.host2 = Host() self.host2.__setitem__('name', 'host2') - self.host2.__setitem__('cpu', 5) + self.host2.__setitem__('vcpus', 5) self.host2.__setitem__('memory_mb', 20480) - self.host2.__setitem__('hdd_gb', 876) + self.host2.__setitem__('local_gb', 876) + self.host2.__setitem__('hypervisor_type', 'QEMU') + self.host2.__setitem__('hypervisor_version', 12003) + xml="x86_64NehalemIntel" + self.host2.__setitem__('cpu_info', xml) self.instance1 = Instance() for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), @@ -122,7 +133,24 @@ class SchedulerTestFunctions(unittest.TestCase): ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: self.instance8.__setitem__(key, val) + self.service1 = Service() + for key, val in [ ('id', 1), ('host', 'host1'), ('binary', 'nova-compute'), + ('topic', 'compute')]: + self.service1.__setitem__(key, val) + + + def setMocks(self): + self.ctxt = context.get_admin_context() + # Mocks for has_enough_resource() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + # Mocks for live_migration + db.instance_get_by_internal_id = Mock(return_value = self.instance1) + # db.host_get_by_name <- defined above. + db.service_get_all_by_topic = Mock(return_value = [self.service1] ) + rpc.call = Mock(return_value=1) def check_format(self, val): """check result format of show_host_resource """ @@ -162,21 +190,21 @@ class SchedulerTestFunctions(unittest.TestCase): sys.stderr.write('return value is not dict') return False - for key in ['cpu', 'memory_mb', 'hdd_gb']: + for key in ['vcpus', 'memory_mb', 'local_gb']: if not val.has_key(key) : sys.stderr.write('invalid format(missing "%s"). ' % key ) return False return True + # ---> test for nova.scheduler.manager.show_host_resource() def test01(self): """01: get NotFound exception when dest host not found on DB """ - ctxt = context.get_admin_context() db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - result = self.manager.show_host_resource(ctxt, 'not-registered-host') + result = self.manager.show_host_resource(self.ctxt, 'not-registered-host') c1 = ( not result['ret'] ) c2 = ( 0 == result['msg'].find('No such') ) self.assertEqual(c1 and c2, True) @@ -184,21 +212,19 @@ class SchedulerTestFunctions(unittest.TestCase): def test02(self): """02: get other exception if unexpected err. """ - ctxt = context.get_admin_context() db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, self.manager.show_host_resource, ctxt, 'host1' ) + self.assertRaises(TypeError, self.manager.show_host_resource, self.ctxt, 'host1' ) def test03(self): """03: no instance found on dest host. 
""" - ctxt = context.get_admin_context() db.host_get_by_name = Mock( return_value = self.host1 ) db.instance_get_all_by_host = Mock( return_value=[]) - ret= self.manager.show_host_resource(ctxt, 'host1') + ret= self.manager.show_host_resource(self.ctxt, 'host1') c1 = self.check_format(ret) v = ret['phy_resource'] - c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) + c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) c3 = ( 0 == len(ret['usage']) ) self.assertEqual(c1 and c2 and c3, True) @@ -206,7 +232,6 @@ class SchedulerTestFunctions(unittest.TestCase): def test04(self): """04: some instance found on dest host. """ - ctxt = context.get_admin_context() db.host_get_by_name = Mock( return_value = self.host1 ) db.instance_get_all_by_host = Mock( return_value=[ self.instance1, self.instance2, @@ -216,11 +241,11 @@ class SchedulerTestFunctions(unittest.TestCase): db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) - ret= self.manager.show_host_resource(ctxt, 'host1') + ret= self.manager.show_host_resource(self.ctxt, 'host1') c1 = self.check_format(ret) v = ret['phy_resource'] - c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) + c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) c3 = ( 2 == len(ret['usage']) ) c4 = ( self.instance1['project_id'] in ret['usage'].keys()) c5 = ( self.instance3['project_id'] in ret['usage'].keys()) @@ -232,58 +257,51 @@ class SchedulerTestFunctions(unittest.TestCase): def test05(self): """05: when cpu is exccded some instance found on dest host. """ - ctxt = context.get_admin_context() db.instance_get = Mock(return_value = self.instance6) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + try : + self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 < e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) def test06(self): """06: when memory is exccded some instance found on dest host. """ - ctxt = context.get_admin_context() db.instance_get = Mock(return_value = self.instance7) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) + try : + self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 <= e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False def test07(self): """07: when hdd is exccded some instance found on dest host. """ - ctxt = context.get_admin_context() db.instance_get = Mock(return_value = self.instance8) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + try : + self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 <= e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) def test08(self): """08: everything goes well. 
(instance_get_all_by_host returns list)""" - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, True) + ret= self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + self.assertEqual(ret, None) def test09(self): """09: everything goes well(instance_get_all_by_host returns[]). """ - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) db.instance_get_all_by_host = Mock(return_value = [] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, True) + ret= self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + self.assertEqual(ret, None) # ---> test for nova.scheduler.manager.live_migration() @@ -291,60 +309,90 @@ class SchedulerTestFunctions(unittest.TestCase): def test10(self): """10: instance_get_by_internal_id issue NotFound. """ + # Mocks for has_enough_resource() - ctxt = context.get_admin_context() db.instance_get = Mock(return_value = self.instance8) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - # Mocks for live_migration()db.instance_get_by_internal_id # (any Mock is ok here. important mock is all above) db.instance_get_by_internal_id = Mock(side_effect=exception.NotFound("ERR")) self.assertRaises(exception.NotFound, self.manager.live_migration, - ctxt, + self.ctxt, 'i-12345', 'host1') def test11(self): - """11: return False if host doesnt have enough resource. """ + """11: get NotFound exception when dest host not found on DB """ + + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host1') + + + def test12(self): + """12: Destination host is not compute node """ + self.assertRaises(exception.Invalid, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host2') + + + # Cannot test the case of hypervisor type difference and hypervisor + # version difference, since we cannot set different mocks to same method.. + + def test13(self): + """13: rpc.call raises RemoteError(Unexpected error occurs when executing compareCPU) """ + rpc.call = Mock(return_value = rpc.RemoteError(libvirt.libvirtError, 'val', 'traceback')) + self.assertRaises(rpc.RemoteError, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host1') + + def test14(self): + """14: rpc.call returns 0 (cpu is not compatible between src and dest) """ + rpc.call = Mock(return_value = 0) + try : + self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + except exception.Invalid, e: + c1 = ( 0 <= e.message.find('doesnt have compatibility to')) + self.assertTrue(c1, True) + return False + + def test15(self): + """15: raise NotEmpty if host doesnt have enough resource. """ # Mocks for has_enough_resource() - ctxt = context.get_admin_context() db.instance_get = Mock(return_value = self.instance8) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - # Mocks for live_migration()db.instance_get_by_internal_id - # (any Mock is ok here. 
important mock is all above) + # Mocks for live_migration() db.instance_get_by_internal_id = Mock(return_value = self.instance8) db.instance_set_state = Mock(return_value = True) rpc_cast = Mock(return_value = True) - ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) - - + try : + self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 <= e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False - def test12(self): - """12: everything goes well. """ - # Mocks for has_enough_resource() - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + def test16(self): + """16: everything goes well. """ - # Mocks for live_migration()db.instance_get_by_internal_id - # (any Mock is ok here. important mock is all above) db.instance_get_by_internal_id = Mock(return_value = self.instance8) db.instance_set_state = Mock(return_value = True) rpc.cast = Mock(return_value = True) - ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, True) + ret= self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + self.assertEqual(ret, None) def tearDown(self): diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls index 0524526b6..f73e8c5aa 100644 Binary files a/nova/livemigration_test/UT/testCase_UT.xls and b/nova/livemigration_test/UT/testCase_UT.xls differ diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index d36525506..0921e3791 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -73,29 +73,79 @@ class SchedulerManager(manager.Manager): def live_migration(self, context, ec2_id, dest): """ live migration method""" + # (masumotok) below pre-checking is followed by + # http://wiki.libvirt.org/page/TodoPreMigrationChecks + # 1. get instance id internal_id = cloud.ec2_id_to_internal_id(ec2_id) instance_ref = db.instance_get_by_internal_id(context, internal_id) instance_id = instance_ref['id'] - # 2. check dst host still has enough capacities - if not self.has_enough_resource(context, instance_id, dest): - return False - - # 3. change instance_state + # 2. get src host and dst host + src = instance_ref['launch_at'] + shost_ref = db.host_get_by_name(context, src ) + dhost_ref = db.host_get_by_name(context, dest) + + # 3. dest should be compute + services = db.service_get_all_by_topic(context, 'compute') + logging.warn('%s' % [service.host for service in services]) + if dest not in [service.host for service in services] : + raise exception.Invalid('%s must be compute node' % dest) + + # 4. check hypervisor is same + shypervisor = shost_ref['hypervisor_type'] + dhypervisor = dhost_ref['hypervisor_type'] + if shypervisor != dhypervisor: + msg = 'Different hypervisor type(%s->%s)' % (shypervisor, dhypervisor) + raise exception.Invalid(msg) + + # 5. check hypervisor version + shypervisor = shost_ref['hypervisor_version'] + dhypervisor = dhost_ref['hypervisor_version'] + if shypervisor > dhypervisor: + msg = 'Older hypervisor version(%s->%s)' % (shypervisor, dhypervisor) + raise exception.Invalid(msg) + + # 6. 
check cpuinfo + cpuinfo = shost_ref['cpu_info'] + if str != type(cpuinfo): + msg = 'Unexpected err: no cpu_info for %s found on DB.hosts' % src + raise exception.Invalid(msg) + + logging.warn('cpuinfo %s %d' % (cpuinfo, len(cpuinfo))) + ret = rpc.call(context, + db.queue_get_for(context, FLAGS.compute_topic, dest), + {"method": 'compareCPU', + "args": {'xml': cpuinfo}}) + + if int != type(ret): + raise ret + + if 0 >= ret : + msg = '%s doesnt have compatibility to %s(where %s launching at)\n' \ + % (dest, src, ec2_id) + msg += 'result:%d \n' % ret + msg += 'Refer to %s' % \ + 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' + raise exception.Invalid(msg) + + # 7. check dst host still has enough capacities + self.has_enough_resource(context, instance_id, dest) + + # 8. change instance_state db.instance_set_state(context, instance_id, power_state.PAUSED, 'migrating') - # 4. request live migration + # 9. request live migration host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), {"method": 'live_migration', "args": {'instance_id': instance_id, 'dest': dest}}) - return True + def has_enough_resource(self, context, instance_id, dest): """ check if destination host has enough resource for live migration""" @@ -126,12 +176,10 @@ class SchedulerManager(manager.Manager): (ec2_id, total_cpu, total_mem, total_hdd)) if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd: - logging.debug('%s doesnt have enough resource for %s' % - (dest, ec2_id)) - return False + msg = '%s doesnt have enough resource for %s' % (dest, ec2_id) + raise exception.NotEmpty(msg) logging.debug('%s has enough resource for %s' % (dest, ec2_id)) - return True def show_host_resource(self, context, host, *args): """ show the physical/usage resource given by hosts.""" diff --git a/nova/service.py b/nova/service.py index 3ce07a3e0..416448faa 100644 --- a/nova/service.py +++ b/nova/service.py @@ -119,14 +119,21 @@ class Service(object, service.Service): def _update_host_ref(self, context, host_ref): if 0 <= self.manager_class_name.find('ComputeManager'): - cpu = self.manager.get_vcpu_number() - memory_mb = self.manager.get_mem_size() - local_gb = self.manager.get_hdd_size() + vcpu = self.manager.driver.get_vcpu_number() + memory_mb = self.manager.get_memory_mb() + local_gb = self.manager.get_local_gb() + hypervisor = self.manager.driver.get_hypervisor_type() + version = self.manager.driver.get_hypervisor_version() + cpu_xml = self.manager.driver.get_cpu_xml() + db.host_update(context, host_ref['id'], - {'vcpus': cpu, + {'vcpus': vcpu, 'memory_mb': memory_mb, - 'local_gb': local_gb}) + 'local_gb': local_gb, + 'hypervisor_type': hypervisor, + 'hypervisor_version': version, + 'cpu_info':cpu_xml }) return host_ref def __getattr__(self, key): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f2b5cf794..6450db8bd 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,6 +44,7 @@ Supports KVM, QEMU, UML, and XEN. import logging import os import shutil +import re import IPy from twisted.internet import defer @@ -632,9 +633,30 @@ class LibvirtConnection(object): return interfaces def get_vcpu_number(self): - """ get vcpu number of physical computer """ + """ Get vcpu number of physical computer. 
""" return self._conn.getMaxVcpus(None) + def get_hypervisor_type(self): + """ Get hypervisor type """ + return self._conn.getType() + + def get_hypervisor_version(self): + """ Get hypervisor version """ + return self._conn.getVersion() + + def get_cpu_xml(self): + """ Get cpuinfo information """ + xmlstr = self._conn.getCapabilities() + xml = libxml2.parseDoc(xmlstr) + nodes = xml.xpathEval('//cpu') + if 1 != len(nodes): + msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' % len(nodes) + msg += '\n'+xml.serialize() + raise exception.Invalid(msg) + cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize()) + return cpuxmlstr + + def block_stats(self, instance_name, disk): """ Note that this function takes an instance name, not an Instance, so @@ -651,14 +673,17 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) + def refresh_security_group(self, security_group_id): fw = NWFilterFirewall(self._conn) fw.ensure_security_group_filter(security_group_id) + def setup_nwfilters_for_instance(self, instance): nwfilter = NWFilterFirewall(self._conn) return nwfilter.setup_nwfilters_for_instance(instance) + def nwfilter_for_instance_exists(self, instance_ref): try: filter = 'nova-instance-%s' % instance_ref.name @@ -667,6 +692,19 @@ class LibvirtConnection(object): except libvirt.libvirtError: return False + + def compareCPU(self, xml): + """ + Check the host cpu is compatible to a cpu given by xml. + "xml" must be a part of libvirt.openReadonly().getCapabilities(). + return values follows by virCPUCompareResult. + if 0 > return value, do live migration. + + 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' + """ + return self._conn.compareCPU(xml,0) + + def live_migration(self, instance_ref, dest): uri = FLAGS.live_migration_uri % dest out, err = utils.execute("sudo virsh migrate --live %s %s" -- cgit From 6b4511d33562da46f9954bf5423ada49f9696d75 Mon Sep 17 00:00:00 2001 From: masumotok Date: Fri, 24 Dec 2010 15:45:05 +0900 Subject: テスト項目表がなぜか消えたので追加 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/livemigration_test/SI/testCase_SI.xls | Bin 0 -> 43520 bytes nova/livemigration_test/SI/testParameterSheet_SI.xls | Bin 0 -> 464384 bytes nova/livemigration_test/UT/testCase_UT.xls | Bin 0 -> 202752 bytes 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 nova/livemigration_test/SI/testCase_SI.xls create mode 100644 nova/livemigration_test/SI/testParameterSheet_SI.xls create mode 100644 nova/livemigration_test/UT/testCase_UT.xls diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls new file mode 100644 index 000000000..65cf96fd7 Binary files /dev/null and b/nova/livemigration_test/SI/testCase_SI.xls differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls new file mode 100644 index 000000000..400b43b43 Binary files /dev/null and b/nova/livemigration_test/SI/testParameterSheet_SI.xls differ diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls new file mode 100644 index 000000000..f73e8c5aa Binary files /dev/null and b/nova/livemigration_test/UT/testCase_UT.xls differ -- cgit From 21c1ba77cdb95b95a13a81c243ac13e0cf8a632f Mon Sep 17 00:00:00 2001 From: masumotok Date: Fri, 24 Dec 2010 16:05:24 +0900 Subject: テスト項目表をローカルから一度削除した状態でコミット MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/livemigration_test/SI/picture.pptx | Bin 137730 -> 0 bytes nova/livemigration_test/SI/testCase_SI.xls | Bin 43520 -> 0 bytes .../SI/testParameterSheet_SI.xls | Bin 464384 -> 0 bytes nova/livemigration_test/SI/utils/demo-firstboot.sh | 39 - .../SI/utils/demo-runInstance.sh | 57 -- nova/livemigration_test/SI/utils/nova-manage.conf | 18 - nova/livemigration_test/SI/utils/nova.conf | 10 - nova/livemigration_test/SI/utils/nova.sh | 180 ----- nova/livemigration_test/SI/utils/nova.sh.compute | 37 - nova/livemigration_test/UT/computeManager.test.py | 821 --------------------- .../UT/libvirtConnection.test.py | 741 ------------------- nova/livemigration_test/UT/nova-manage.test.py | 672 ----------------- .../livemigration_test/UT/schedulerManager.test.py | 771 ------------------- nova/livemigration_test/UT/testCase_UT.xls | Bin 202752 -> 0 bytes 14 files changed, 3346 deletions(-) delete mode 100644 nova/livemigration_test/SI/picture.pptx delete mode 100644 nova/livemigration_test/SI/testCase_SI.xls delete mode 100644 nova/livemigration_test/SI/testParameterSheet_SI.xls delete mode 100755 nova/livemigration_test/SI/utils/demo-firstboot.sh delete mode 100755 nova/livemigration_test/SI/utils/demo-runInstance.sh delete mode 100644 nova/livemigration_test/SI/utils/nova-manage.conf delete mode 100644 nova/livemigration_test/SI/utils/nova.conf delete mode 100755 nova/livemigration_test/SI/utils/nova.sh delete mode 100755 nova/livemigration_test/SI/utils/nova.sh.compute delete mode 100644 nova/livemigration_test/UT/computeManager.test.py delete mode 100644 nova/livemigration_test/UT/libvirtConnection.test.py delete mode 100644 nova/livemigration_test/UT/nova-manage.test.py delete mode 100644 nova/livemigration_test/UT/schedulerManager.test.py delete mode 100644 nova/livemigration_test/UT/testCase_UT.xls diff --git a/nova/livemigration_test/SI/picture.pptx b/nova/livemigration_test/SI/picture.pptx deleted file mode 100644 index b47bec9b5..000000000 Binary files a/nova/livemigration_test/SI/picture.pptx and /dev/null differ diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls deleted file mode 100644 index 65cf96fd7..000000000 Binary files a/nova/livemigration_test/SI/testCase_SI.xls and /dev/null differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls deleted file mode 100644 index 400b43b43..000000000 Binary files a/nova/livemigration_test/SI/testParameterSheet_SI.xls and /dev/null differ diff --git a/nova/livemigration_test/SI/utils/demo-firstboot.sh b/nova/livemigration_test/SI/utils/demo-firstboot.sh deleted file mode 100755 index 3a6f7fb0b..000000000 --- a/nova/livemigration_test/SI/utils/demo-firstboot.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -DIR=/opt/nova-2010.1 - -# 1. 管理者ユーザを作成する -# nova-manage user admin ユーザ名 access-key secret-key -# -#$DIR/bin/nova-manage user admin admin admin admin - -# 2. プロジェクトを作成する -# nova-manage create project プロジェクト名 プロジェクトに属するユーザ名 -# -#$DIR/bin/nova-manage project create admin admin - -# 3. クラウドを使うための認証情報を生成する -# nova-manage project environment プロジェクト名 ユーザ名 認証情報を格納するファイル -# -#$DIR/bin/nova-manage project environment admin admin $DIR/novarc - -# 4. 認証情報の読み込み -. $DIR/novarc - -# 5. プロジェクト用仮想マシンネットワークの作成を行う -# nova-manage user admin ユーザ名 access-key secret-key -# -$DIR/bin/nova-manage network create 10.0.0.0/8 3 16 - -# 6. 
初回ログインにはSSHの公開鍵認証が必要 -# -if [ "" == "`euca-describe-keypairs | grep testkey`" ]; then - euca-add-keypair testkey > testkey.pem -fi - -# 7. -for i in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do - sudo ip addr del $i dev eth0 2> /dev/null -done - - diff --git a/nova/livemigration_test/SI/utils/demo-runInstance.sh b/nova/livemigration_test/SI/utils/demo-runInstance.sh deleted file mode 100755 index 171291262..000000000 --- a/nova/livemigration_test/SI/utils/demo-runInstance.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -DIR=/opt/nova-2010.1 - -function inc_assigned(){ - assigned=`expr $assigned + 1` -} - - -# 1. 認証情報の読み込み -. $DIR/novarc - -# 3. 仮想マシンの起動 -# -ret=`euca-run-instances -t m1.small -k testkey ami-centos` -#ret=`euca-run-instances -t m1.small -k testkey ami-tiny` - -# 4. 仮想マシン用IPの確保 -# 未登録なら登録しておく -registered=`euca-describe-addresses` -for ip in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do - - not_registered=`echo $registered | grep $ip` - if [ "" == "$not_registered" ]; then - echo "[INFO] registed $ip" - $DIR/bin/nova-manage floating create `hostname` $ip - fi -done - -# 5. IPの割当 -echo 0 > /tmp/demo-runinstance -euca-describe-addresses | grep -v reserved | while read line; do - # 割り当てられてないものを仮想マシンに割り当てる - ip=`echo $line | cut -d ' ' -f 2` - id=`echo $ret | cut -d ' ' -f 5` - if [ "" == "`echo $id | grep i- `" ] ; then - echo "[INFO] try again" $ret - break - fi - echo "[INFO] assigned to ipaddr($ip) to instance($id) " - euca-associate-address -i $id $ip - echo 1 > /tmp/demo-runinstance - break -done - -echo $assigned -if [ 0 -eq "`cat /tmp/demo-runinstance`" ] ; then - echo "[INFO] address is full." -fi -rm -rf /tmp/demo-runinstance - - -# 6. FWの設定 -euca-authorize -P tcp -p 22 default 2> /dev/null > /dev/null -euca-authorize -P tcp -p 80 default 2> /dev/null > /dev/null -euca-authorize -P tcp -p 5555 default 2> /dev/null > /dev/null - diff --git a/nova/livemigration_test/SI/utils/nova-manage.conf b/nova/livemigration_test/SI/utils/nova-manage.conf deleted file mode 100644 index 9f8a02b96..000000000 --- a/nova/livemigration_test/SI/utils/nova-manage.conf +++ /dev/null @@ -1,18 +0,0 @@ ---verbose ---nodaemon ---dhcpbridge_flagfile=/etc/nova/nova-manage.conf ---FAKE_subdomain=ec2 ---libvirt_type=qemu ---no_internet_conn=True ---public_netif=eth0 ---public_interface=eth0 - ---cc-host=172.19.0.131 ---routing_source_ip=172.19.0.131 ---sql_connection=mysql://root:nova@172.19.0.131/nova ---rabbit_host=172.19.0.131 ---redis_host=172.19.0.131 ---s3_host=172.19.0.131 ---auth_driver=nova.auth.ldapdriver.LdapDriver ---ldap_url=ldap://172.19.0.131 - diff --git a/nova/livemigration_test/SI/utils/nova.conf b/nova/livemigration_test/SI/utils/nova.conf deleted file mode 100644 index c66bfbc53..000000000 --- a/nova/livemigration_test/SI/utils/nova.conf +++ /dev/null @@ -1,10 +0,0 @@ ---verbose ---nodaemon ---dhcpbridge_flagfile=/opt/nova-2010.4//bin/nova.conf ---network_manager=nova.network.manager.VlanManager ---cc_host=172.19.0.131 ---routing_source_ip=172.19.0.131 ---sql_connection=mysql://root:nova@localhost/nova ---auth_driver=nova.auth.ldapdriver.LdapDriver ---libvirt_type=qemu ---public_interface=eth0 diff --git a/nova/livemigration_test/SI/utils/nova.sh b/nova/livemigration_test/SI/utils/nova.sh deleted file mode 100755 index b8e2e9f26..000000000 --- a/nova/livemigration_test/SI/utils/nova.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/env bash -DIR=`pwd` -CMD=$1 -SOURCE_BRANCH=lp:nova -if [ -n "$2" ]; then - SOURCE_BRANCH=$2 -fi -#DIRNAME=nova -DIRNAME="" 
-NOVA_DIR=$DIR/$DIRNAME -if [ -n "$3" ]; then - NOVA_DIR=$DIR/$3 -fi - -if [ ! -n "$HOST_IP" ]; then - # NOTE(vish): This will just get the first ip in the list, so if you - # have more than one eth device set up, this will fail, and - # you should explicitly set HOST_IP in your environment - HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` -fi - -#USE_MYSQL=${USE_MYSQL:-0} -USE_MYSQL=1 -MYSQL_PASS=${MYSQL_PASS:-nova} -TEST=${TEST:-0} -#USE_LDAP=${USE_LDAP:-0} -USE_LDAP=1 -LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} -NET_MAN=${NET_MAN:-VlanManager} -# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface -# below but make sure that the interface doesn't already have an -# ip or you risk breaking things. -# FLAT_INTERFACE=eth0 - -if [ "$USE_MYSQL" == 1 ]; then - SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova -else - SQL_CONN=sqlite:///$NOVA_DIR/nova.sqlite -fi - -if [ "$USE_LDAP" == 1 ]; then - AUTH=ldapdriver.LdapDriver -else - AUTH=dbdriver.DbDriver -fi - -mkdir -p /etc/nova -cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF ---verbose ---nodaemon ---dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf ---network_manager=nova.network.manager.$NET_MAN ---cc_host=$HOST_IP ---routing_source_ip=$HOST_IP ---sql_connection=$SQL_CONN ---auth_driver=nova.auth.$AUTH ---libvirt_type=$LIBVIRT_TYPE ---public_interface=eth0 -NOVA_CONF_EOF - -if [ -n "$FLAT_INTERFACE" ]; then - echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf -fi - -if [ "$CMD" == "branch" ]; then - sudo apt-get install -y bzr - rm -rf $NOVA_DIR - bzr branch $SOURCE_BRANCH $NOVA_DIR - cd $NOVA_DIR - mkdir -p $NOVA_DIR/instances - mkdir -p $NOVA_DIR/networks -fi - -# You should only have to run this once -if [ "$CMD" == "install" ]; then - sudo apt-get install -y python-software-properties - sudo add-apt-repository ppa:nova-core/ppa - sudo apt-get update - sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables - sudo apt-get install -y user-mode-linux kvm libvirt-bin - sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server - sudo apt-get install -y lvm2 iscsitarget open-iscsi - echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget - sudo /etc/init.d/iscsitarget restart - sudo modprobe kvm - sudo /etc/init.d/libvirt-bin restart - sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot - sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy - sudo apt-get install -y python-libvirt python-libxml2 python-routes - if [ "$USE_MYSQL" == 1 ]; then - cat </etc/nova/nova-manage.conf << NOVA_CONF_EOF ---verbose ---nodaemon ---dhcpbridge_flagfile=/etc/nova/nova-manage.conf ---FAKE_subdomain=ec2 ---libvirt_type=qemu ---no_internet_conn=True ---public_netif=eth0 ---public_interface=eth0 - ---cc-host=$HOST_IP ---routing_source_ip=$HOST_IP ---sql_connection=mysql://root:nova@$HOST_IP/nova ---rabbit_host=$HOST_IP ---redis_host=$HOST_IP ---s3_host=$HOST_IP ---auth_driver=nova.auth.ldapdriver.LdapDriver ---ldap_url=ldap://$HOST_IP - -NOVA_CONF_EOF - -$DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf - diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py deleted file mode 100644 index bc1091299..000000000 --- a/nova/livemigration_test/UT/computeManager.test.py +++ /dev/null @@ -1,821 +0,0 @@ -<<<<<<< TREE -#!/usr/bin/python -# -*- coding: UTF-8 -*- - -NOVA_DIR = '/opt/openstack/nova' -#NOVA_DIR = '/opt/nova-2010.4' - -import 
sys -import unittest -import commands -import re -import logging - -from mock import Mock -import twisted - -try: - print - print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' \ - % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.compute.manager import ComputeManager - from nova.virt.libvirt_conn import LibvirtConnection - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set PYTHONPATH to nova-install-dir' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - - def write(self, arg): - self.buffer += arg - - def writelines(self, arg): - self.buffer += arg - - def flush(self): - print 'flush' - self.buffer = '' - - -class tmpStderr(tmpStdout): - def write(self, arg): - self.buffer += arg - - def flush(self): - pass - - def realFlush(self): - self.buffer = '' - -dummyCallReturnValue={ 0:True } -dummyCallCount=0 -def dummyCall(context, topic, method): - global dummyCallReturnValue, dummyCallCount - if dummyCallCount in dummyCallReturnValue.keys() : - ret = dummyCallReturnValue[ dummyCallCount ] - dummyCallCount += 1 - return ret - else : - dummyCallCount += 1 - return False - - -class ComputeTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - #if self.stdout is None: - # self.__class__.stdout = tmpStdout() - #self.stdoutBak = sys.stdout - #sys.stdout = self.stdout - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - self.host = 'openstack2-api' - if self.manager is None: - self.__class__.manager = ComputeManager(host=self.host) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - for key, val in [('name', 'host1'), ('cpu', 5), - ('memory_mb', 20480), ('hdd_gb', 876)]: - self.host1.__setitem__(key, val) - - self.host2 = Host() - for key, val in [('name', 'host2'), ('cpu', 5), - ('memory_mb', 20480), ('hdd_gb', 876)]: - self.host2.__setitem__(key, val) - - self.instance1 = Instance() - for key, val in [('id', 1), ('host', 'host1'), - ('hostname', 'i-12345'), ('state', power_state.RUNNING), - ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), - ('hdd_gb', 5), ('internal_id', 12345)]: - self.instance1.__setitem__(key, val) - - self.instance2 = Instance() - for key, val in [('id', 2), ('host', 'host1'), - ('hostname', 'i-12345'), ('state', power_state.RUNNING), - ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), - ('hdd_gb', 5)]: - self.instance2.__setitem__(key, val) - - self.fixed_ip1 = FixedIp() - for key, val in [('id', 1), ('address', '1.1.1.1'), - ('network_id', '1'), ('instance_id', 1)]: - self.fixed_ip1.__setitem__(key, val) - - self.vol1 = Volume() - for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'), - ('availability_zone', 'nova'), ('host', 'host1')]: - self.vol1.__setitem__(key, val) - - self.vol2 = Volume() - for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'), - ('availability_zone', 'nova'), ('host', 'host1')]: - self.vol2.__setitem__(key, val) - - self.secgrp1 = Volume() - for key, val in [('id', 1), ('ec2_id', 'default')]: - 
self.secgrp1.__setitem__(key, val) - - self.secgrp2 = Volume() - for key, val in [('id', 2), ('ec2_id', 'def2')]: - self.secgrp2.__setitem__(key, val) - - self.netref1 = Network() - - def setMocks(self): - - # mocks for pre_live_migration - self.ctxt = context.get_admin_context() - db.instance_get = Mock(return_value=self.instance1) - db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) - db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) - db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) - db.security_group_get_by_instance \ - = Mock(return_value=[self.secgrp1, self.secgrp2]) - self.manager.driver.setup_nwfilters_for_instance \ - = Mock(return_value=None) - self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None) - self.manager.network_manager.setup_compute_network \ - = Mock(return_value=None) - # mocks for live_migration_ - rpc.call = Mock(return_value=True) - db.instance_set_state = Mock(return_value=True) - - # ---> test for nova.compute.manager.pre_live_migration() - def test01(self): - """01: NotFound error occurs on finding instance on DB. """ - - db.instance_get = Mock(side_effect=exception.NotFound('ERR')) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test02(self): - """02: NotAuthrized occurs on finding volume on DB. """ - - db.volume_get_by_ec2_id \ - = Mock(side_effect=exception.NotAuthorized('ERR')) - - self.assertRaises(exception.NotAuthorized, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test03(self): - """03: Unexpected exception occurs on finding volume on DB. """ - - db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) - - self.assertRaises(TypeError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test04(self): - """04: no volume and fixed ip found on DB, """ - - db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) - db.instance_get_fixed_address = Mock(return_value=None) - - self.assertRaises(rpc.RemoteError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - c1 = (0 <= sys.stderr.buffer.find('has no volume')) - - self.assertEqual(c1, True) - - def test05(self): - """05: volume found and no fixed_ip found on DB. """ - - db.instance_get_fixed_address \ - = Mock(side_effect=exception.NotFound('ERR')) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test06(self): - """06: self.driver.setup_nwfilters_for_instance causes NotFound. """ - self.manager.driver.setup_nwfilters_for_instance \ - = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test07(self): - """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """ - self.manager.network_manager.setup_compute_network \ - = Mock(side_effect=exception.ProcessExecutionError("ERR")) - - self.assertRaises(exception.ProcessExecutionError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - - def test08(self): - """08: self.manager.network_manager.setup_compute_network - exception.NotFound. 
""" - self.manager.network_manager.setup_compute_network \ - = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - # those 2 cases are omitted : - # self.driver.setup_nwfilters_for_instance causes - # twisted.python.failure.Failure. - # self.driver.refresh_security_group causes twisted.python.failure.Failure. - # - # twisted.python.failure.Failure can not be used with assertRaises, - # it doesnt have __call___ - # - - def test09(self): - """09: volume/fixed_ip found on DB, all procedure finish - successfully.. """ - - result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - self.assertEqual(result, True) - - # ---> test for nova.compute.manager.live_migration() - - def test10(self): - """10: rpc.call(pre_live_migration returns Error(Not None). """ - rpc.call = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test11(self): - """11: if rpc.call returns rpc.RemoteError. """ - - rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) - db.instance_set_state = Mock(return_value=True) - result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - c1 = (None == result) - c2 = (0 <= sys.stderr.buffer.find('err at')) - self.assertEqual(c1 and c2, True) - - def test12(self): - """12: if rpc.call returns rpc.RemoteError and instance_set_state - also ends up err. (then , unexpected err occurs, in this case - TypeError) - """ - rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) - db.instance_set_state = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test13(self): - """13: if wait for pre_live_migration, but timeout. """ - rpc.call = dummyCall - - db.instance_get = Mock(return_value=self.instance1) - - result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - c1 = (None == result) - c2 = (0 <= sys.stderr.buffer.find('Timeout for')) - self.assertEqual(c1 and c2, True) - - def test14(self): - """14: if db_instance_get issues NotFound. - """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test15(self): - """15: if rpc.call returns True, and instance_get() cause other - exception. (Unexpected case - b/c it already checked by - nova-manage) - """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(side_effect=TypeError("ERR")) - - self.assertRaises(TypeError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test16(self): - """16: if rpc.call returns True, and live_migration issues - ProcessExecutionError. """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(return_value=self.instance1) - ret = self.manager.driver.live_migration \ - = Mock(side_effect=utils.ProcessExecutionError("ERR")) - - self.assertRaises(utils.ProcessExecutionError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test17(self): - """17: everything goes well. """ - self.manager.driver.live_migration = Mock(return_value=True) - ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1') - self.assertEqual(True, True) - - def tearDown(self): - """common terminating method. 
""" - self.stderr.realFlush() - sys.stderr = self.stderrBak - #sys.stdout = self.stdoutBak - -if __name__ == '__main__': - logging.getLogger().setLevel(logging.DEBUG) - #unittest.main() - - suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions) - unittest.TextTestRunner(verbosity=2).run(suite) - - #suite = unittest.TestSuite() - #suite.addTest(ComputeTestFunctions("test15")) - #suite.addTest(ComputeTestFunctions("test16")) - #unittest.TextTestRunner(verbosity=2).run(suite) -======= -#!/usr/bin/python -# -*- coding: UTF-8 -*- - - -import sys -import os -import unittest -import commands -import re -import logging - -from mock import Mock -import twisted - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - -try: - print - print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' \ - % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.compute.manager import ComputeManager - from nova.virt.libvirt_conn import LibvirtConnection - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - - def write(self, arg): - self.buffer += arg - - def writelines(self, arg): - self.buffer += arg - - def flush(self): - print 'flush' - self.buffer = '' - - -class tmpStderr(tmpStdout): - def write(self, arg): - self.buffer += arg - - def flush(self): - pass - - def realFlush(self): - self.buffer = '' - -dummyCallReturnValue={ 0:True } -dummyCallCount=0 -def dummyCall(context, topic, method): - global dummyCallReturnValue, dummyCallCount - if dummyCallCount in dummyCallReturnValue.keys() : - ret = dummyCallReturnValue[ dummyCallCount ] - dummyCallCount += 1 - return ret - else : - dummyCallCount += 1 - return False - - -class ComputeTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. 
""" - - #if self.stdout is None: - # self.__class__.stdout = tmpStdout() - #self.stdoutBak = sys.stdout - #sys.stdout = self.stdout - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - self.host = 'openstack2-api' - if self.manager is None: - self.__class__.manager = ComputeManager(host=self.host) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - for key, val in [('name', 'host1'), ('cpu', 5), - ('memory_mb', 20480), ('hdd_gb', 876)]: - self.host1.__setitem__(key, val) - - self.host2 = Host() - for key, val in [('name', 'host2'), ('cpu', 5), - ('memory_mb', 20480), ('hdd_gb', 876)]: - self.host2.__setitem__(key, val) - - self.instance1 = Instance() - for key, val in [('id', 1), ('host', 'host1'), - ('hostname', 'i-12345'), ('state', power_state.RUNNING), - ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), - ('hdd_gb', 5), ('internal_id', 12345)]: - self.instance1.__setitem__(key, val) - - self.instance2 = Instance() - for key, val in [('id', 2), ('host', 'host1'), - ('hostname', 'i-12345'), ('state', power_state.RUNNING), - ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), - ('hdd_gb', 5)]: - self.instance2.__setitem__(key, val) - - self.fixed_ip1 = FixedIp() - for key, val in [('id', 1), ('address', '1.1.1.1'), - ('network_id', '1'), ('instance_id', 1)]: - self.fixed_ip1.__setitem__(key, val) - - self.vol1 = Volume() - for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'), - ('availability_zone', 'nova'), ('host', 'host1')]: - self.vol1.__setitem__(key, val) - - self.vol2 = Volume() - for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'), - ('availability_zone', 'nova'), ('host', 'host1')]: - self.vol2.__setitem__(key, val) - - self.secgrp1 = Volume() - for key, val in [('id', 1), ('ec2_id', 'default')]: - self.secgrp1.__setitem__(key, val) - - self.secgrp2 = Volume() - for key, val in [('id', 2), ('ec2_id', 'def2')]: - self.secgrp2.__setitem__(key, val) - - self.netref1 = Network() - - def setMocks(self): - - # mocks for pre_live_migration - self.ctxt = context.get_admin_context() - db.instance_get = Mock(return_value=self.instance1) - db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) - db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) - db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) - db.security_group_get_by_instance \ - = Mock(return_value=[self.secgrp1, self.secgrp2]) - self.manager.driver.setup_nwfilters_for_instance \ - = Mock(return_value=None) - self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None) - self.manager.network_manager.setup_compute_network \ - = Mock(return_value=None) - # mocks for live_migration_ - rpc.call = Mock(return_value=True) - db.instance_set_state = Mock(return_value=True) - - # ---> test for nova.compute.manager.pre_live_migration() - def test01(self): - """01: NotFound error occurs on finding instance on DB. """ - - db.instance_get = Mock(side_effect=exception.NotFound('ERR')) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test02(self): - """02: NotAuthrized occurs on finding volume on DB. 
""" - - db.volume_get_by_ec2_id \ - = Mock(side_effect=exception.NotAuthorized('ERR')) - - self.assertRaises(exception.NotAuthorized, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test03(self): - """03: Unexpected exception occurs on finding volume on DB. """ - - db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) - - self.assertRaises(TypeError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test04(self): - """04: no volume and fixed ip found on DB, """ - - db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) - db.instance_get_fixed_address = Mock(return_value=None) - - self.assertRaises(rpc.RemoteError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - c1 = (0 <= sys.stderr.buffer.find('has no volume')) - - self.assertEqual(c1, True) - - def test05(self): - """05: volume found and no fixed_ip found on DB. """ - - db.instance_get_fixed_address \ - = Mock(side_effect=exception.NotFound('ERR')) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test06(self): - """06: self.driver.setup_nwfilters_for_instance causes NotFound. """ - self.manager.driver.setup_nwfilters_for_instance \ - = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test07(self): - """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """ - self.manager.network_manager.setup_compute_network \ - = Mock(side_effect=exception.ProcessExecutionError("ERR")) - - self.assertRaises(exception.ProcessExecutionError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - - def test08(self): - """08: self.manager.network_manager.setup_compute_network - exception.NotFound. """ - self.manager.network_manager.setup_compute_network \ - = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - # those 2 cases are omitted : - # self.driver.setup_nwfilters_for_instance causes - # twisted.python.failure.Failure. - # self.driver.refresh_security_group causes twisted.python.failure.Failure. - # - # twisted.python.failure.Failure can not be used with assertRaises, - # it doesnt have __call___ - # - - def test09(self): - """09: volume/fixed_ip found on DB, all procedure finish - successfully.. """ - - result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - self.assertEqual(result, True) - - # ---> test for nova.compute.manager.live_migration() - - def test10(self): - """10: rpc.call(pre_live_migration returns Error(Not None). """ - rpc.call = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test11(self): - """11: if rpc.call returns rpc.RemoteError. """ - - rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) - db.instance_set_state = Mock(return_value=True) - result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - c1 = (None == result) - c2 = (0 <= sys.stderr.buffer.find('err at')) - self.assertEqual(c1 and c2, True) - - def test12(self): - """12: if rpc.call returns rpc.RemoteError and instance_set_state - also ends up err. 
(then , unexpected err occurs, in this case - TypeError) - """ - rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) - db.instance_set_state = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test13(self): - """13: if wait for pre_live_migration, but timeout. """ - rpc.call = dummyCall - - db.instance_get = Mock(return_value=self.instance1) - - result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - c1 = (None == result) - c2 = (0 <= sys.stderr.buffer.find('Timeout for')) - self.assertEqual(c1 and c2, True) - - def test14(self): - """14: if db_instance_get issues NotFound. - """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test15(self): - """15: if rpc.call returns True, and instance_get() cause other - exception. (Unexpected case - b/c it already checked by - nova-manage) - """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(side_effect=TypeError("ERR")) - - self.assertRaises(TypeError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test16(self): - """16: if rpc.call returns True, and live_migration issues - ProcessExecutionError. """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(return_value=self.instance1) - ret = self.manager.driver.live_migration \ - = Mock(side_effect=utils.ProcessExecutionError("ERR")) - - self.assertRaises(utils.ProcessExecutionError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test17(self): - """17: everything goes well. """ - self.manager.driver.live_migration = Mock(return_value=True) - ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1') - self.assertEqual(True, True) - - def tearDown(self): - """common terminating method. """ - self.stderr.realFlush() - sys.stderr = self.stderrBak - #sys.stdout = self.stdoutBak - -if __name__ == '__main__': - logging.getLogger().setLevel(logging.DEBUG) - #unittest.main() - - suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions) - unittest.TextTestRunner(verbosity=2).run(suite) - - #suite = unittest.TestSuite() - #suite.addTest(ComputeTestFunctions("test15")) - #suite.addTest(ComputeTestFunctions("test16")) - #unittest.TextTestRunner(verbosity=2).run(suite) ->>>>>>> MERGE-SOURCE diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py deleted file mode 100644 index 093d19ea3..000000000 --- a/nova/livemigration_test/UT/libvirtConnection.test.py +++ /dev/null @@ -1,741 +0,0 @@ -<<<<<<< TREE -#!/usr/bin/python -# -*- coding: UTF-8 -*- - -NOVA_DIR='/opt/nova-2010.4' - -import sys -import unittest -import commands -import re -import logging -import libvirt - -from mock import Mock -import twisted - -try : - print - print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' 
% NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.compute.manager import ComputeManager - from nova.virt import libvirt_conn - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova import process - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set PYTHONPATH to nova-install-dir' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def writelines(self, arg): - self.buffer += arg - def flush(self): - print 'flush' - self.buffer = '' - -class tmpStderr(tmpStdout): - def write(self,arg): - self.buffer += arg - def flush(self): - pass - def realFlush(self): - self.buffer = '' - -class DummyLibvirtConn(object): - nwfilterLookupByName = None - def __init__(self): - pass - - -class LibvirtConnectionTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - #if self.stdout is None: - # self.__class__.stdout = tmpStdout() - #self.stdoutBak = sys.stdout - #sys.stdout = self.stdout - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - self.host = 'openstack2-api' - if self.manager is None: - self.__class__.manager = libvirt_conn.get_connection(False) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]: - self.host1.__setitem__(key, val) - - self.instance1 = Instance() - for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]: - self.instance1.__setitem__(key, val) - - - self.instance2 = Instance() - for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance2.__setitem__(key, val) - - - self.fixed_ip1 = FixedIp() - for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'), - ('instance_id', 1)]: - self.fixed_ip1.__setitem__(key, val) - - self.floating_ip1 = FloatingIp() - for key, val in [ ('id', 1), ('address', '1.1.1.200') ]: - self.floating_ip1.__setitem__(key, val) - - self.netref1 = Network() - for key, val in [ ('id', 1) ]: - self.netref1.__setitem__(key, val) - - - def setMocks(self): - - self.ctxt = context.get_admin_context() - db.instance_get_fixed_address = Mock(return_value = '1.1.1.1') - db.fixed_ip_update = Mock(return_value = None) - db.fixed_ip_get_network = Mock(return_value = self.netref1) - db.network_update = Mock(return_value = None) - db.instance_get_floating_address = Mock(return_value = '1.1.1.200') - db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1) - db.floating_ip_update = Mock(return_value = None) - db.instance_update = Mock(return_value = None) - - - # ---> test for nova.virt.libvirt_conn.nwfilter_for_instance_exists() - - def test01(self): - """01: libvirt.libvirtError occurs. 
""" - - self.manager._wrapped_conn = DummyLibvirtConn() - self.manager._test_connection = Mock(return_value=True) - self.manager._conn.nwfilterLookupByName = \ - Mock(side_effect=libvirt.libvirtError("ERR")) - ret = self.manager.nwfilter_for_instance_exists(self.instance1) - self.assertEqual(ret, False) - - def test02(self): - """02: libvirt.libvirtError not occurs. """ - - self.manager._wrapped_conn = DummyLibvirtConn() - self.manager._test_connection = Mock(return_value=True) - self.manager._conn.nwfilterLookupByName = \ - Mock(return_value=True) - ret = self.manager.nwfilter_for_instance_exists(self.instance1) - self.assertEqual(ret, True) - - # ---> test for nova.virt.libvirt_conn.live_migraiton() - - def test03(self): - """03: Unexpected exception occurs on finding volume on DB. """ - - utils.execute = Mock( side_effect=process.ProcessExecutionError('ERR') ) - - self.assertRaises(process.ProcessExecutionError, - self.manager.live_migration, - self.instance1, - 'host2') - - # ---> other case cannot be tested because live_migraiton - # is synchronized/asynchronized method are mixed together - - - # ---> test for nova.virt.libvirt_conn._post_live_migraiton - - def test04(self): - """04: instance_ref is not nova.db.sqlalchemy.models.Instances""" - - self.assertRaises(TypeError, - self.manager._post_live_migration, - "dummy string", - 'host2') - - def test05(self): - """05: db.instance_get_fixed_address return None""" - - db.instance_get_fixed_address = Mock( return_value=None ) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found')) - self.assertEqual(c1 and c2, True) - - def test06(self): - """06: db.instance_get_fixed_address raises NotFound""" - - db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.instance1, - 'host2') - - def test07(self): - """07: db.instance_get_fixed_address raises Unknown exception""" - - db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test08(self): - """08: db.fixed_ip_update return NotFound. """ - - db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test09(self): - """09: db.fixed_ip_update return NotAuthorized. """ - db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') ) - self.assertRaises(exception.NotAuthorized, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test10(self): - """10: db.fixed_ip_update return Unknown exception. """ - db.fixed_ip_update = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test11(self): - """11: db.fixed_ip_get_network causes NotFound. """ - - db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.instance1, - 'host1') - - # not tested db.fixed_ip_get_network raises NotAuthorized - # because same test has been done at previous test. - - def test12(self): - """12: db.fixed_ip_get_network causes Unknown exception. 
""" - - db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test13(self): - """13: db.network_update raises Unknown exception. """ - db.network_update = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test14(self): - """14: db.instance_get_floating_address raises NotFound. """ - db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - - def test15(self): - """15: db.instance_get_floating_address returns None. """ - - db.instance_get_floating_address = Mock( return_value=None ) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found')) - self.assertEqual(c1 and c2, True) - - def test16(self): - """16: db.instance_get_floating_address raises NotFound. """ - - db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - def test17(self): - """17: db.instance_get_floating_address raises Unknown exception. """ - db.instance_get_floating_address = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - - def test18(self): - """18: db.floating_ip_get_by_address raises NotFound """ - - db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - def test19(self): - """19: db.floating_ip_get_by_address raises Unknown exception. """ - db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - - def test20(self): - """20: db.floating_ip_update raises Unknown exception. - """ - db.floating_ip_update = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - def test21(self): - """21: db.instance_update raises unknown exception. """ - - db.instance_update = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def tearDown(self): - """common terminating method. 
""" - self.stderr.realFlush() - sys.stderr = self.stderrBak - #sys.stdout = self.stdoutBak - -if __name__ == '__main__': - logging.getLogger().setLevel(logging.DEBUG) - #unittest.main() - - suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions) - unittest.TextTestRunner(verbosity=2).run(suite) - - #suite = unittest.TestSuite() - #suite.addTest(LibvirtConnectionTestFunctions("test14")) - #suite.addTest(LibvirtConnectionTestFunctions("test16")) - #unittest.TextTestRunner(verbosity=2).run(suite) - - -======= -#!/usr/bin/python -# -*- coding: UTF-8 -*- - - -import sys -import os -import unittest -import commands -import re -import logging -import libvirt - -from mock import Mock -import twisted - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - - -try : - print - print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.compute.manager import ComputeManager - from nova.virt import libvirt_conn - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova import process - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def writelines(self, arg): - self.buffer += arg - def flush(self): - print 'flush' - self.buffer = '' - -class tmpStderr(tmpStdout): - def write(self,arg): - self.buffer += arg - def flush(self): - pass - def realFlush(self): - self.buffer = '' - -class DummyLibvirtConn(object): - nwfilterLookupByName = None - def __init__(self): - pass - - -class LibvirtConnectionTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. 
""" - - #if self.stdout is None: - # self.__class__.stdout = tmpStdout() - #self.stdoutBak = sys.stdout - #sys.stdout = self.stdout - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - self.host = 'openstack2-api' - if self.manager is None: - self.__class__.manager = libvirt_conn.get_connection(False) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]: - self.host1.__setitem__(key, val) - - self.instance1 = Instance() - for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]: - self.instance1.__setitem__(key, val) - - - self.instance2 = Instance() - for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance2.__setitem__(key, val) - - - self.fixed_ip1 = FixedIp() - for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'), - ('instance_id', 1)]: - self.fixed_ip1.__setitem__(key, val) - - self.floating_ip1 = FloatingIp() - for key, val in [ ('id', 1), ('address', '1.1.1.200') ]: - self.floating_ip1.__setitem__(key, val) - - self.netref1 = Network() - for key, val in [ ('id', 1) ]: - self.netref1.__setitem__(key, val) - - - def setMocks(self): - - self.ctxt = context.get_admin_context() - db.instance_get_fixed_address = Mock(return_value = '1.1.1.1') - db.fixed_ip_update = Mock(return_value = None) - db.fixed_ip_get_network = Mock(return_value = self.netref1) - db.network_update = Mock(return_value = None) - db.instance_get_floating_address = Mock(return_value = '1.1.1.200') - db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1) - db.floating_ip_update = Mock(return_value = None) - db.instance_update = Mock(return_value = None) - - - # ---> test for nova.virt.libvirt_conn.nwfilter_for_instance_exists() - - def test01(self): - """01: libvirt.libvirtError occurs. """ - - self.manager._wrapped_conn = DummyLibvirtConn() - self.manager._test_connection = Mock(return_value=True) - self.manager._conn.nwfilterLookupByName = \ - Mock(side_effect=libvirt.libvirtError("ERR")) - ret = self.manager.nwfilter_for_instance_exists(self.instance1) - self.assertEqual(ret, False) - - def test02(self): - """02: libvirt.libvirtError not occurs. """ - - self.manager._wrapped_conn = DummyLibvirtConn() - self.manager._test_connection = Mock(return_value=True) - self.manager._conn.nwfilterLookupByName = \ - Mock(return_value=True) - ret = self.manager.nwfilter_for_instance_exists(self.instance1) - self.assertEqual(ret, True) - - # ---> test for nova.virt.libvirt_conn.live_migraiton() - - def test03(self): - """03: Unexpected exception occurs on finding volume on DB. 
""" - - utils.execute = Mock( side_effect=process.ProcessExecutionError('ERR') ) - - self.assertRaises(process.ProcessExecutionError, - self.manager.live_migration, - self.instance1, - 'host2') - - # ---> other case cannot be tested because live_migraiton - # is synchronized/asynchronized method are mixed together - - - # ---> test for nova.virt.libvirt_conn._post_live_migraiton - - def test04(self): - """04: instance_ref is not nova.db.sqlalchemy.models.Instances""" - - self.assertRaises(TypeError, - self.manager._post_live_migration, - "dummy string", - 'host2') - - def test05(self): - """05: db.instance_get_fixed_address return None""" - - db.instance_get_fixed_address = Mock( return_value=None ) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found')) - self.assertEqual(c1 and c2, True) - - def test06(self): - """06: db.instance_get_fixed_address raises NotFound""" - - db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.instance1, - 'host2') - - def test07(self): - """07: db.instance_get_fixed_address raises Unknown exception""" - - db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test08(self): - """08: db.fixed_ip_update return NotFound. """ - - db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test09(self): - """09: db.fixed_ip_update return NotAuthorized. """ - db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') ) - self.assertRaises(exception.NotAuthorized, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test10(self): - """10: db.fixed_ip_update return Unknown exception. """ - db.fixed_ip_update = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test11(self): - """11: db.fixed_ip_get_network causes NotFound. """ - - db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.instance1, - 'host1') - - # not tested db.fixed_ip_get_network raises NotAuthorized - # because same test has been done at previous test. - - def test12(self): - """12: db.fixed_ip_get_network causes Unknown exception. """ - - db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test13(self): - """13: db.network_update raises Unknown exception. """ - db.network_update = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def test14(self): - """14: db.instance_get_floating_address raises NotFound. """ - db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - - def test15(self): - """15: db.instance_get_floating_address returns None. 
""" - - db.instance_get_floating_address = Mock( return_value=None ) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found')) - self.assertEqual(c1 and c2, True) - - def test16(self): - """16: db.instance_get_floating_address raises NotFound. """ - - db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - def test17(self): - """17: db.instance_get_floating_address raises Unknown exception. """ - db.instance_get_floating_address = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - - def test18(self): - """18: db.floating_ip_get_by_address raises NotFound """ - - db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - def test19(self): - """19: db.floating_ip_get_by_address raises Unknown exception. """ - db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - - def test20(self): - """20: db.floating_ip_update raises Unknown exception. - """ - db.floating_ip_update = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - def test21(self): - """21: db.instance_update raises unknown exception. """ - - db.instance_update = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.instance1, - 'host1') - - def tearDown(self): - """common terminating method. """ - self.stderr.realFlush() - sys.stderr = self.stderrBak - #sys.stdout = self.stdoutBak - -if __name__ == '__main__': - logging.getLogger().setLevel(logging.DEBUG) - #unittest.main() - - suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions) - unittest.TextTestRunner(verbosity=2).run(suite) - - #suite = unittest.TestSuite() - #suite.addTest(LibvirtConnectionTestFunctions("test14")) - #suite.addTest(LibvirtConnectionTestFunctions("test16")) - #unittest.TextTestRunner(verbosity=2).run(suite) - - ->>>>>>> MERGE-SOURCE diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py deleted file mode 100644 index b39af3c60..000000000 --- a/nova/livemigration_test/UT/nova-manage.test.py +++ /dev/null @@ -1,672 +0,0 @@ -<<<<<<< TREE -#!/usr/bin/python -# -*- coding: UTF-8 -*- - -NOVA_DIR='/opt/nova-2010.2' - -import sys -import unittest -import commands -import re - -from mock import Mock - -try : - print - print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' 
% NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set PYTHONPATH to nova-install-dir' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def flush(self): - self.buffer = '' - - -class NovaManageTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - - hostCmds = None - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() )) - commands.getstatusoutput('touch %s' % self.getInitpyPath() ) - try : - import bin.novamanagetest - except: - print 'Fail to import nova-manage . check bin/nova-manage exists' - raise - - # replace stdout for checking nova-manage output - if self.stdout is None : - self.__class__.stdout = tmpStdout() - self.stdoutBak = sys.stdout - sys.stdout = self.stdout - - # prepare test data - self.setTestData() - - - def setTestData(self): - import bin.novamanagetest - - if self.hostCmds is None : - self.__class__.hostCmds = bin.novamanagetest.HostCommands() - self.instanceCmds = bin.novamanagetest.InstanceCommands() - - self.host1 = Host() - self.host1.__setitem__('name', 'host1') - - self.host2 = Host() - self.host2.__setitem__('name', 'host2') - - self.instance1 = Instance() - self.instance1.__setitem__('id', 1) - self.instance1.__setitem__('host', 'host1') - self.instance1.__setitem__('hostname', 'i-12345') - self.instance1.__setitem__('state', power_state.NOSTATE) - self.instance1.__setitem__('state_description', 'running') - - self.instance2 = Instance() - self.instance2.__setitem__('id', 2) - self.instance2.__setitem__('host', 'host1') - self.instance2.__setitem__('hostname', 'i-12345') - self.instance2.__setitem__('state', power_state.RUNNING) - self.instance2.__setitem__('state_description', 'pending') - - self.instance3 = Instance() - self.instance3.__setitem__('id', 3) - self.instance3.__setitem__('host', 'host1') - self.instance3.__setitem__('hostname', 'i-12345') - self.instance3.__setitem__('state', power_state.RUNNING) - self.instance3.__setitem__('state_description', 'running') - - db.host_get_all = Mock(return_value=[self.host1, self.host2]) - - def getInitpyPath(self): - return '%s/bin/__init__.py' % NOVA_DIR - - def getNovaManageCopyPath(self): - return '%s/bin/novamanagetest.py' % NOVA_DIR - - # -----> Test for nova-manage host list - - def test01(self): - """01: Got some host lists. """ - - self.hostCmds.list() - - c1 = (2 == self.stdout.buffer.count('\n')) - c2 = (0 <= self.stdout.buffer.find('host1')) - c3 = (0 <= self.stdout.buffer.find('host2')) - self.assertEqual(c1 and c2 and c3, True) - - def test02(self): - """02: Got empty lsit. 
""" - - db.host_get_all = Mock(return_value=[]) - self.hostCmds.list() - - # result should be empty - c = (0 == len(self.stdout.buffer) ) - self.assertEqual(c, True) - - def test03(self): - """03: Got notFound """ - - db.host_get_all = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, self.hostCmds.list) - - # --------> Test For nova-manage host show - - def test04(self): - """04: args are not enough(nova-manage host show) """ - self.assertRaises(TypeError, self.hostCmds.show ) - - - def test05(self): - """05: nova-manage host show not-registered-host, and got an error""" - - rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} ) - self.hostCmds.show('host1') - self.assertEqual( self.stdout.buffer[:3]=='ERR', True ) - - - def test06(self): - """06: nova-manage host show registerd-host, and no project uses the host""" - - dic = {'ret': True, - 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, - 'usage': {}} - - rpc.call = Mock(return_value=dic ) - self.hostCmds.show('host1') - - # result should be : - # HOST PROJECT cpu mem(mb) disk(gb) - # host1 1 2 3 - line = self.stdout.buffer.split('\n')[1] - line = re.compile('\t+').sub(' ', line).strip() - c1 = ( 'host1 1 2 3' == line ) - c2 = ( self.stdout.buffer.count('\n') == 2 ) - - self.assertEqual( c1 and c2, True ) - - def test07(self): - """07: nova-manage host show registerd-host, - and some projects use the host - """ - dic = {'ret': True, - 'phy_resource': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, - 'usage': {'p1': {'cpu':1, 'memory_mb':2, 'hdd_gb':3}, - 'p2': {'cpu':1, 'memory_mb':2, 'hdd_gb':3} }} - - rpc.call = Mock(return_value=dic ) - self.hostCmds.show('host1') - - # result should be : - # HOST PROJECT cpu mem(mb) disk(gb) - # host1 1 2 3 - # host1 p1 1 2 3 - # host1 p2 4 5 6 - line = self.stdout.buffer.split('\n')[1] - ret = re.compile('\t+').sub(' ', line).strip() - c1 = ( 'host1 1 2 3' == ret ) - - line = self.stdout.buffer.split('\n')[2] - line = re.compile('\t+').sub(' ', line).strip() - c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line ) - - line = self.stdout.buffer.split('\n')[3] - ret = re.compile('\t+').sub(' ', line).strip() - c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret ) - - self.assertEqual( c1 and c2 and c3, True ) - - def test08(self): - """08: nova-manage host show registerd-host, and rpc.call returns None - (unexpected error) - """ - rpc.call = Mock(return_value=None ) - self.hostCmds.show('host1') - c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') ) - self.assertEqual( c1, True ) - - # ----------> Test for bin/nova-manage instance live_migration - - def test09(self): - """09: arguments are not enough(nova-manage instances live_migration) - """ - self.assertRaises(TypeError, self.instanceCmds.live_migration ) - - def test10(self): - """10: arguments are not enough(nova-manage instances live_migration ec2_id) - """ - self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) - - def test11(self): - """11: nova-manage instances live_migration ec2_id host, - where hostname is invalid - """ - db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) - - def test12(self): - """12: nova-manage instances live_migration ec2_id(invalid id) host""" - - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') ) - - self.assertRaises(exception.NotFound, 
self.instanceCmds.live_migration, 'i-xxx', 'host1' )
-
-    def test13(self):
-        """13: nova-manage instances live_migration ec2_id host,
-            but instance specified by ec2 id is not running (state is not power_state.RUNNING)
-        """
-        db.host_get_by_name = Mock(return_value = self.host1)
-        db.instance_get_by_internal_id = Mock( return_value = self.instance1 )
-        c1 = c2 = False
-        try :
-            self.instanceCmds.live_migration('i-12345', 'host1')
-        except SystemExit, e:
-            c1 = (1 == e.code)
-            c2 = (0 < self.stdout.buffer.find('is not running') )
-        self.assertEqual( c1 and c2 , True )
-
-
-    def test14(self):
-        """14: nova-manage instances live_migration ec2_id host,
-            but instance specified by ec2 id is not running (state_description is not running)
-        """
-        db.host_get_by_name = Mock(return_value = self.host2)
-        db.instance_get_by_internal_id = Mock( return_value = self.instance1 )
-        c1 = c2 = False
-        try :
-            self.instanceCmds.live_migration('i-12345', 'host2')
-        except SystemExit, e:
-            c1 = (1 == e.code)
-            c2 = (0 < self.stdout.buffer.find('is not running') )
-        self.assertEqual( c1 and c2 , True )
-
-    def test15(self):
-        """15: nova-manage instances live_migration ec2_id host,
-            but instance is running on the same host specified above, so an error should occur.
-        """
-        db.host_get_by_name = Mock(return_value = self.host1)
-        db.instance_get_by_internal_id = Mock( return_value = self.instance3 )
-        c1 = c2 = False
-        try :
-            self.instanceCmds.live_migration('i-12345', 'host1')
-        except SystemExit, e:
-            c1 = (2 == e.code)
-            c2 = (0 < self.stdout.buffer.find('is running now') )
-        self.assertEqual( c1 and c2 , True )
-
-    def test16(self):
-        """16: nova-manage instances live_migration ec2_id host,
-            everything goes well, and gets success messages.
-        """
-        db.host_get_by_name = Mock(return_value = self.host1)
-        db.instance_get_by_internal_id = Mock( return_value = self.instance3 )
-        rpc.cast = Mock(return_value = None)
-
-        self.instanceCmds.live_migration('i-12345', 'host2')
-        c1 = (0 <= self.stdout.buffer.find('Finished all procedure') )
-        self.assertEqual( c1, True )
-
-
-    def tearDown(self):
-        """common terminating method. """
-        commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() )
-        commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() )
-        sys.stdout.flush()
-        sys.stdout = self.stdoutBak
-
-if __name__ == '__main__':
-    #unittest.main()
-    suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions)
-    unittest.TextTestRunner(verbosity=3).run(suite)
-
-
-=======
-#!/usr/bin/python
-# -*- coding: UTF-8 -*-
-
-NOVA_DIR='/opt/nova-2010.4'
-
-import sys
-import os
-import unittest
-import commands
-import re
-
-from mock import Mock
-
-# getting /nova-inst-dir
-NOVA_DIR = os.path.abspath(sys.argv[0])
-for i in range(4):
-    NOVA_DIR = os.path.dirname(NOVA_DIR)
-
-
-try :
-    print
-    print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' % NOVA_DIR
-    print
-
-    sys.path.append(NOVA_DIR)
-
-    from nova import context
-    from nova import db
-    from nova import exception
-    from nova import flags
-    from nova import quota
-    from nova import utils
-    from nova.auth import manager
-    from nova.cloudpipe import pipelib
-    from nova import rpc
-    from nova.api.ec2 import cloud
-    from nova.compute import power_state
-
-    from nova.db.sqlalchemy.models import *
-
-
-except:
-    print 'set correct NOVA_DIR in this script. 
' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def flush(self): - self.buffer = '' - -class tmpStderr(tmpStdout): - def write(self, arg): - self.buffer += arg - def flush(self): - pass - def realFlush(self): - self.buffer = '' - - -class NovaManageTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - - hostCmds = None - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() )) - commands.getstatusoutput('touch %s' % self.getInitpyPath() ) - try : - import bin.novamanagetest - except: - print 'Fail to import nova-manage . check bin/nova-manage exists' - raise - - # replace stdout for checking nova-manage output - if self.stdout is None : - self.__class__.stdout = tmpStdout() - self.stdoutBak = sys.stdout - sys.stdout = self.stdout - - # replace stderr for checking nova-manage output - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - # prepare test data - self.setTestData() - - - def setTestData(self): - import bin.novamanagetest - - if self.hostCmds is None : - self.__class__.hostCmds = bin.novamanagetest.HostCommands() - self.instanceCmds = bin.novamanagetest.InstanceCommands() - - self.host1 = Host() - self.host1.__setitem__('name', 'host1') - - self.host2 = Host() - self.host2.__setitem__('name', 'host2') - - self.instance1 = Instance() - self.instance1.__setitem__('id', 1) - self.instance1.__setitem__('host', 'host1') - self.instance1.__setitem__('hostname', 'i-12345') - self.instance1.__setitem__('state', power_state.NOSTATE) - self.instance1.__setitem__('state_description', 'running') - - self.instance2 = Instance() - self.instance2.__setitem__('id', 2) - self.instance2.__setitem__('host', 'host1') - self.instance2.__setitem__('hostname', 'i-12345') - self.instance2.__setitem__('state', power_state.RUNNING) - self.instance2.__setitem__('state_description', 'pending') - - self.instance3 = Instance() - self.instance3.__setitem__('id', 3) - self.instance3.__setitem__('host', 'host1') - self.instance3.__setitem__('hostname', 'i-12345') - self.instance3.__setitem__('state', power_state.RUNNING) - self.instance3.__setitem__('state_description', 'running') - - db.host_get_all = Mock(return_value=[self.host1, self.host2]) - - def getInitpyPath(self): - return '%s/bin/__init__.py' % NOVA_DIR - - def getNovaManageCopyPath(self): - return '%s/bin/novamanagetest.py' % NOVA_DIR - - # -----> Test for nova-manage host list - - def test01(self): - """01: Got some host lists. """ - - self.hostCmds.list() - - c1 = (2 == self.stdout.buffer.count('\n')) - c2 = (0 <= self.stdout.buffer.find('host1')) - c3 = (0 <= self.stdout.buffer.find('host2')) - self.assertEqual(c1 and c2 and c3, True) - - def test02(self): - """02: Got empty lsit. 
""" - - db.host_get_all = Mock(return_value=[]) - self.hostCmds.list() - - # result should be empty - c = (0 == len(self.stdout.buffer) ) - self.assertEqual(c, True) - - def test03(self): - """03: Got notFound """ - - db.host_get_all = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, self.hostCmds.list) - - # --------> Test For nova-manage host show - - def test04(self): - """04: args are not enough(nova-manage host show) """ - self.assertRaises(TypeError, self.hostCmds.show ) - - - def test05(self): - """05: nova-manage host show not-registered-host, and got an error""" - - rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} ) - self.hostCmds.show('host1') - self.assertEqual( self.stdout.buffer[:3]=='ERR', True ) - - - def test06(self): - """06: nova-manage host show registerd-host, and no project uses the host""" - - dic = {'ret': True, - 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, - 'usage': {}} - - rpc.call = Mock(return_value=dic ) - self.hostCmds.show('host1') - - # result should be : - # HOST PROJECT cpu mem(mb) disk(gb) - # host1 1 2 3 - line = self.stdout.buffer.split('\n')[1] - line = re.compile('\t+').sub(' ', line).strip() - c1 = ( 'host1 1 2 3' == line ) - c2 = ( self.stdout.buffer.count('\n') == 2 ) - - self.assertEqual( c1 and c2, True ) - - def test07(self): - """07: nova-manage host show registerd-host, - and some projects use the host - """ - dic = {'ret': True, - 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, - 'usage': {'p1': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, - 'p2': {'vcpus':1, 'memory_mb':2, 'local_gb':3} }} - - rpc.call = Mock(return_value=dic ) - self.hostCmds.show('host1') - - # result should be : - # HOST PROJECT cpu mem(mb) disk(gb) - # host1 1 2 3 - # host1 p1 1 2 3 - # host1 p2 4 5 6 - line = self.stdout.buffer.split('\n')[1] - ret = re.compile('\t+').sub(' ', line).strip() - c1 = ( 'host1 1 2 3' == ret ) - - line = self.stdout.buffer.split('\n')[2] - line = re.compile('\t+').sub(' ', line).strip() - c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line ) - - line = self.stdout.buffer.split('\n')[3] - ret = re.compile('\t+').sub(' ', line).strip() - c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret ) - - self.assertEqual( c1 and c2 and c3, True ) - - def test08(self): - """08: nova-manage host show registerd-host, and rpc.call returns None - (unexpected error) - """ - rpc.call = Mock(return_value=None ) - self.hostCmds.show('host1') - c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') ) - self.assertEqual( c1, True ) - - # ----------> Test for bin/nova-manage instance live_migration - - def test09(self): - """09: arguments are not enough(nova-manage instances live_migration) - """ - self.assertRaises(TypeError, self.instanceCmds.live_migration ) - - def test10(self): - """10: arguments are not enough(nova-manage instances live_migration ec2_id) - """ - self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) - - def test11(self): - """11: nova-manage instances live_migration ec2_id host, - where hostname is invalid - """ - db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) - - def test12(self): - """12: nova-manage instances live_migration ec2_id(invalid id) host""" - - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') ) - - 
self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) - - def test13(self): - """13: nova-manage instances live_migration ec2_id host, - but instance specifed by ec2 id is not running (state is not power_state.RUNNING) - """ - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) - try : - self.instanceCmds.live_migration('i-12345', 'host1') - except exception.Invalid, e: - c1 = (0 < e.message.find('is not running') ) - self.assertTrue(c1, True) - return False - - - def test14(self): - """14: nova-manage instances live_migration ec2_id host, - but instance specifed by ec2 id is not running (state_description is not running) - """ - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) - try : - self.instanceCmds.live_migration('i-12345', 'host2') - except exception.Invalid, e: - c1 = (0 < e.message.find('is not running') ) - self.assertTrue(c1, True) - return False - - def test15(self): - """15: nova-manage instances live_migration ec2_id host, - but instance is running at the same host specifed above, so err should be occured. - """ - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - try : - self.instanceCmds.live_migration('i-12345', 'host1') - except exception.Invalid, e: - c1 = ( 0 <= e.message.find('is running now') ) - self.assertTrue(c1, True) - return False - - - def test16(self): - """16: nova-manage instances live_migration ec2_id host, - rpc.call raises RemoteError because destination doesnt have enough resource. - """ - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - rpc.call = Mock(return_value = rpc.RemoteError(TypeError, 'val', 'traceback')) - self.assertRaises(rpc.RemoteError, self.instanceCmds.live_migration, 'i-xxx', 'host2' ) - - - def test17(self): - """17: nova-manage instances live_migration ec2_id host, - everything goes well, ang gets success messages. - """ - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - rpc.call = Mock(return_value = None) - - self.instanceCmds.live_migration('i-12345', 'host2') - c1 = (0 <= self.stdout.buffer.find('Finished all procedure') ) - self.assertEqual( c1, True ) - - - def tearDown(self): - """common terminating method. """ - commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() ) - commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() ) - sys.stdout.flush() - sys.stdout = self.stdoutBak - self.stderr.realFlush() - sys.stderr = self.stderrBak - -if __name__ == '__main__': - #unittest.main() - suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions) - unittest.TextTestRunner(verbosity=3).run(suite) - - ->>>>>>> MERGE-SOURCE diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py deleted file mode 100644 index 2fd8bd511..000000000 --- a/nova/livemigration_test/UT/schedulerManager.test.py +++ /dev/null @@ -1,771 +0,0 @@ -<<<<<<< TREE -#!/usr/bin/python -# -*- coding: UTF-8 -*- - -NOVA_DIR='/opt/nova-2010.2' - -import sys -import unittest -import commands -import re - -from mock import Mock - -try : - print - print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' 
% NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.scheduler.manager import SchedulerManager - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - -except: - print 'set PYTHONPATH to nova-install-dir' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def flush(self): - self.buffer = '' - - -class SchedulerTestFunctions(unittest.TestCase): - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - self.host = 'openstack2-api' - self.manager = SchedulerManager(host=self.host) - - self.setTestData() - - def setTestData(self): - - self.host1 = Host() - self.host1.__setitem__('name', 'host1') - self.host1.__setitem__('cpu', 5) - self.host1.__setitem__('memory_mb', 20480) - self.host1.__setitem__('hdd_gb', 876) - - self.host2 = Host() - self.host2.__setitem__('name', 'host2') - self.host2.__setitem__('cpu', 5) - self.host2.__setitem__('memory_mb', 20480) - self.host2.__setitem__('hdd_gb', 876) - - self.instance1 = Instance() - for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance1.__setitem__(key, val) - - - self.instance2 = Instance() - for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance2.__setitem__(key, val) - - - self.instance3 = Instance() - for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance3.__setitem__(key, val) - - self.instance4 = Instance() - for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance4.__setitem__(key, val) - - self.instance5 = Instance() - for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance5.__setitem__(key, val) - - self.instance6 = Instance() - for key, val in [ ('id', 6), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance6.__setitem__(key, val) - - self.instance7 = Instance() - for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]: - self.instance7.__setitem__(key, val) - - self.instance8 = Instance() - for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: - self.instance8.__setitem__(key, val) - - - - def check_format(self, val): - """check result format of show_host_resource """ - - if dict != type(val) : - sys.stderr.write('return value is 
not dict') - return False - - if not val.has_key('ret'): - sys.stderr.write('invalid format(missing "ret"). ') - return False - - if not val['ret'] : - if not val.has_key('msg') : - sys.stderr.write( 'invalid format(missing "msg").' ) - return False - - else : - if not val.has_key('phy_resource') : - sys.stderr.write('invalid format(missing "phy_resource"). ') - return False - - if not val.has_key('usage'): - sys.stderr.write('invalid format(missing "usage"). ') - return False - - if not self._check_format(val['phy_resource']): - return False - - for key, dic in val['usage'].items() : - if not self._check_format(dic): - return False - return True - - def _check_format(self, val): - if dict != type(val) : - sys.stderr.write('return value is not dict') - return False - - for key in ['cpu', 'memory_mb', 'hdd_gb']: - if not val.has_key(key) : - sys.stderr.write('invalid format(missing "%s"). ' % key ) - return False - - return True - - # ---> test for nova.scheduler.manager.show_host_resource() - - def test01(self): - """01: get NotFound exception when dest host not found on DB """ - - ctxt = context.get_admin_context() - db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - result = self.manager.show_host_resource(ctxt, 'not-registered-host') - c1 = ( not result['ret'] ) - c2 = ( 0 == result['msg'].find('No such') ) - self.assertEqual(c1 and c2, True) - - def test02(self): - """02: get other exception if unexpected err. """ - - ctxt = context.get_admin_context() - db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, self.manager.show_host_resource, ctxt, 'host1' ) - - def test03(self): - """03: no instance found on dest host. """ - - ctxt = context.get_admin_context() - db.host_get_by_name = Mock( return_value = self.host1 ) - db.instance_get_all_by_host = Mock( return_value=[]) - ret= self.manager.show_host_resource(ctxt, 'host1') - - c1 = self.check_format(ret) - v = ret['phy_resource'] - c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) - c3 = ( 0 == len(ret['usage']) ) - - self.assertEqual(c1 and c2 and c3, True) - - def test04(self): - """04: some instance found on dest host. """ - - ctxt = context.get_admin_context() - db.host_get_by_name = Mock( return_value = self.host1 ) - db.instance_get_all_by_host = Mock( return_value=[ self.instance1, - self.instance2, - self.instance3] ) - - db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3) - db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) - db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) - - ret= self.manager.show_host_resource(ctxt, 'host1') - - c1 = self.check_format(ret) - v = ret['phy_resource'] - c2 = ( (5 == v['cpu']) and (20480 == v['memory_mb']) and (876 == v['hdd_gb'])) - c3 = ( 2 == len(ret['usage']) ) - c4 = ( self.instance1['project_id'] in ret['usage'].keys()) - c5 = ( self.instance3['project_id'] in ret['usage'].keys()) - - self.assertEqual(c1 and c2 and c3 and c4 and c5, True) - - - # ---> test for nova.scheduler.manager.has_enough_resource() - def test05(self): - """05: when cpu is exccded some instance found on dest host. 
""" - - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance6) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) - - def test06(self): - """06: when memory is exccded some instance found on dest host. """ - - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance7) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) - - def test07(self): - """07: when hdd is exccded some instance found on dest host. """ - - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance8) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) - - def test08(self): - """08: everything goes well. (instance_get_all_by_host returns list)""" - - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, True) - - - def test09(self): - """09: everything goes well(instance_get_all_by_host returns[]). """ - - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [] ) - - ret= self.manager.has_enough_resource(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, True) - - - # ---> test for nova.scheduler.manager.live_migration() - - - def test10(self): - """10: instance_get_by_internal_id issue NotFound. """ - # Mocks for has_enough_resource() - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance8) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - # Mocks for live_migration()db.instance_get_by_internal_id - # (any Mock is ok here. important mock is all above) - db.instance_get_by_internal_id = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.live_migration, - ctxt, - 'i-12345', - 'host1') - - - def test11(self): - """11: return False if host doesnt have enough resource. """ - - # Mocks for has_enough_resource() - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance8) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - # Mocks for live_migration()db.instance_get_by_internal_id - # (any Mock is ok here. important mock is all above) - db.instance_get_by_internal_id = Mock(return_value = self.instance8) - db.instance_set_state = Mock(return_value = True) - rpc_cast = Mock(return_value = True) - - ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, False) - - - - def test12(self): - """12: everything goes well. 
""" - - # Mocks for has_enough_resource() - ctxt = context.get_admin_context() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - # Mocks for live_migration()db.instance_get_by_internal_id - # (any Mock is ok here. important mock is all above) - db.instance_get_by_internal_id = Mock(return_value = self.instance8) - db.instance_set_state = Mock(return_value = True) - rpc.cast = Mock(return_value = True) - - ret= self.manager.live_migration(ctxt, 'i-12345', 'host1') - self.assertEqual(ret, True) - - - def tearDown(self): - """common terminating method. """ - #sys.stdout = self.stdoutBak - pass - -if __name__ == '__main__': - #unittest.main() - suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions) - unittest.TextTestRunner(verbosity=3).run(suite) - - -======= -#!/usr/bin/python -# -*- coding: UTF-8 -*- - - -import sys -import os -import unittest -import commands -import re -import libvirt - -from mock import Mock - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - -try : - print - print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.scheduler.manager import SchedulerManager - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def flush(self): - self.buffer = '' - - -class SchedulerTestFunctions(unittest.TestCase): - - # 共通の初期化処理 - def setUp(self): - """common init method. 
""" - - self.host = 'openstack2-api' - self.manager = SchedulerManager(host=self.host) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - self.host1.__setitem__('name', 'host1') - self.host1.__setitem__('vcpus', 5) - self.host1.__setitem__('memory_mb', 20480) - self.host1.__setitem__('local_gb', 876) - - self.host2 = Host() - self.host2.__setitem__('name', 'host2') - self.host2.__setitem__('vcpus', 5) - self.host2.__setitem__('memory_mb', 20480) - self.host2.__setitem__('local_gb', 876) - self.host2.__setitem__('hypervisor_type', 'QEMU') - self.host2.__setitem__('hypervisor_version', 12003) - xml="x86_64NehalemIntel" - self.host2.__setitem__('cpu_info', xml) - - self.instance1 = Instance() - for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance1.__setitem__(key, val) - - - self.instance2 = Instance() - for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance2.__setitem__(key, val) - - - self.instance3 = Instance() - for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance3.__setitem__(key, val) - - self.instance4 = Instance() - for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance4.__setitem__(key, val) - - self.instance5 = Instance() - for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance5.__setitem__(key, val) - - self.instance6 = Instance() - for key, val in [ ('id', 6), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance6.__setitem__(key, val) - - self.instance7 = Instance() - for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]: - self.instance7.__setitem__(key, val) - - self.instance8 = Instance() - for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: - self.instance8.__setitem__(key, val) - - self.service1 = Service() - for key, val in [ ('id', 1), ('host', 'host1'), ('binary', 'nova-compute'), - ('topic', 'compute')]: - self.service1.__setitem__(key, val) - - - def setMocks(self): - self.ctxt = context.get_admin_context() - # Mocks for has_enough_resource() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - # Mocks for live_migration - db.instance_get_by_internal_id = Mock(return_value = self.instance1) - # db.host_get_by_name <- defined above. 
- db.service_get_all_by_topic = Mock(return_value = [self.service1] ) - rpc.call = Mock(return_value=1) - - def check_format(self, val): - """check result format of show_host_resource """ - - if dict != type(val) : - sys.stderr.write('return value is not dict') - return False - - if not val.has_key('ret'): - sys.stderr.write('invalid format(missing "ret"). ') - return False - - if not val['ret'] : - if not val.has_key('msg') : - sys.stderr.write( 'invalid format(missing "msg").' ) - return False - - else : - if not val.has_key('phy_resource') : - sys.stderr.write('invalid format(missing "phy_resource"). ') - return False - - if not val.has_key('usage'): - sys.stderr.write('invalid format(missing "usage"). ') - return False - - if not self._check_format(val['phy_resource']): - return False - - for key, dic in val['usage'].items() : - if not self._check_format(dic): - return False - return True - - def _check_format(self, val): - if dict != type(val) : - sys.stderr.write('return value is not dict') - return False - - for key in ['vcpus', 'memory_mb', 'local_gb']: - if not val.has_key(key) : - sys.stderr.write('invalid format(missing "%s"). ' % key ) - return False - - return True - - - # ---> test for nova.scheduler.manager.show_host_resource() - - def test01(self): - """01: get NotFound exception when dest host not found on DB """ - - db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - result = self.manager.show_host_resource(self.ctxt, 'not-registered-host') - c1 = ( not result['ret'] ) - c2 = ( 0 == result['msg'].find('No such') ) - self.assertEqual(c1 and c2, True) - - def test02(self): - """02: get other exception if unexpected err. """ - - db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, self.manager.show_host_resource, self.ctxt, 'host1' ) - - def test03(self): - """03: no instance found on dest host. """ - - db.host_get_by_name = Mock( return_value = self.host1 ) - db.instance_get_all_by_host = Mock( return_value=[]) - ret= self.manager.show_host_resource(self.ctxt, 'host1') - - c1 = self.check_format(ret) - v = ret['phy_resource'] - c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) - c3 = ( 0 == len(ret['usage']) ) - - self.assertEqual(c1 and c2 and c3, True) - - def test04(self): - """04: some instance found on dest host. """ - - db.host_get_by_name = Mock( return_value = self.host1 ) - db.instance_get_all_by_host = Mock( return_value=[ self.instance1, - self.instance2, - self.instance3] ) - - db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3) - db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) - db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) - - ret= self.manager.show_host_resource(self.ctxt, 'host1') - - c1 = self.check_format(ret) - v = ret['phy_resource'] - c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) - c3 = ( 2 == len(ret['usage']) ) - c4 = ( self.instance1['project_id'] in ret['usage'].keys()) - c5 = ( self.instance3['project_id'] in ret['usage'].keys()) - - self.assertEqual(c1 and c2 and c3 and c4 and c5, True) - - - # ---> test for nova.scheduler.manager.has_enough_resource() - def test05(self): - """05: when cpu is exccded some instance found on dest host. 
""" - - db.instance_get = Mock(return_value = self.instance6) - try : - self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - c1 = ( 0 < e.message.find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - - def test06(self): - """06: when memory is exccded some instance found on dest host. """ - - db.instance_get = Mock(return_value = self.instance7) - try : - self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - c1 = ( 0 <= e.message.find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - def test07(self): - """07: when hdd is exccded some instance found on dest host. """ - - db.instance_get = Mock(return_value = self.instance8) - try : - self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - c1 = ( 0 <= e.message.find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - - def test08(self): - """08: everything goes well. (instance_get_all_by_host returns list)""" - - ret= self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') - self.assertEqual(ret, None) - - - def test09(self): - """09: everything goes well(instance_get_all_by_host returns[]). """ - - db.instance_get_all_by_host = Mock(return_value = [] ) - ret= self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') - self.assertEqual(ret, None) - - - # ---> test for nova.scheduler.manager.live_migration() - - - def test10(self): - """10: instance_get_by_internal_id issue NotFound. """ - - # Mocks for has_enough_resource() - db.instance_get = Mock(return_value = self.instance8) - # Mocks for live_migration()db.instance_get_by_internal_id - # (any Mock is ok here. important mock is all above) - db.instance_get_by_internal_id = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'i-12345', - 'host1') - - - def test11(self): - """11: get NotFound exception when dest host not found on DB """ - - db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'i-12345', - 'host1') - - - def test12(self): - """12: Destination host is not compute node """ - self.assertRaises(exception.Invalid, - self.manager.live_migration, - self.ctxt, - 'i-12345', - 'host2') - - - # Cannot test the case of hypervisor type difference and hypervisor - # version difference, since we cannot set different mocks to same method.. - - def test13(self): - """13: rpc.call raises RemoteError(Unexpected error occurs when executing compareCPU) """ - rpc.call = Mock(return_value = rpc.RemoteError(libvirt.libvirtError, 'val', 'traceback')) - self.assertRaises(rpc.RemoteError, - self.manager.live_migration, - self.ctxt, - 'i-12345', - 'host1') - - def test14(self): - """14: rpc.call returns 0 (cpu is not compatible between src and dest) """ - rpc.call = Mock(return_value = 0) - try : - self.manager.live_migration(self.ctxt, 'i-12345', 'host1') - except exception.Invalid, e: - c1 = ( 0 <= e.message.find('doesnt have compatibility to')) - self.assertTrue(c1, True) - return False - - def test15(self): - """15: raise NotEmpty if host doesnt have enough resource. 
""" - - # Mocks for has_enough_resource() - db.instance_get = Mock(return_value = self.instance8) - - # Mocks for live_migration() - db.instance_get_by_internal_id = Mock(return_value = self.instance8) - db.instance_set_state = Mock(return_value = True) - rpc_cast = Mock(return_value = True) - - try : - self.manager.live_migration(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - c1 = ( 0 <= e.message.find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - - def test16(self): - """16: everything goes well. """ - - db.instance_get_by_internal_id = Mock(return_value = self.instance8) - db.instance_set_state = Mock(return_value = True) - rpc.cast = Mock(return_value = True) - - ret= self.manager.live_migration(self.ctxt, 'i-12345', 'host1') - self.assertEqual(ret, None) - - - def tearDown(self): - """common terminating method. """ - #sys.stdout = self.stdoutBak - pass - -if __name__ == '__main__': - #unittest.main() - suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions) - unittest.TextTestRunner(verbosity=3).run(suite) - - ->>>>>>> MERGE-SOURCE diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls deleted file mode 100644 index f73e8c5aa..000000000 Binary files a/nova/livemigration_test/UT/testCase_UT.xls and /dev/null differ -- cgit From a32ccff2e224d0d2bf72a0471d9e9599ba4d8990 Mon Sep 17 00:00:00 2001 From: masumotok Date: Fri, 24 Dec 2010 16:06:11 +0900 Subject: テスト項目表を再び追加した状態でコミット MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/livemigration_test/SI/picture.pptx | Bin 0 -> 137730 bytes nova/livemigration_test/SI/testCase_SI.xls | Bin 0 -> 43520 bytes .../SI/testParameterSheet_SI.xls | Bin 0 -> 464384 bytes nova/livemigration_test/SI/utils/demo-firstboot.sh | 39 ++ .../SI/utils/demo-runInstance.sh | 57 +++ nova/livemigration_test/SI/utils/nova-manage.conf | 18 + nova/livemigration_test/SI/utils/nova.conf | 10 + nova/livemigration_test/SI/utils/nova.sh | 180 +++++++++ nova/livemigration_test/SI/utils/nova.sh.compute | 37 ++ nova/livemigration_test/UT/computeManager.test.py | 411 +++++++++++++++++++++ .../UT/libvirtConnection.test.py | 372 +++++++++++++++++++ nova/livemigration_test/UT/nova-manage.test.py | 351 ++++++++++++++++++ .../livemigration_test/UT/schedulerManager.test.py | 408 ++++++++++++++++++++ nova/livemigration_test/UT/testCase_UT.xls | Bin 0 -> 202752 bytes 14 files changed, 1883 insertions(+) create mode 100644 nova/livemigration_test/SI/picture.pptx create mode 100644 nova/livemigration_test/SI/testCase_SI.xls create mode 100644 nova/livemigration_test/SI/testParameterSheet_SI.xls create mode 100755 nova/livemigration_test/SI/utils/demo-firstboot.sh create mode 100755 nova/livemigration_test/SI/utils/demo-runInstance.sh create mode 100644 nova/livemigration_test/SI/utils/nova-manage.conf create mode 100644 nova/livemigration_test/SI/utils/nova.conf create mode 100755 nova/livemigration_test/SI/utils/nova.sh create mode 100755 nova/livemigration_test/SI/utils/nova.sh.compute create mode 100644 nova/livemigration_test/UT/computeManager.test.py create mode 100644 nova/livemigration_test/UT/libvirtConnection.test.py create mode 100644 nova/livemigration_test/UT/nova-manage.test.py create mode 100644 nova/livemigration_test/UT/schedulerManager.test.py create mode 100644 nova/livemigration_test/UT/testCase_UT.xls diff --git a/nova/livemigration_test/SI/picture.pptx b/nova/livemigration_test/SI/picture.pptx new file 
mode 100644 index 000000000..b47bec9b5 Binary files /dev/null and b/nova/livemigration_test/SI/picture.pptx differ diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls new file mode 100644 index 000000000..65cf96fd7 Binary files /dev/null and b/nova/livemigration_test/SI/testCase_SI.xls differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls new file mode 100644 index 000000000..400b43b43 Binary files /dev/null and b/nova/livemigration_test/SI/testParameterSheet_SI.xls differ diff --git a/nova/livemigration_test/SI/utils/demo-firstboot.sh b/nova/livemigration_test/SI/utils/demo-firstboot.sh new file mode 100755 index 000000000..3a6f7fb0b --- /dev/null +++ b/nova/livemigration_test/SI/utils/demo-firstboot.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +DIR=/opt/nova-2010.1 + +# 1. Create the admin user +# nova-manage user admin <user name> <access-key> <secret-key> +# +#$DIR/bin/nova-manage user admin admin admin admin + +# 2. Create a project +# nova-manage create project <project name> <user belonging to the project> +# +#$DIR/bin/nova-manage project create admin admin + +# 3. Generate the credentials for using the cloud +# nova-manage project environment <project name> <user name> <file to store the credentials> +# +#$DIR/bin/nova-manage project environment admin admin $DIR/novarc + +# 4. Load the credentials +. $DIR/novarc + +# 5. Create the VM network for the project +# nova-manage user admin <user name> <access-key> <secret-key> +# +$DIR/bin/nova-manage network create 10.0.0.0/8 3 16 + +# 6. SSH public key authentication is required for the first login +# +if [ "" == "`euca-describe-keypairs | grep testkey`" ]; then + euca-add-keypair testkey > testkey.pem +fi + +# 7. +for i in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do + sudo ip addr del $i dev eth0 2> /dev/null +done + + diff --git a/nova/livemigration_test/SI/utils/demo-runInstance.sh b/nova/livemigration_test/SI/utils/demo-runInstance.sh new file mode 100755 index 000000000..171291262 --- /dev/null +++ b/nova/livemigration_test/SI/utils/demo-runInstance.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +DIR=/opt/nova-2010.1 + +function inc_assigned(){ + assigned=`expr $assigned + 1` +} + + +# 1. Load the credentials +. $DIR/novarc + +# 3. Start a virtual machine +# +ret=`euca-run-instances -t m1.small -k testkey ami-centos` +#ret=`euca-run-instances -t m1.small -k testkey ami-tiny` + +# 4. Reserve IP addresses for the virtual machines +# register them if they are not registered yet +registered=`euca-describe-addresses` +for ip in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do + + not_registered=`echo $registered | grep $ip` + if [ "" == "$not_registered" ]; then + echo "[INFO] registered $ip" + $DIR/bin/nova-manage floating create `hostname` $ip + fi +done + +# 5. Assign an IP address +echo 0 > /tmp/demo-runinstance +euca-describe-addresses | grep -v reserved | while read line; do + # assign an address that is not yet assigned to the virtual machine + ip=`echo $line | cut -d ' ' -f 2` + id=`echo $ret | cut -d ' ' -f 5` + if [ "" == "`echo $id | grep i- `" ] ; then + echo "[INFO] try again" $ret + break + fi + echo "[INFO] assigned ipaddr($ip) to instance($id) " + euca-associate-address -i $id $ip + echo 1 > /tmp/demo-runinstance + break +done + +echo $assigned +if [ 0 -eq "`cat /tmp/demo-runinstance`" ] ; then + echo "[INFO] address is full." +fi +rm -rf /tmp/demo-runinstance + + +# 6. 
FWの設定 +euca-authorize -P tcp -p 22 default 2> /dev/null > /dev/null +euca-authorize -P tcp -p 80 default 2> /dev/null > /dev/null +euca-authorize -P tcp -p 5555 default 2> /dev/null > /dev/null + diff --git a/nova/livemigration_test/SI/utils/nova-manage.conf b/nova/livemigration_test/SI/utils/nova-manage.conf new file mode 100644 index 000000000..9f8a02b96 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova-manage.conf @@ -0,0 +1,18 @@ +--verbose +--nodaemon +--dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--FAKE_subdomain=ec2 +--libvirt_type=qemu +--no_internet_conn=True +--public_netif=eth0 +--public_interface=eth0 + +--cc-host=172.19.0.131 +--routing_source_ip=172.19.0.131 +--sql_connection=mysql://root:nova@172.19.0.131/nova +--rabbit_host=172.19.0.131 +--redis_host=172.19.0.131 +--s3_host=172.19.0.131 +--auth_driver=nova.auth.ldapdriver.LdapDriver +--ldap_url=ldap://172.19.0.131 + diff --git a/nova/livemigration_test/SI/utils/nova.conf b/nova/livemigration_test/SI/utils/nova.conf new file mode 100644 index 000000000..c66bfbc53 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova.conf @@ -0,0 +1,10 @@ +--verbose +--nodaemon +--dhcpbridge_flagfile=/opt/nova-2010.4//bin/nova.conf +--network_manager=nova.network.manager.VlanManager +--cc_host=172.19.0.131 +--routing_source_ip=172.19.0.131 +--sql_connection=mysql://root:nova@localhost/nova +--auth_driver=nova.auth.ldapdriver.LdapDriver +--libvirt_type=qemu +--public_interface=eth0 diff --git a/nova/livemigration_test/SI/utils/nova.sh b/nova/livemigration_test/SI/utils/nova.sh new file mode 100755 index 000000000..b8e2e9f26 --- /dev/null +++ b/nova/livemigration_test/SI/utils/nova.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash +DIR=`pwd` +CMD=$1 +SOURCE_BRANCH=lp:nova +if [ -n "$2" ]; then + SOURCE_BRANCH=$2 +fi +#DIRNAME=nova +DIRNAME="" +NOVA_DIR=$DIR/$DIRNAME +if [ -n "$3" ]; then + NOVA_DIR=$DIR/$3 +fi + +if [ ! -n "$HOST_IP" ]; then + # NOTE(vish): This will just get the first ip in the list, so if you + # have more than one eth device set up, this will fail, and + # you should explicitly set HOST_IP in your environment + HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` +fi + +#USE_MYSQL=${USE_MYSQL:-0} +USE_MYSQL=1 +MYSQL_PASS=${MYSQL_PASS:-nova} +TEST=${TEST:-0} +#USE_LDAP=${USE_LDAP:-0} +USE_LDAP=1 +LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} +NET_MAN=${NET_MAN:-VlanManager} +# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface +# below but make sure that the interface doesn't already have an +# ip or you risk breaking things. 
+# FLAT_INTERFACE=eth0 + +if [ "$USE_MYSQL" == 1 ]; then + SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova +else + SQL_CONN=sqlite:///$NOVA_DIR/nova.sqlite +fi + +if [ "$USE_LDAP" == 1 ]; then + AUTH=ldapdriver.LdapDriver +else + AUTH=dbdriver.DbDriver +fi + +mkdir -p /etc/nova +cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf +--network_manager=nova.network.manager.$NET_MAN +--cc_host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=$SQL_CONN +--auth_driver=nova.auth.$AUTH +--libvirt_type=$LIBVIRT_TYPE +--public_interface=eth0 +NOVA_CONF_EOF + +if [ -n "$FLAT_INTERFACE" ]; then + echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf +fi + +if [ "$CMD" == "branch" ]; then + sudo apt-get install -y bzr + rm -rf $NOVA_DIR + bzr branch $SOURCE_BRANCH $NOVA_DIR + cd $NOVA_DIR + mkdir -p $NOVA_DIR/instances + mkdir -p $NOVA_DIR/networks +fi + +# You should only have to run this once +if [ "$CMD" == "install" ]; then + sudo apt-get install -y python-software-properties + sudo add-apt-repository ppa:nova-core/ppa + sudo apt-get update + sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables + sudo apt-get install -y user-mode-linux kvm libvirt-bin + sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server + sudo apt-get install -y lvm2 iscsitarget open-iscsi + echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget + sudo /etc/init.d/iscsitarget restart + sudo modprobe kvm + sudo /etc/init.d/libvirt-bin restart + sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot + sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy + sudo apt-get install -y python-libvirt python-libxml2 python-routes + if [ "$USE_MYSQL" == 1 ]; then + cat </etc/nova/nova-manage.conf << NOVA_CONF_EOF +--verbose +--nodaemon +--dhcpbridge_flagfile=/etc/nova/nova-manage.conf +--FAKE_subdomain=ec2 +--libvirt_type=qemu +--no_internet_conn=True +--public_netif=eth0 +--public_interface=eth0 + +--cc-host=$HOST_IP +--routing_source_ip=$HOST_IP +--sql_connection=mysql://root:nova@$HOST_IP/nova +--rabbit_host=$HOST_IP +--redis_host=$HOST_IP +--s3_host=$HOST_IP +--auth_driver=nova.auth.ldapdriver.LdapDriver +--ldap_url=ldap://$HOST_IP + +NOVA_CONF_EOF + +$DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf + diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py new file mode 100644 index 000000000..69ee876d1 --- /dev/null +++ b/nova/livemigration_test/UT/computeManager.test.py @@ -0,0 +1,411 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + + +import sys +import os +import unittest +import commands +import re +import logging + +from mock import Mock +import twisted + +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + +try: + print + print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' 
\ + % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.compute.manager import ComputeManager + from nova.virt.libvirt_conn import LibvirtConnection + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set correct NOVA_DIR in this script. ' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + + def write(self, arg): + self.buffer += arg + + def writelines(self, arg): + self.buffer += arg + + def flush(self): + print 'flush' + self.buffer = '' + + +class tmpStderr(tmpStdout): + def write(self, arg): + self.buffer += arg + + def flush(self): + pass + + def realFlush(self): + self.buffer = '' + +dummyCallReturnValue={ 0:True } +dummyCallCount=0 +def dummyCall(context, topic, method): + global dummyCallReturnValue, dummyCallCount + if dummyCallCount in dummyCallReturnValue.keys() : + ret = dummyCallReturnValue[ dummyCallCount ] + dummyCallCount += 1 + return ret + else : + dummyCallCount += 1 + return False + + +class ComputeTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + manager = None + + # 共通の初期化処理 + def setUp(self): + """common init method. """ + + #if self.stdout is None: + # self.__class__.stdout = tmpStdout() + #self.stdoutBak = sys.stdout + #sys.stdout = self.stdout + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + self.host = 'openstack2-api' + if self.manager is None: + self.__class__.manager = ComputeManager(host=self.host) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + for key, val in [('name', 'host1'), ('cpu', 5), + ('memory_mb', 20480), ('hdd_gb', 876)]: + self.host1.__setitem__(key, val) + + self.host2 = Host() + for key, val in [('name', 'host2'), ('cpu', 5), + ('memory_mb', 20480), ('hdd_gb', 876)]: + self.host2.__setitem__(key, val) + + self.instance1 = Instance() + for key, val in [('id', 1), ('host', 'host1'), + ('hostname', 'i-12345'), ('state', power_state.RUNNING), + ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), + ('hdd_gb', 5), ('internal_id', 12345)]: + self.instance1.__setitem__(key, val) + + self.instance2 = Instance() + for key, val in [('id', 2), ('host', 'host1'), + ('hostname', 'i-12345'), ('state', power_state.RUNNING), + ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), + ('hdd_gb', 5)]: + self.instance2.__setitem__(key, val) + + self.fixed_ip1 = FixedIp() + for key, val in [('id', 1), ('address', '1.1.1.1'), + ('network_id', '1'), ('instance_id', 1)]: + self.fixed_ip1.__setitem__(key, val) + + self.vol1 = Volume() + for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'), + ('availability_zone', 'nova'), ('host', 'host1')]: + self.vol1.__setitem__(key, val) + + self.vol2 = Volume() + for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'), + ('availability_zone', 'nova'), ('host', 'host1')]: + self.vol2.__setitem__(key, val) + + self.secgrp1 = Volume() + for key, val in [('id', 1), ('ec2_id', 'default')]: + self.secgrp1.__setitem__(key, val) + + self.secgrp2 = Volume() + for key, val in [('id', 2), ('ec2_id', 'def2')]: + self.secgrp2.__setitem__(key, val) + + self.netref1 = Network() + + def 
setMocks(self): + + # mocks for pre_live_migration + self.ctxt = context.get_admin_context() + db.instance_get = Mock(return_value=self.instance1) + db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) + db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) + db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) + db.security_group_get_by_instance \ + = Mock(return_value=[self.secgrp1, self.secgrp2]) + self.manager.driver.setup_nwfilters_for_instance \ + = Mock(return_value=None) + self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None) + self.manager.network_manager.setup_compute_network \ + = Mock(return_value=None) + # mocks for live_migration_ + rpc.call = Mock(return_value=True) + db.instance_set_state = Mock(return_value=True) + + # ---> test for nova.compute.manager.pre_live_migration() + def test01(self): + """01: NotFound error occurs on finding instance on DB. """ + + db.instance_get = Mock(side_effect=exception.NotFound('ERR')) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test02(self): + """02: NotAuthrized occurs on finding volume on DB. """ + + db.volume_get_by_ec2_id \ + = Mock(side_effect=exception.NotAuthorized('ERR')) + + self.assertRaises(exception.NotAuthorized, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test03(self): + """03: Unexpected exception occurs on finding volume on DB. """ + + db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) + + self.assertRaises(TypeError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test04(self): + """04: no volume and fixed ip found on DB, """ + + db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) + db.instance_get_fixed_address = Mock(return_value=None) + + self.assertRaises(rpc.RemoteError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + c1 = (0 <= sys.stderr.buffer.find('has no volume')) + + self.assertEqual(c1, True) + + def test05(self): + """05: volume found and no fixed_ip found on DB. """ + + db.instance_get_fixed_address \ + = Mock(side_effect=exception.NotFound('ERR')) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test06(self): + """06: self.driver.setup_nwfilters_for_instance causes NotFound. """ + self.manager.driver.setup_nwfilters_for_instance \ + = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test07(self): + """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """ + self.manager.network_manager.setup_compute_network \ + = Mock(side_effect=exception.ProcessExecutionError("ERR")) + + self.assertRaises(exception.ProcessExecutionError, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + + def test08(self): + """08: self.manager.network_manager.setup_compute_network + exception.NotFound. """ + self.manager.network_manager.setup_compute_network \ + = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.pre_live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + # those 2 cases are omitted : + # self.driver.setup_nwfilters_for_instance causes + # twisted.python.failure.Failure. 
+ # self.driver.refresh_security_group causes twisted.python.failure.Failure. + # + # twisted.python.failure.Failure can not be used with assertRaises, + # it doesnt have __call___ + # + + def test09(self): + """09: volume/fixed_ip found on DB, all procedure finish + successfully.. """ + + result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + self.assertEqual(result, True) + + # ---> test for nova.compute.manager.live_migration() + + def test10(self): + """10: rpc.call(pre_live_migration returns Error(Not None). """ + rpc.call = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test11(self): + """11: if rpc.call returns rpc.RemoteError. """ + + rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) + db.instance_set_state = Mock(return_value=True) + result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + c1 = (None == result) + c2 = (0 <= sys.stderr.buffer.find('err at')) + self.assertEqual(c1 and c2, True) + + def test12(self): + """12: if rpc.call returns rpc.RemoteError and instance_set_state + also ends up err. (then , unexpected err occurs, in this case + TypeError) + """ + rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) + db.instance_set_state = Mock(side_effect=TypeError("ERR")) + self.assertRaises(TypeError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test13(self): + """13: if wait for pre_live_migration, but timeout. """ + rpc.call = dummyCall + + db.instance_get = Mock(return_value=self.instance1) + + result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', + 'host2') + c1 = (None == result) + c2 = (0 <= sys.stderr.buffer.find('Timeout for')) + self.assertEqual(c1 and c2, True) + + def test14(self): + """14: if db_instance_get issues NotFound. + """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(side_effect=exception.NotFound("ERR")) + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test15(self): + """15: if rpc.call returns True, and instance_get() cause other + exception. (Unexpected case - b/c it already checked by + nova-manage) + """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(side_effect=TypeError("ERR")) + + self.assertRaises(TypeError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test16(self): + """16: if rpc.call returns True, and live_migration issues + ProcessExecutionError. """ + rpc.call = Mock(return_value=True) + db.instance_get = Mock(return_value=self.instance1) + ret = self.manager.driver.live_migration \ + = Mock(side_effect=utils.ProcessExecutionError("ERR")) + + self.assertRaises(utils.ProcessExecutionError, + self.manager.live_migration, + self.ctxt, + 'dummy_ec2_id', + 'host2') + + def test17(self): + """17: everything goes well. """ + self.manager.driver.live_migration = Mock(return_value=True) + ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + self.assertEqual(True, True) + + def tearDown(self): + """common terminating method. 
""" + self.stderr.realFlush() + sys.stderr = self.stderrBak + #sys.stdout = self.stdoutBak + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + #unittest.main() + + suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions) + unittest.TextTestRunner(verbosity=2).run(suite) + + #suite = unittest.TestSuite() + #suite.addTest(ComputeTestFunctions("test15")) + #suite.addTest(ComputeTestFunctions("test16")) + #unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py new file mode 100644 index 000000000..5dfe8702c --- /dev/null +++ b/nova/livemigration_test/UT/libvirtConnection.test.py @@ -0,0 +1,372 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + + +import sys +import os +import unittest +import commands +import re +import logging +import libvirt + +from mock import Mock +import twisted + +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + + +try : + print + print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.compute.manager import ComputeManager + from nova.virt import libvirt_conn + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova import process + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set correct NOVA_DIR in this script. ' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def writelines(self, arg): + self.buffer += arg + def flush(self): + print 'flush' + self.buffer = '' + +class tmpStderr(tmpStdout): + def write(self,arg): + self.buffer += arg + def flush(self): + pass + def realFlush(self): + self.buffer = '' + +class DummyLibvirtConn(object): + nwfilterLookupByName = None + def __init__(self): + pass + + +class LibvirtConnectionTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + manager = None + + # 共通の初期化処理 + def setUp(self): + """common init method. 
""" + + #if self.stdout is None: + # self.__class__.stdout = tmpStdout() + #self.stdoutBak = sys.stdout + #sys.stdout = self.stdout + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + self.host = 'openstack2-api' + if self.manager is None: + self.__class__.manager = libvirt_conn.get_connection(False) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]: + self.host1.__setitem__(key, val) + + self.instance1 = Instance() + for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]: + self.instance1.__setitem__(key, val) + + + self.instance2 = Instance() + for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance2.__setitem__(key, val) + + + self.fixed_ip1 = FixedIp() + for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'), + ('instance_id', 1)]: + self.fixed_ip1.__setitem__(key, val) + + self.floating_ip1 = FloatingIp() + for key, val in [ ('id', 1), ('address', '1.1.1.200') ]: + self.floating_ip1.__setitem__(key, val) + + self.netref1 = Network() + for key, val in [ ('id', 1) ]: + self.netref1.__setitem__(key, val) + + + def setMocks(self): + + self.ctxt = context.get_admin_context() + db.instance_get_fixed_address = Mock(return_value = '1.1.1.1') + db.fixed_ip_update = Mock(return_value = None) + db.fixed_ip_get_network = Mock(return_value = self.netref1) + db.network_update = Mock(return_value = None) + db.instance_get_floating_address = Mock(return_value = '1.1.1.200') + db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1) + db.floating_ip_update = Mock(return_value = None) + db.instance_update = Mock(return_value = None) + + + # ---> test for nova.virt.libvirt_conn.nwfilter_for_instance_exists() + + def test01(self): + """01: libvirt.libvirtError occurs. """ + + self.manager._wrapped_conn = DummyLibvirtConn() + self.manager._test_connection = Mock(return_value=True) + self.manager._conn.nwfilterLookupByName = \ + Mock(side_effect=libvirt.libvirtError("ERR")) + ret = self.manager.nwfilter_for_instance_exists(self.instance1) + self.assertEqual(ret, False) + + def test02(self): + """02: libvirt.libvirtError not occurs. """ + + self.manager._wrapped_conn = DummyLibvirtConn() + self.manager._test_connection = Mock(return_value=True) + self.manager._conn.nwfilterLookupByName = \ + Mock(return_value=True) + ret = self.manager.nwfilter_for_instance_exists(self.instance1) + self.assertEqual(ret, True) + + # ---> test for nova.virt.libvirt_conn.live_migraiton() + + def test03(self): + """03: Unexpected exception occurs on finding volume on DB. 
""" + + utils.execute = Mock( side_effect=process.ProcessExecutionError('ERR') ) + + self.assertRaises(process.ProcessExecutionError, + self.manager.live_migration, + self.instance1, + 'host2') + + # ---> other case cannot be tested because live_migraiton + # is synchronized/asynchronized method are mixed together + + + # ---> test for nova.virt.libvirt_conn._post_live_migraiton + + def test04(self): + """04: instance_ref is not nova.db.sqlalchemy.models.Instances""" + + self.assertRaises(TypeError, + self.manager._post_live_migration, + "dummy string", + 'host2') + + def test05(self): + """05: db.instance_get_fixed_address return None""" + + db.instance_get_fixed_address = Mock( return_value=None ) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found')) + self.assertEqual(c1 and c2, True) + + def test06(self): + """06: db.instance_get_fixed_address raises NotFound""" + + db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host2') + + def test07(self): + """07: db.instance_get_fixed_address raises Unknown exception""" + + db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test08(self): + """08: db.fixed_ip_update return NotFound. """ + + db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test09(self): + """09: db.fixed_ip_update return NotAuthorized. """ + db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') ) + self.assertRaises(exception.NotAuthorized, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test10(self): + """10: db.fixed_ip_update return Unknown exception. """ + db.fixed_ip_update = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test11(self): + """11: db.fixed_ip_get_network causes NotFound. """ + + db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager._post_live_migration, + self.instance1, + 'host1') + + # not tested db.fixed_ip_get_network raises NotAuthorized + # because same test has been done at previous test. + + def test12(self): + """12: db.fixed_ip_get_network causes Unknown exception. """ + + db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test13(self): + """13: db.network_update raises Unknown exception. """ + db.network_update = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def test14(self): + """14: db.instance_get_floating_address raises NotFound. """ + db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + + def test15(self): + """15: db.instance_get_floating_address returns None. 
""" + + db.instance_get_floating_address = Mock( return_value=None ) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found')) + self.assertEqual(c1 and c2, True) + + def test16(self): + """16: db.instance_get_floating_address raises NotFound. """ + + db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + def test17(self): + """17: db.instance_get_floating_address raises Unknown exception. """ + db.instance_get_floating_address = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + + def test18(self): + """18: db.floating_ip_get_by_address raises NotFound """ + + db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) + self.assertEqual(c1 and c2, True) + + def test19(self): + """19: db.floating_ip_get_by_address raises Unknown exception. """ + db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + + def test20(self): + """20: db.floating_ip_update raises Unknown exception. + """ + db.floating_ip_update = Mock(side_effect=TypeError("ERR")) + ret = self.manager._post_live_migration(self.instance1, 'host1') + c1 = (ret == None) + c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) + self.assertEqual(c1 and c2, True) + + def test21(self): + """21: db.instance_update raises unknown exception. """ + + db.instance_update = Mock(side_effect=TypeError("ERR")) + self.assertRaises(TypeError, + self.manager._post_live_migration, + self.instance1, + 'host1') + + def tearDown(self): + """common terminating method. """ + self.stderr.realFlush() + sys.stderr = self.stderrBak + #sys.stdout = self.stdoutBak + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + #unittest.main() + + suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions) + unittest.TextTestRunner(verbosity=2).run(suite) + + #suite = unittest.TestSuite() + #suite.addTest(LibvirtConnectionTestFunctions("test14")) + #suite.addTest(LibvirtConnectionTestFunctions("test16")) + #unittest.TextTestRunner(verbosity=2).run(suite) + + diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py new file mode 100644 index 000000000..f1653d21a --- /dev/null +++ b/nova/livemigration_test/UT/nova-manage.test.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +NOVA_DIR='/opt/nova-2010.4' + +import sys +import os +import unittest +import commands +import re + +from mock import Mock + +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + + +try : + print + print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' 
% NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + + +except: + print 'set correct NOVA_DIR in this script. ' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def flush(self): + self.buffer = '' + +class tmpStderr(tmpStdout): + def write(self, arg): + self.buffer += arg + def flush(self): + pass + def realFlush(self): + self.buffer = '' + + +class NovaManageTestFunctions(unittest.TestCase): + + stdout = None + stdoutBak = None + stderr = None + stderrBak = None + + hostCmds = None + + # common initialization + def setUp(self): + """common init method. """ + + commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() )) + commands.getstatusoutput('touch %s' % self.getInitpyPath() ) + try : + import bin.novamanagetest + except: + print 'Failed to import nova-manage. Check that bin/nova-manage exists.' + raise + + # replace stdout for checking nova-manage output + if self.stdout is None : + self.__class__.stdout = tmpStdout() + self.stdoutBak = sys.stdout + sys.stdout = self.stdout + + # replace stderr for checking nova-manage output + if self.stderr is None: + self.__class__.stderr = tmpStderr() + self.stderrBak = sys.stderr + sys.stderr = self.stderr + + # prepare test data + self.setTestData() + + + def setTestData(self): + import bin.novamanagetest + + if self.hostCmds is None : + self.__class__.hostCmds = bin.novamanagetest.HostCommands() + self.instanceCmds = bin.novamanagetest.InstanceCommands() + + self.host1 = Host() + self.host1.__setitem__('name', 'host1') + + self.host2 = Host() + self.host2.__setitem__('name', 'host2') + + self.instance1 = Instance() + self.instance1.__setitem__('id', 1) + self.instance1.__setitem__('host', 'host1') + self.instance1.__setitem__('hostname', 'i-12345') + self.instance1.__setitem__('state', power_state.NOSTATE) + self.instance1.__setitem__('state_description', 'running') + + self.instance2 = Instance() + self.instance2.__setitem__('id', 2) + self.instance2.__setitem__('host', 'host1') + self.instance2.__setitem__('hostname', 'i-12345') + self.instance2.__setitem__('state', power_state.RUNNING) + self.instance2.__setitem__('state_description', 'pending') + + self.instance3 = Instance() + self.instance3.__setitem__('id', 3) + self.instance3.__setitem__('host', 'host1') + self.instance3.__setitem__('hostname', 'i-12345') + self.instance3.__setitem__('state', power_state.RUNNING) + self.instance3.__setitem__('state_description', 'running') + + db.host_get_all = Mock(return_value=[self.host1, self.host2]) + + def getInitpyPath(self): + return '%s/bin/__init__.py' % NOVA_DIR + + def getNovaManageCopyPath(self): + return '%s/bin/novamanagetest.py' % NOVA_DIR + + # -----> Test for nova-manage host list + + def test01(self): + """01: Got a list of hosts. """ + + self.hostCmds.list() + + c1 = (2 == self.stdout.buffer.count('\n')) + c2 = (0 <= self.stdout.buffer.find('host1')) + c3 = (0 <= self.stdout.buffer.find('host2')) + self.assertEqual(c1 and c2 and c3, True) + + def test02(self): + """02: Got an empty list. 
""" + + db.host_get_all = Mock(return_value=[]) + self.hostCmds.list() + + # result should be empty + c = (0 == len(self.stdout.buffer) ) + self.assertEqual(c, True) + + def test03(self): + """03: Got notFound """ + + db.host_get_all = Mock(side_effect=exception.NotFound("ERR")) + self.assertRaises(exception.NotFound, self.hostCmds.list) + + # --------> Test For nova-manage host show + + def test04(self): + """04: args are not enough(nova-manage host show) """ + self.assertRaises(TypeError, self.hostCmds.show ) + + + def test05(self): + """05: nova-manage host show not-registered-host, and got an error""" + + rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} ) + self.hostCmds.show('host1') + self.assertEqual( self.stdout.buffer[:3]=='ERR', True ) + + + def test06(self): + """06: nova-manage host show registerd-host, and no project uses the host""" + + dic = {'ret': True, + 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, + 'usage': {}} + + rpc.call = Mock(return_value=dic ) + self.hostCmds.show('host1') + + # result should be : + # HOST PROJECT cpu mem(mb) disk(gb) + # host1 1 2 3 + line = self.stdout.buffer.split('\n')[1] + line = re.compile('\t+').sub(' ', line).strip() + c1 = ( 'host1 1 2 3' == line ) + c2 = ( self.stdout.buffer.count('\n') == 2 ) + + self.assertEqual( c1 and c2, True ) + + def test07(self): + """07: nova-manage host show registerd-host, + and some projects use the host + """ + dic = {'ret': True, + 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, + 'usage': {'p1': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, + 'p2': {'vcpus':1, 'memory_mb':2, 'local_gb':3} }} + + rpc.call = Mock(return_value=dic ) + self.hostCmds.show('host1') + + # result should be : + # HOST PROJECT cpu mem(mb) disk(gb) + # host1 1 2 3 + # host1 p1 1 2 3 + # host1 p2 4 5 6 + line = self.stdout.buffer.split('\n')[1] + ret = re.compile('\t+').sub(' ', line).strip() + c1 = ( 'host1 1 2 3' == ret ) + + line = self.stdout.buffer.split('\n')[2] + line = re.compile('\t+').sub(' ', line).strip() + c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line ) + + line = self.stdout.buffer.split('\n')[3] + ret = re.compile('\t+').sub(' ', line).strip() + c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret ) + + self.assertEqual( c1 and c2 and c3, True ) + + def test08(self): + """08: nova-manage host show registerd-host, and rpc.call returns None + (unexpected error) + """ + rpc.call = Mock(return_value=None ) + self.hostCmds.show('host1') + c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') ) + self.assertEqual( c1, True ) + + # ----------> Test for bin/nova-manage instance live_migration + + def test09(self): + """09: arguments are not enough(nova-manage instances live_migration) + """ + self.assertRaises(TypeError, self.instanceCmds.live_migration ) + + def test10(self): + """10: arguments are not enough(nova-manage instances live_migration ec2_id) + """ + self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) + + def test11(self): + """11: nova-manage instances live_migration ec2_id host, + where hostname is invalid + """ + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) + + def test12(self): + """12: nova-manage instances live_migration ec2_id(invalid id) host""" + + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') ) + + 
self.assertRaises(exception.NotFound, self.instanceCmds.live_migration, 'i-xxx', 'host1' ) + + def test13(self): + """13: nova-manage instances live_migration ec2_id host, + but instance specified by ec2 id is not running (state is not power_state.RUNNING) + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) + try : + self.instanceCmds.live_migration('i-12345', 'host1') + except exception.Invalid, e: + c1 = (0 < e.message.find('is not running') ) + self.assertTrue(c1, True) + return False + + + def test14(self): + """14: nova-manage instances live_migration ec2_id host, + but instance specified by ec2 id is not running (state_description is not running) + """ + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_by_internal_id = Mock( return_value = self.instance1 ) + try : + self.instanceCmds.live_migration('i-12345', 'host2') + except exception.Invalid, e: + c1 = (0 < e.message.find('is not running') ) + self.assertTrue(c1, True) + return False + + def test15(self): + """15: nova-manage instances live_migration ec2_id host, + but instance is running at the same host specified above, so an error should occur. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + try : + self.instanceCmds.live_migration('i-12345', 'host1') + except exception.Invalid, e: + c1 = ( 0 <= e.message.find('is running now') ) + self.assertTrue(c1, True) + return False + + + def test16(self): + """16: nova-manage instances live_migration ec2_id host, + rpc.call raises RemoteError because destination doesn't have enough resources. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + rpc.call = Mock(return_value = rpc.RemoteError(TypeError, 'val', 'traceback')) + self.assertRaises(rpc.RemoteError, self.instanceCmds.live_migration, 'i-xxx', 'host2' ) + + + def test17(self): + """17: nova-manage instances live_migration ec2_id host, + everything goes well, and gets success messages. + """ + db.host_get_by_name = Mock(return_value = self.host1) + db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) + rpc.call = Mock(return_value = None) + + self.instanceCmds.live_migration('i-12345', 'host2') + c1 = (0 <= self.stdout.buffer.find('Finished all procedure') ) + self.assertEqual( c1, True ) + + + def tearDown(self): + """common terminating method. 
""" + commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() ) + commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() ) + sys.stdout.flush() + sys.stdout = self.stdoutBak + self.stderr.realFlush() + sys.stderr = self.stderrBak + +if __name__ == '__main__': + #unittest.main() + suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions) + unittest.TextTestRunner(verbosity=3).run(suite) + + diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py new file mode 100644 index 000000000..a0b76c918 --- /dev/null +++ b/nova/livemigration_test/UT/schedulerManager.test.py @@ -0,0 +1,408 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + + +import sys +import os +import unittest +import commands +import re +import libvirt + +from mock import Mock + +# getting /nova-inst-dir +NOVA_DIR = os.path.abspath(sys.argv[0]) +for i in range(4): + NOVA_DIR = os.path.dirname(NOVA_DIR) + +try : + print + print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR + print + + sys.path.append(NOVA_DIR) + + from nova.scheduler.manager import SchedulerManager + + from nova import context + from nova import db + from nova import exception + from nova import flags + from nova import quota + from nova import utils + from nova.auth import manager + from nova.cloudpipe import pipelib + from nova import rpc + from nova.api.ec2 import cloud + from nova.compute import power_state + + from nova.db.sqlalchemy.models import * + +except: + print 'set correct NOVA_DIR in this script. ' + raise + + +class tmpStdout: + def __init__(self): + self.buffer = "" + def write(self,arg): + self.buffer += arg + def flush(self): + self.buffer = '' + + +class SchedulerTestFunctions(unittest.TestCase): + + # 共通の初期化処理 + def setUp(self): + """common init method. 
""" + + self.host = 'openstack2-api' + self.manager = SchedulerManager(host=self.host) + + self.setTestData() + self.setMocks() + + def setTestData(self): + + self.host1 = Host() + self.host1.__setitem__('name', 'host1') + self.host1.__setitem__('vcpus', 5) + self.host1.__setitem__('memory_mb', 20480) + self.host1.__setitem__('local_gb', 876) + + self.host2 = Host() + self.host2.__setitem__('name', 'host2') + self.host2.__setitem__('vcpus', 5) + self.host2.__setitem__('memory_mb', 20480) + self.host2.__setitem__('local_gb', 876) + self.host2.__setitem__('hypervisor_type', 'QEMU') + self.host2.__setitem__('hypervisor_version', 12003) + xml="x86_64NehalemIntel" + self.host2.__setitem__('cpu_info', xml) + + self.instance1 = Instance() + for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance1.__setitem__(key, val) + + + self.instance2 = Instance() + for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ'), + ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance2.__setitem__(key, val) + + + self.instance3 = Instance() + for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('hdd_gb', 5) ]: + self.instance3.__setitem__(key, val) + + self.instance4 = Instance() + for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance4.__setitem__(key, val) + + self.instance5 = Instance() + for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance5.__setitem__(key, val) + + self.instance6 = Instance() + for key, val in [ ('id', 6), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: + self.instance6.__setitem__(key, val) + + self.instance7 = Instance() + for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]: + self.instance7.__setitem__(key, val) + + self.instance8 = Instance() + for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'), + ('state', power_state.RUNNING), ('project_id', 'testPJ2'), + ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: + self.instance8.__setitem__(key, val) + + self.service1 = Service() + for key, val in [ ('id', 1), ('host', 'host1'), ('binary', 'nova-compute'), + ('topic', 'compute')]: + self.service1.__setitem__(key, val) + + + def setMocks(self): + self.ctxt = context.get_admin_context() + # Mocks for has_enough_resource() + db.instance_get = Mock(return_value = self.instance3) + db.host_get_by_name = Mock(return_value = self.host2) + db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) + + # Mocks for live_migration + db.instance_get_by_internal_id = Mock(return_value = self.instance1) + # db.host_get_by_name <- defined above. 
+ db.service_get_all_by_topic = Mock(return_value = [self.service1] ) + rpc.call = Mock(return_value=1) + + def check_format(self, val): + """check result format of show_host_resource """ + + if dict != type(val) : + sys.stderr.write('return value is not dict') + return False + + if not val.has_key('ret'): + sys.stderr.write('invalid format(missing "ret"). ') + return False + + if not val['ret'] : + if not val.has_key('msg') : + sys.stderr.write( 'invalid format(missing "msg").' ) + return False + + else : + if not val.has_key('phy_resource') : + sys.stderr.write('invalid format(missing "phy_resource"). ') + return False + + if not val.has_key('usage'): + sys.stderr.write('invalid format(missing "usage"). ') + return False + + if not self._check_format(val['phy_resource']): + return False + + for key, dic in val['usage'].items() : + if not self._check_format(dic): + return False + return True + + def _check_format(self, val): + if dict != type(val) : + sys.stderr.write('return value is not dict') + return False + + for key in ['vcpus', 'memory_mb', 'local_gb']: + if not val.has_key(key) : + sys.stderr.write('invalid format(missing "%s"). ' % key ) + return False + + return True + + + # ---> test for nova.scheduler.manager.show_host_resource() + + def test01(self): + """01: get NotFound exception when dest host not found on DB """ + + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + result = self.manager.show_host_resource(self.ctxt, 'not-registered-host') + c1 = ( not result['ret'] ) + c2 = ( 0 == result['msg'].find('No such') ) + self.assertEqual(c1 and c2, True) + + def test02(self): + """02: get other exception if unexpected err. """ + + db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) + self.assertRaises(TypeError, self.manager.show_host_resource, self.ctxt, 'host1' ) + + def test03(self): + """03: no instance found on dest host. """ + + db.host_get_by_name = Mock( return_value = self.host1 ) + db.instance_get_all_by_host = Mock( return_value=[]) + ret= self.manager.show_host_resource(self.ctxt, 'host1') + + c1 = self.check_format(ret) + v = ret['phy_resource'] + c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) + c3 = ( 0 == len(ret['usage']) ) + + self.assertEqual(c1 and c2 and c3, True) + + def test04(self): + """04: some instance found on dest host. """ + + db.host_get_by_name = Mock( return_value = self.host1 ) + db.instance_get_all_by_host = Mock( return_value=[ self.instance1, + self.instance2, + self.instance3] ) + + db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3) + db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) + db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) + + ret= self.manager.show_host_resource(self.ctxt, 'host1') + + c1 = self.check_format(ret) + v = ret['phy_resource'] + c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) + c3 = ( 2 == len(ret['usage']) ) + c4 = ( self.instance1['project_id'] in ret['usage'].keys()) + c5 = ( self.instance3['project_id'] in ret['usage'].keys()) + + self.assertEqual(c1 and c2 and c3 and c4 and c5, True) + + + # ---> test for nova.scheduler.manager.has_enough_resource() + def test05(self): + """05: when cpu is exccded some instance found on dest host. 
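+           (db.instance_get is re-mocked to return instance6; its 3 vcpus plus
+            those of the two instances already mocked on the destination use up
+            the destination's 5 vcpus, so has_enough_resource is expected to
+            raise NotEmpty)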
""" + + db.instance_get = Mock(return_value = self.instance6) + try : + self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 < e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False + + + def test06(self): + """06: when memory is exccded some instance found on dest host. """ + + db.instance_get = Mock(return_value = self.instance7) + try : + self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 <= e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False + + def test07(self): + """07: when hdd is exccded some instance found on dest host. """ + + db.instance_get = Mock(return_value = self.instance8) + try : + self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 <= e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False + + + def test08(self): + """08: everything goes well. (instance_get_all_by_host returns list)""" + + ret= self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + self.assertEqual(ret, None) + + + def test09(self): + """09: everything goes well(instance_get_all_by_host returns[]). """ + + db.instance_get_all_by_host = Mock(return_value = [] ) + ret= self.manager.has_enough_resource(self.ctxt, 'i-12345', 'host1') + self.assertEqual(ret, None) + + + # ---> test for nova.scheduler.manager.live_migration() + + + def test10(self): + """10: instance_get_by_internal_id issue NotFound. """ + + # Mocks for has_enough_resource() + db.instance_get = Mock(return_value = self.instance8) + # Mocks for live_migration()db.instance_get_by_internal_id + # (any Mock is ok here. important mock is all above) + db.instance_get_by_internal_id = Mock(side_effect=exception.NotFound("ERR")) + + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host1') + + + def test11(self): + """11: get NotFound exception when dest host not found on DB """ + + db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) + self.assertRaises(exception.NotFound, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host1') + + + def test12(self): + """12: Destination host is not compute node """ + self.assertRaises(exception.Invalid, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host2') + + + # Cannot test the case of hypervisor type difference and hypervisor + # version difference, since we cannot set different mocks to same method.. + + def test13(self): + """13: rpc.call raises RemoteError(Unexpected error occurs when executing compareCPU) """ + rpc.call = Mock(return_value = rpc.RemoteError(libvirt.libvirtError, 'val', 'traceback')) + self.assertRaises(rpc.RemoteError, + self.manager.live_migration, + self.ctxt, + 'i-12345', + 'host1') + + def test14(self): + """14: rpc.call returns 0 (cpu is not compatible between src and dest) """ + rpc.call = Mock(return_value = 0) + try : + self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + except exception.Invalid, e: + c1 = ( 0 <= e.message.find('doesnt have compatibility to')) + self.assertTrue(c1, True) + return False + + def test15(self): + """15: raise NotEmpty if host doesnt have enough resource. 
""" + + # Mocks for has_enough_resource() + db.instance_get = Mock(return_value = self.instance8) + + # Mocks for live_migration() + db.instance_get_by_internal_id = Mock(return_value = self.instance8) + db.instance_set_state = Mock(return_value = True) + rpc_cast = Mock(return_value = True) + + try : + self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + except exception.NotEmpty, e: + c1 = ( 0 <= e.message.find('doesnt have enough resource') ) + self.assertTrue(c1, True) + return False + + + def test16(self): + """16: everything goes well. """ + + db.instance_get_by_internal_id = Mock(return_value = self.instance8) + db.instance_set_state = Mock(return_value = True) + rpc.cast = Mock(return_value = True) + + ret= self.manager.live_migration(self.ctxt, 'i-12345', 'host1') + self.assertEqual(ret, None) + + + def tearDown(self): + """common terminating method. """ + #sys.stdout = self.stdoutBak + pass + +if __name__ == '__main__': + #unittest.main() + suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions) + unittest.TextTestRunner(verbosity=3).run(suite) + + diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls new file mode 100644 index 000000000..f73e8c5aa Binary files /dev/null and b/nova/livemigration_test/UT/testCase_UT.xls differ -- cgit -- cgit From 85acbbe916df8b2d18f0dc3a0b8cad9fcfdd6907 Mon Sep 17 00:00:00 2001 From: masumotok Date: Mon, 27 Dec 2010 17:49:07 +0900 Subject: launch_at を前回コミット時に追加したが、lauched_atというカラムが既に存在し、 紛らわしいのでlauched_onにした。 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/compute/manager.py | 2 +- nova/db/sqlalchemy/models.py | 4 ++-- nova/scheduler/manager.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3e468b3a4..66bbb8d5a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -105,7 +105,7 @@ class ComputeManager(manager.Manager): self.network_manager.setup_compute_network(context, instance_id) self.db.instance_update(context, instance_id, - {'host': self.host, 'launch_at':self.host}) + {'host': self.host, 'launched_on':self.host}) # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7f3a58bcb..c3e566c6a 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -234,9 +234,9 @@ class Instance(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) - # To remember at which host a instance booted. + # To remember on which host a instance booted. # An instance may moved to other host by live migraiton. - launch_at = Column(String(255)) + launched_on = Column(String(255)) # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 0921e3791..fe3ca9d5e 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -82,7 +82,7 @@ class SchedulerManager(manager.Manager): instance_id = instance_ref['id'] # 2. 
get src host and dst host - src = instance_ref['launch_at'] + src = instance_ref['launched_on'] shost_ref = db.host_get_by_name(context, src ) dhost_ref = db.host_get_by_name(context, dest) -- cgit From ea28b3117b02bcfd26e4017e850313cf5272d354 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 31 Dec 2010 12:43:40 +0900 Subject: deleting README.livemigration.txt and nova/livemigration_test/* --- README.livemigration.txt | 154 ------- nova/livemigration_test/SI/picture.pptx | Bin 137730 -> 0 bytes nova/livemigration_test/SI/testCase_SI.xls | Bin 49152 -> 0 bytes .../SI/testParameterSheet_SI.xls | Bin 464384 -> 0 bytes nova/livemigration_test/SI/utils/demo-firstboot.sh | 39 -- .../SI/utils/demo-runInstance.sh | 57 --- nova/livemigration_test/SI/utils/nova-manage.conf | 18 - nova/livemigration_test/SI/utils/nova.conf | 10 - nova/livemigration_test/SI/utils/nova.sh | 180 -------- nova/livemigration_test/SI/utils/nova.sh.compute | 37 -- nova/livemigration_test/UT/computeManager.test.py | 411 ------------------- .../UT/libvirtConnection.test.py | 382 ----------------- nova/livemigration_test/UT/nova-manage.test.py | 313 -------------- .../livemigration_test/UT/schedulerManager.test.py | 456 --------------------- nova/livemigration_test/UT/testCase_UT.xls | Bin 203776 -> 0 bytes 15 files changed, 2057 deletions(-) delete mode 100644 README.livemigration.txt delete mode 100644 nova/livemigration_test/SI/picture.pptx delete mode 100644 nova/livemigration_test/SI/testCase_SI.xls delete mode 100644 nova/livemigration_test/SI/testParameterSheet_SI.xls delete mode 100755 nova/livemigration_test/SI/utils/demo-firstboot.sh delete mode 100755 nova/livemigration_test/SI/utils/demo-runInstance.sh delete mode 100644 nova/livemigration_test/SI/utils/nova-manage.conf delete mode 100644 nova/livemigration_test/SI/utils/nova.conf delete mode 100755 nova/livemigration_test/SI/utils/nova.sh delete mode 100755 nova/livemigration_test/SI/utils/nova.sh.compute delete mode 100644 nova/livemigration_test/UT/computeManager.test.py delete mode 100644 nova/livemigration_test/UT/libvirtConnection.test.py delete mode 100644 nova/livemigration_test/UT/nova-manage.test.py delete mode 100644 nova/livemigration_test/UT/schedulerManager.test.py delete mode 100644 nova/livemigration_test/UT/testCase_UT.xls diff --git a/README.livemigration.txt b/README.livemigration.txt deleted file mode 100644 index d859c24ca..000000000 --- a/README.livemigration.txt +++ /dev/null @@ -1,154 +0,0 @@ -# -# Live migration feature usage: -# -# @auther Kei Masumoto -# @date 2010.12.01 -# -# @history ver.1 2010.12.01 ( masumotok ) -# initial version -# - - -0. pre-requisit settings - OS: Ubuntu lucid 10.04 for both instances and host. - NFS: nova-install-dir/instances has to be mounted by shared storage. - ( this version is tested using NFS) - Network manager: Only VlanManager can be used in this version. - instances : Instance must keep running without any EBS volume. - - -1. pre-requisite settings. - - (a) shared storage - As mentioned above, shared storage is inevitable for the live_migration functionality. - An example is NFS( my test environment ), and example setting is as follows. 
- - Prepare NFS server machine( nova-api server is OK), and add below line /etc/exports: - - > nova-install-dir/instances a.b.c.d/255.255.0.0(rw,sync,fsid=0,no_root_squash) - - where "nova-install-dir" is the directory which openstack is installed, and - add appropriate ip address and netmask for "a.b.c.d/255.255.0.0" , which should include - compute nodes which try to mount this directory. - - Then restart nfs server. - - > /etc/init.d/nfs-kernel-server restart - > /etc/init.d/idmapd restart - - Also, at any compute nodes, add below line to /etc/fstab: - - >172.19.0.131:/ DIR nfs4 defaults 0 0 - - where "DIR" must be same as 'instances_path'( see nova.compute.manager for the default value) - - Then try to mount, - - > mount -a -v - - Check exported directory is successfully mounted. if fail, try this at any hosts, - - > iptables -F - - Also, check file/daemon permissions. - we expect any nova daemons are running as root. - > root@openstack2-api:/opt/nova-2010.4# ps -ef | grep nova - > root 5948 5904 9 11:29 pts/4 00:00:00 python /opt/nova-2010.4//bin/nova-api - > root 5952 5908 6 11:29 pts/5 00:00:00 python /opt/nova-2010.4//bin/nova-objectstore - > ... (snip) - - "instances/" directory can be seen from server side: - > root@openstack:~# ls -ld nova-install-dir/instances/ - > drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/ - - also, client side: - > root@openstack-client:~# ls -ld nova-install-dir/instances/ - > drwxr-xr-x 2 root root 4096 2010-12-07 14:34 nova-install-dir/instances/ - - - - (b) libvirt settings - In default configuration, this feature use simple tcp protocol(qemu+tcp://). - To use this protocol, below configuration is necessary. - - a. modify /etc/libvirt/libvirt.conf - - before : #listen_tls = 0 - after : listen_tls = 0 - - before : #listen_tcp = 1 - after : listen_tcp = 1 - - append : auth_tcp = "none" - - b. modify /etc/init/libvirt-bin.conf - - before : exec /usr/sbin/libvirtd -d - after : exec /usr/sbin/libvirtd -d -l - - c. modify /etc/default/libvirt-bin - - before :libvirtd_opts=" -d" - after :libvirtd_opts=" -d -l" - - then, restart libvirt - stop libvirt-bin && start libvirt-bin - ps -ef | grep libvirt - - make sure you get the below result. - > root@openstack2:/opt/nova-2010.2# ps -ef | grep libvirt - > root 1145 1 0 Nov27 ? 00:00:03 /usr/sbin/libvirtd -d -l - - if you would like to use qemu+ssh or other protocol, change "live_migration_uri" flag. - by adding "--live_migration_uri" to /etc/nova/nova.conf (Note that file name may be - changed depends on version). - - -2. command usage - - To get a list of physical hosts, - nova-manage host list - - To get a available pysical resource of each host, - nova-manage host show hostname - - an example result is below: - > HOST PROJECT cpu mem(mb) disk(gb) - > openstack2-c2 16 32232 878 - > openstack2-c2 admin 1 2048 20 - - The 1st line shows total amount of resource that the specified host has. - The 2nd and latter lines show usage resource per project. - This command is created because admins can decide which host should be - a destination of live migration. - - For live migration, - nova-manage instances live_migration ec2-id(i-xxxx) destination-host-name. - - once this command is executed, admins will check the status through - euca-describe-instances. The status is changed from 'running' to 'migrating', - and changed to 'running' when live migration finishes. - Note that it depends on an environment how long it takes to live migration finishes. 
- If it finishes too fast, admins cannot see 'migrating' status. - - > root@openstack2:/opt/nova-2010.2# euca-describe-instances - > Reservation:r-qlg3favp - > RESERVATION r-qlg3favp admin - > INSTANCE i-2ah453 ami-tiny 172.19.0.134 10.0.0.3 - > migrating testkey (admin, openstack2-c2) 0 m1.small - > 2010-11-28 16:09:16 openstack2-c2 - - When live migration finishes successfully, admin can check the last part of - euca-describe-instances which shows physical node information. - ( only when euca-describe-instances is executed by admin user ) - Admins also can check live migration source compute node logfile which may - show a log. - > Live migration i-xxxx to DESTHOST finishes successfully. - - -3. error checking - When live migration fails somehow, error messages are shown at: - a. scheduler logfile - b. source compute node logfile - c. dest compute node logfile - diff --git a/nova/livemigration_test/SI/picture.pptx b/nova/livemigration_test/SI/picture.pptx deleted file mode 100644 index b47bec9b5..000000000 Binary files a/nova/livemigration_test/SI/picture.pptx and /dev/null differ diff --git a/nova/livemigration_test/SI/testCase_SI.xls b/nova/livemigration_test/SI/testCase_SI.xls deleted file mode 100644 index be98b391a..000000000 Binary files a/nova/livemigration_test/SI/testCase_SI.xls and /dev/null differ diff --git a/nova/livemigration_test/SI/testParameterSheet_SI.xls b/nova/livemigration_test/SI/testParameterSheet_SI.xls deleted file mode 100644 index 400b43b43..000000000 Binary files a/nova/livemigration_test/SI/testParameterSheet_SI.xls and /dev/null differ diff --git a/nova/livemigration_test/SI/utils/demo-firstboot.sh b/nova/livemigration_test/SI/utils/demo-firstboot.sh deleted file mode 100755 index 3a6f7fb0b..000000000 --- a/nova/livemigration_test/SI/utils/demo-firstboot.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -DIR=/opt/nova-2010.1 - -# 1. 管理者ユーザを作成する -# nova-manage user admin ユーザ名 access-key secret-key -# -#$DIR/bin/nova-manage user admin admin admin admin - -# 2. プロジェクトを作成する -# nova-manage create project プロジェクト名 プロジェクトに属するユーザ名 -# -#$DIR/bin/nova-manage project create admin admin - -# 3. クラウドを使うための認証情報を生成する -# nova-manage project environment プロジェクト名 ユーザ名 認証情報を格納するファイル -# -#$DIR/bin/nova-manage project environment admin admin $DIR/novarc - -# 4. 認証情報の読み込み -. $DIR/novarc - -# 5. プロジェクト用仮想マシンネットワークの作成を行う -# nova-manage user admin ユーザ名 access-key secret-key -# -$DIR/bin/nova-manage network create 10.0.0.0/8 3 16 - -# 6. 初回ログインにはSSHの公開鍵認証が必要 -# -if [ "" == "`euca-describe-keypairs | grep testkey`" ]; then - euca-add-keypair testkey > testkey.pem -fi - -# 7. -for i in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do - sudo ip addr del $i dev eth0 2> /dev/null -done - - diff --git a/nova/livemigration_test/SI/utils/demo-runInstance.sh b/nova/livemigration_test/SI/utils/demo-runInstance.sh deleted file mode 100755 index 171291262..000000000 --- a/nova/livemigration_test/SI/utils/demo-runInstance.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -DIR=/opt/nova-2010.1 - -function inc_assigned(){ - assigned=`expr $assigned + 1` -} - - -# 1. 認証情報の読み込み -. $DIR/novarc - -# 3. 仮想マシンの起動 -# -ret=`euca-run-instances -t m1.small -k testkey ami-centos` -#ret=`euca-run-instances -t m1.small -k testkey ami-tiny` - -# 4. 
仮想マシン用IPの確保 -# 未登録なら登録しておく -registered=`euca-describe-addresses` -for ip in 172.19.0.134 172.19.0.135 172.19.0.136 172.19.0.137 ; do - - not_registered=`echo $registered | grep $ip` - if [ "" == "$not_registered" ]; then - echo "[INFO] registed $ip" - $DIR/bin/nova-manage floating create `hostname` $ip - fi -done - -# 5. IPの割当 -echo 0 > /tmp/demo-runinstance -euca-describe-addresses | grep -v reserved | while read line; do - # 割り当てられてないものを仮想マシンに割り当てる - ip=`echo $line | cut -d ' ' -f 2` - id=`echo $ret | cut -d ' ' -f 5` - if [ "" == "`echo $id | grep i- `" ] ; then - echo "[INFO] try again" $ret - break - fi - echo "[INFO] assigned to ipaddr($ip) to instance($id) " - euca-associate-address -i $id $ip - echo 1 > /tmp/demo-runinstance - break -done - -echo $assigned -if [ 0 -eq "`cat /tmp/demo-runinstance`" ] ; then - echo "[INFO] address is full." -fi -rm -rf /tmp/demo-runinstance - - -# 6. FWの設定 -euca-authorize -P tcp -p 22 default 2> /dev/null > /dev/null -euca-authorize -P tcp -p 80 default 2> /dev/null > /dev/null -euca-authorize -P tcp -p 5555 default 2> /dev/null > /dev/null - diff --git a/nova/livemigration_test/SI/utils/nova-manage.conf b/nova/livemigration_test/SI/utils/nova-manage.conf deleted file mode 100644 index 9f8a02b96..000000000 --- a/nova/livemigration_test/SI/utils/nova-manage.conf +++ /dev/null @@ -1,18 +0,0 @@ ---verbose ---nodaemon ---dhcpbridge_flagfile=/etc/nova/nova-manage.conf ---FAKE_subdomain=ec2 ---libvirt_type=qemu ---no_internet_conn=True ---public_netif=eth0 ---public_interface=eth0 - ---cc-host=172.19.0.131 ---routing_source_ip=172.19.0.131 ---sql_connection=mysql://root:nova@172.19.0.131/nova ---rabbit_host=172.19.0.131 ---redis_host=172.19.0.131 ---s3_host=172.19.0.131 ---auth_driver=nova.auth.ldapdriver.LdapDriver ---ldap_url=ldap://172.19.0.131 - diff --git a/nova/livemigration_test/SI/utils/nova.conf b/nova/livemigration_test/SI/utils/nova.conf deleted file mode 100644 index c66bfbc53..000000000 --- a/nova/livemigration_test/SI/utils/nova.conf +++ /dev/null @@ -1,10 +0,0 @@ ---verbose ---nodaemon ---dhcpbridge_flagfile=/opt/nova-2010.4//bin/nova.conf ---network_manager=nova.network.manager.VlanManager ---cc_host=172.19.0.131 ---routing_source_ip=172.19.0.131 ---sql_connection=mysql://root:nova@localhost/nova ---auth_driver=nova.auth.ldapdriver.LdapDriver ---libvirt_type=qemu ---public_interface=eth0 diff --git a/nova/livemigration_test/SI/utils/nova.sh b/nova/livemigration_test/SI/utils/nova.sh deleted file mode 100755 index b8e2e9f26..000000000 --- a/nova/livemigration_test/SI/utils/nova.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/env bash -DIR=`pwd` -CMD=$1 -SOURCE_BRANCH=lp:nova -if [ -n "$2" ]; then - SOURCE_BRANCH=$2 -fi -#DIRNAME=nova -DIRNAME="" -NOVA_DIR=$DIR/$DIRNAME -if [ -n "$3" ]; then - NOVA_DIR=$DIR/$3 -fi - -if [ ! -n "$HOST_IP" ]; then - # NOTE(vish): This will just get the first ip in the list, so if you - # have more than one eth device set up, this will fail, and - # you should explicitly set HOST_IP in your environment - HOST_IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` -fi - -#USE_MYSQL=${USE_MYSQL:-0} -USE_MYSQL=1 -MYSQL_PASS=${MYSQL_PASS:-nova} -TEST=${TEST:-0} -#USE_LDAP=${USE_LDAP:-0} -USE_LDAP=1 -LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu} -NET_MAN=${NET_MAN:-VlanManager} -# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface -# below but make sure that the interface doesn't already have an -# ip or you risk breaking things. 
-# FLAT_INTERFACE=eth0 - -if [ "$USE_MYSQL" == 1 ]; then - SQL_CONN=mysql://root:$MYSQL_PASS@localhost/nova -else - SQL_CONN=sqlite:///$NOVA_DIR/nova.sqlite -fi - -if [ "$USE_LDAP" == 1 ]; then - AUTH=ldapdriver.LdapDriver -else - AUTH=dbdriver.DbDriver -fi - -mkdir -p /etc/nova -cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF ---verbose ---nodaemon ---dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf ---network_manager=nova.network.manager.$NET_MAN ---cc_host=$HOST_IP ---routing_source_ip=$HOST_IP ---sql_connection=$SQL_CONN ---auth_driver=nova.auth.$AUTH ---libvirt_type=$LIBVIRT_TYPE ---public_interface=eth0 -NOVA_CONF_EOF - -if [ -n "$FLAT_INTERFACE" ]; then - echo "--flat_interface=$FLAT_INTERFACE" >>$NOVA_DIR/bin/nova.conf -fi - -if [ "$CMD" == "branch" ]; then - sudo apt-get install -y bzr - rm -rf $NOVA_DIR - bzr branch $SOURCE_BRANCH $NOVA_DIR - cd $NOVA_DIR - mkdir -p $NOVA_DIR/instances - mkdir -p $NOVA_DIR/networks -fi - -# You should only have to run this once -if [ "$CMD" == "install" ]; then - sudo apt-get install -y python-software-properties - sudo add-apt-repository ppa:nova-core/ppa - sudo apt-get update - sudo apt-get install -y dnsmasq kpartx kvm gawk iptables ebtables - sudo apt-get install -y user-mode-linux kvm libvirt-bin - sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server - sudo apt-get install -y lvm2 iscsitarget open-iscsi - echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget - sudo /etc/init.d/iscsitarget restart - sudo modprobe kvm - sudo /etc/init.d/libvirt-bin restart - sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot - sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy - sudo apt-get install -y python-libvirt python-libxml2 python-routes - if [ "$USE_MYSQL" == 1 ]; then - cat </etc/nova/nova-manage.conf << NOVA_CONF_EOF ---verbose ---nodaemon ---dhcpbridge_flagfile=/etc/nova/nova-manage.conf ---FAKE_subdomain=ec2 ---libvirt_type=qemu ---no_internet_conn=True ---public_netif=eth0 ---public_interface=eth0 - ---cc-host=$HOST_IP ---routing_source_ip=$HOST_IP ---sql_connection=mysql://root:nova@$HOST_IP/nova ---rabbit_host=$HOST_IP ---redis_host=$HOST_IP ---s3_host=$HOST_IP ---auth_driver=nova.auth.ldapdriver.LdapDriver ---ldap_url=ldap://$HOST_IP - -NOVA_CONF_EOF - -$DIR/bin/nova-compute --flagfile=/etc/nova/nova-manage.conf - diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py deleted file mode 100644 index 69ee876d1..000000000 --- a/nova/livemigration_test/UT/computeManager.test.py +++ /dev/null @@ -1,411 +0,0 @@ -#!/usr/bin/python -# -*- coding: UTF-8 -*- - - -import sys -import os -import unittest -import commands -import re -import logging - -from mock import Mock -import twisted - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - -try: - print - print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' 
\ - % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.compute.manager import ComputeManager - from nova.virt.libvirt_conn import LibvirtConnection - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - - def write(self, arg): - self.buffer += arg - - def writelines(self, arg): - self.buffer += arg - - def flush(self): - print 'flush' - self.buffer = '' - - -class tmpStderr(tmpStdout): - def write(self, arg): - self.buffer += arg - - def flush(self): - pass - - def realFlush(self): - self.buffer = '' - -dummyCallReturnValue={ 0:True } -dummyCallCount=0 -def dummyCall(context, topic, method): - global dummyCallReturnValue, dummyCallCount - if dummyCallCount in dummyCallReturnValue.keys() : - ret = dummyCallReturnValue[ dummyCallCount ] - dummyCallCount += 1 - return ret - else : - dummyCallCount += 1 - return False - - -class ComputeTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - #if self.stdout is None: - # self.__class__.stdout = tmpStdout() - #self.stdoutBak = sys.stdout - #sys.stdout = self.stdout - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - self.host = 'openstack2-api' - if self.manager is None: - self.__class__.manager = ComputeManager(host=self.host) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - for key, val in [('name', 'host1'), ('cpu', 5), - ('memory_mb', 20480), ('hdd_gb', 876)]: - self.host1.__setitem__(key, val) - - self.host2 = Host() - for key, val in [('name', 'host2'), ('cpu', 5), - ('memory_mb', 20480), ('hdd_gb', 876)]: - self.host2.__setitem__(key, val) - - self.instance1 = Instance() - for key, val in [('id', 1), ('host', 'host1'), - ('hostname', 'i-12345'), ('state', power_state.RUNNING), - ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), - ('hdd_gb', 5), ('internal_id', 12345)]: - self.instance1.__setitem__(key, val) - - self.instance2 = Instance() - for key, val in [('id', 2), ('host', 'host1'), - ('hostname', 'i-12345'), ('state', power_state.RUNNING), - ('project_id', 'testPJ'), ('vcpus', 3), ('memory_mb', 1024), - ('hdd_gb', 5)]: - self.instance2.__setitem__(key, val) - - self.fixed_ip1 = FixedIp() - for key, val in [('id', 1), ('address', '1.1.1.1'), - ('network_id', '1'), ('instance_id', 1)]: - self.fixed_ip1.__setitem__(key, val) - - self.vol1 = Volume() - for key, val in [('id', 1), ('ec2_id', 'vol-qijjuc7e'), - ('availability_zone', 'nova'), ('host', 'host1')]: - self.vol1.__setitem__(key, val) - - self.vol2 = Volume() - for key, val in [('id', 2), ('ec2_id', 'vol-qi22222'), - ('availability_zone', 'nova'), ('host', 'host1')]: - self.vol2.__setitem__(key, val) - - self.secgrp1 = Volume() - for key, val in [('id', 1), ('ec2_id', 'default')]: - self.secgrp1.__setitem__(key, val) - - self.secgrp2 = Volume() - for key, val in [('id', 2), ('ec2_id', 'def2')]: - self.secgrp2.__setitem__(key, val) - - self.netref1 = Network() - - def 
setMocks(self): - - # mocks for pre_live_migration - self.ctxt = context.get_admin_context() - db.instance_get = Mock(return_value=self.instance1) - db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) - db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) - db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) - db.security_group_get_by_instance \ - = Mock(return_value=[self.secgrp1, self.secgrp2]) - self.manager.driver.setup_nwfilters_for_instance \ - = Mock(return_value=None) - self.manager.driver.nwfilter_for_instance_exists = Mock(return_value=None) - self.manager.network_manager.setup_compute_network \ - = Mock(return_value=None) - # mocks for live_migration_ - rpc.call = Mock(return_value=True) - db.instance_set_state = Mock(return_value=True) - - # ---> test for nova.compute.manager.pre_live_migration() - def test01(self): - """01: NotFound error occurs on finding instance on DB. """ - - db.instance_get = Mock(side_effect=exception.NotFound('ERR')) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test02(self): - """02: NotAuthrized occurs on finding volume on DB. """ - - db.volume_get_by_ec2_id \ - = Mock(side_effect=exception.NotAuthorized('ERR')) - - self.assertRaises(exception.NotAuthorized, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test03(self): - """03: Unexpected exception occurs on finding volume on DB. """ - - db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) - - self.assertRaises(TypeError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test04(self): - """04: no volume and fixed ip found on DB, """ - - db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) - db.instance_get_fixed_address = Mock(return_value=None) - - self.assertRaises(rpc.RemoteError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - c1 = (0 <= sys.stderr.buffer.find('has no volume')) - - self.assertEqual(c1, True) - - def test05(self): - """05: volume found and no fixed_ip found on DB. """ - - db.instance_get_fixed_address \ - = Mock(side_effect=exception.NotFound('ERR')) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test06(self): - """06: self.driver.setup_nwfilters_for_instance causes NotFound. """ - self.manager.driver.setup_nwfilters_for_instance \ - = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test07(self): - """07: self.network_manager.setup_compute_network causes ProcessExecutionError. """ - self.manager.network_manager.setup_compute_network \ - = Mock(side_effect=exception.ProcessExecutionError("ERR")) - - self.assertRaises(exception.ProcessExecutionError, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - - def test08(self): - """08: self.manager.network_manager.setup_compute_network - exception.NotFound. """ - self.manager.network_manager.setup_compute_network \ - = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.pre_live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - # those 2 cases are omitted : - # self.driver.setup_nwfilters_for_instance causes - # twisted.python.failure.Failure. 
- # self.driver.refresh_security_group causes twisted.python.failure.Failure. - # - # twisted.python.failure.Failure can not be used with assertRaises, - # it doesnt have __call___ - # - - def test09(self): - """09: volume/fixed_ip found on DB, all procedure finish - successfully.. """ - - result = self.manager.pre_live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - self.assertEqual(result, True) - - # ---> test for nova.compute.manager.live_migration() - - def test10(self): - """10: rpc.call(pre_live_migration returns Error(Not None). """ - rpc.call = Mock(side_effect=exception.NotFound("ERR")) - - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test11(self): - """11: if rpc.call returns rpc.RemoteError. """ - - rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) - db.instance_set_state = Mock(return_value=True) - result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - c1 = (None == result) - c2 = (0 <= sys.stderr.buffer.find('err at')) - self.assertEqual(c1 and c2, True) - - def test12(self): - """12: if rpc.call returns rpc.RemoteError and instance_set_state - also ends up err. (then , unexpected err occurs, in this case - TypeError) - """ - rpc.call = Mock(return_value=rpc.RemoteError(None, None, None)) - db.instance_set_state = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test13(self): - """13: if wait for pre_live_migration, but timeout. """ - rpc.call = dummyCall - - db.instance_get = Mock(return_value=self.instance1) - - result = self.manager.live_migration(self.ctxt, 'dummy_ec2_id', - 'host2') - c1 = (None == result) - c2 = (0 <= sys.stderr.buffer.find('Timeout for')) - self.assertEqual(c1 and c2, True) - - def test14(self): - """14: if db_instance_get issues NotFound. - """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test15(self): - """15: if rpc.call returns True, and instance_get() cause other - exception. (Unexpected case - b/c it already checked by - nova-manage) - """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(side_effect=TypeError("ERR")) - - self.assertRaises(TypeError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test16(self): - """16: if rpc.call returns True, and live_migration issues - ProcessExecutionError. """ - rpc.call = Mock(return_value=True) - db.instance_get = Mock(return_value=self.instance1) - ret = self.manager.driver.live_migration \ - = Mock(side_effect=utils.ProcessExecutionError("ERR")) - - self.assertRaises(utils.ProcessExecutionError, - self.manager.live_migration, - self.ctxt, - 'dummy_ec2_id', - 'host2') - - def test17(self): - """17: everything goes well. """ - self.manager.driver.live_migration = Mock(return_value=True) - ret = self.manager.live_migration(self.ctxt, 'i-12345', 'host1') - self.assertEqual(True, True) - - def tearDown(self): - """common terminating method. 
""" - self.stderr.realFlush() - sys.stderr = self.stderrBak - #sys.stdout = self.stdoutBak - -if __name__ == '__main__': - logging.getLogger().setLevel(logging.DEBUG) - #unittest.main() - - suite = unittest.TestLoader().loadTestsFromTestCase(ComputeTestFunctions) - unittest.TextTestRunner(verbosity=2).run(suite) - - #suite = unittest.TestSuite() - #suite.addTest(ComputeTestFunctions("test15")) - #suite.addTest(ComputeTestFunctions("test16")) - #unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/nova/livemigration_test/UT/libvirtConnection.test.py b/nova/livemigration_test/UT/libvirtConnection.test.py deleted file mode 100644 index 0b737e140..000000000 --- a/nova/livemigration_test/UT/libvirtConnection.test.py +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/python -# -*- coding: UTF-8 -*- - - -import sys -import os -import unittest -import commands -import re -import logging -import libvirt - -from mock import Mock -import twisted - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - - -try : - print - print 'checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.compute.manager import ComputeManager - from nova.virt import libvirt_conn - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def writelines(self, arg): - self.buffer += arg - def flush(self): - print 'flush' - self.buffer = '' - -class tmpStderr(tmpStdout): - def write(self,arg): - self.buffer += arg - def flush(self): - pass - def realFlush(self): - self.buffer = '' - -class DummyLibvirtConn(object): - nwfilterLookupByName = None - def __init__(self): - pass - - -class LibvirtConnectionTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. 
""" - - #if self.stdout is None: - # self.__class__.stdout = tmpStdout() - #self.stdoutBak = sys.stdout - #sys.stdout = self.stdout - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - self.host = 'openstack2-api' - if self.manager is None: - self.__class__.manager = libvirt_conn.get_connection(False) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - for key, val in [ ('name', 'host1'), ('cpu', 5), ('memory_mb', 20480), ('hdd_gb', 876) ]: - self.host1.__setitem__(key, val) - - self.instance1 = Instance() - for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5), ('internal_id',12345) ]: - self.instance1.__setitem__(key, val) - - - self.instance2 = Instance() - for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('hdd_gb', 5) ]: - self.instance2.__setitem__(key, val) - - - self.fixed_ip1 = FixedIp() - for key, val in [ ('id', 1), ('address', '1.1.1.1'), ('network_id', '1'), - ('instance_id', 1)]: - self.fixed_ip1.__setitem__(key, val) - - self.floating_ip1 = FloatingIp() - for key, val in [ ('id', 1), ('address', '1.1.1.200') ]: - self.floating_ip1.__setitem__(key, val) - - self.netref1 = Network() - for key, val in [ ('id', 1) ]: - self.netref1.__setitem__(key, val) - - - def setMocks(self): - - self.ctxt = context.get_admin_context() - db.instance_get_fixed_address = Mock(return_value = '1.1.1.1') - db.fixed_ip_update = Mock(return_value = None) - db.fixed_ip_get_network = Mock(return_value = self.netref1) - db.network_update = Mock(return_value = None) - db.instance_get_floating_address = Mock(return_value = '1.1.1.200') - db.floating_ip_get_by_address = Mock(return_value = self.floating_ip1) - db.floating_ip_update = Mock(return_value = None) - db.instance_update = Mock(return_value = None) - - - # ---> test for nova.virt.libvirt_conn.nwfilter_for_instance_exists() - - def test01(self): - """01: libvirt.libvirtError occurs. """ - - self.manager._wrapped_conn = DummyLibvirtConn() - self.manager._test_connection = Mock(return_value=True) - self.manager._conn.nwfilterLookupByName = \ - Mock(side_effect=libvirt.libvirtError("ERR")) - ret = self.manager.nwfilter_for_instance_exists(self.instance1) - self.assertEqual(ret, False) - - def test02(self): - """02: libvirt.libvirtError not occurs. """ - - self.manager._wrapped_conn = DummyLibvirtConn() - self.manager._test_connection = Mock(return_value=True) - self.manager._conn.nwfilterLookupByName = \ - Mock(return_value=True) - ret = self.manager.nwfilter_for_instance_exists(self.instance1) - self.assertEqual(ret, True) - - # ---> test for nova.virt.libvirt_conn.live_migraiton() - - def test03(self): - """03: Unexpected exception occurs on finding volume on DB. 
""" - - utils.execute = Mock( side_effect=exception.ProcessExecutionError('ERR') ) - - self.assertRaises(exception.ProcessExecutionError, - self.manager._live_migration, - self.ctxt, - self.instance1, - 'host2') - - # ---> other case cannot be tested because live_migraiton - # is synchronized/asynchronized method are mixed together - - - # ---> test for nova.virt.libvirt_conn._post_live_migraiton - - def test04(self): - """04: instance_ref is not nova.db.sqlalchemy.models.Instances""" - - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.ctxt, - "dummy string", - 'host2') - - def test05(self): - """05: db.instance_get_fixed_address return None""" - - db.instance_get_fixed_address = Mock( return_value=None ) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('fixed_ip is not found')) - self.assertEqual(c1 and c2, True) - - def test06(self): - """06: db.instance_get_fixed_address raises NotFound""" - - db.instance_get_fixed_address = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host2') - - def test07(self): - """07: db.instance_get_fixed_address raises Unknown exception""" - - db.instance_get_fixed_address = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def test08(self): - """08: db.fixed_ip_update return NotFound. """ - - db.fixed_ip_update = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def test09(self): - """09: db.fixed_ip_update return NotAuthorized. """ - db.fixed_ip_update = Mock( side_effect=exception.NotAuthorized('ERR') ) - self.assertRaises(exception.NotAuthorized, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def test10(self): - """10: db.fixed_ip_update return Unknown exception. """ - db.fixed_ip_update = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def test11(self): - """11: db.fixed_ip_get_network causes NotFound. """ - - db.fixed_ip_get_network = Mock( side_effect=exception.NotFound('ERR') ) - self.assertRaises(exception.NotFound, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - # not tested db.fixed_ip_get_network raises NotAuthorized - # because same test has been done at previous test. - - def test12(self): - """12: db.fixed_ip_get_network causes Unknown exception. """ - - db.fixed_ip_get_network = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def test13(self): - """13: db.network_update raises Unknown exception. """ - db.network_update = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def test14(self): - """14: db.instance_get_floating_address raises NotFound. 
""" - db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - - def test15(self): - """15: db.instance_get_floating_address returns None. """ - - db.instance_get_floating_address = Mock( return_value=None ) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('floating_ip is not found')) - self.assertEqual(c1 and c2, True) - - def test16(self): - """16: db.instance_get_floating_address raises NotFound. """ - - db.instance_get_floating_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - def test17(self): - """17: db.instance_get_floating_address raises Unknown exception. """ - db.instance_get_floating_address = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - - def test18(self): - """18: db.floating_ip_get_by_address raises NotFound """ - - db.floating_ip_get_by_address = Mock(side_effect=exception.NotFound("ERR")) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('doesnt have floating_ip')) - self.assertEqual(c1 and c2, True) - - def test19(self): - """19: db.floating_ip_get_by_address raises Unknown exception. """ - db.floating_ip_get_by_address = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - - def test20(self): - """20: db.floating_ip_update raises Unknown exception. - """ - db.floating_ip_update = Mock(side_effect=TypeError("ERR")) - ret = self.manager._post_live_migration(self.ctxt, self.instance1, 'host1') - c1 = (ret == None) - c2 = (0 <= sys.stderr.buffer.find('Live migration: Unexpected error')) - self.assertEqual(c1 and c2, True) - - def test21(self): - """21: db.instance_update raises unknown exception. """ - - db.instance_update = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager._post_live_migration, - self.ctxt, - self.instance1, - 'host1') - - def tearDown(self): - """common terminating method. 
""" - self.stderr.realFlush() - sys.stderr = self.stderrBak - #sys.stdout = self.stdoutBak - -if __name__ == '__main__': - logging.getLogger().setLevel(logging.DEBUG) - #unittest.main() - - suite = unittest.TestLoader().loadTestsFromTestCase(LibvirtConnectionTestFunctions) - unittest.TextTestRunner(verbosity=2).run(suite) - - #suite = unittest.TestSuite() - #suite.addTest(LibvirtConnectionTestFunctions("test14")) - #suite.addTest(LibvirtConnectionTestFunctions("test16")) - #unittest.TextTestRunner(verbosity=2).run(suite) - - diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py deleted file mode 100644 index 6db15cea0..000000000 --- a/nova/livemigration_test/UT/nova-manage.test.py +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# -*- coding: UTF-8 -*- - -NOVA_DIR='/opt/nova-2010.4' - -import sys -import os -import unittest -import commands -import re - -from mock import Mock - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - - -try : - print - print 'Testing %s/bin/nova-manage, set the NOVA_DIR properly..' % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def flush(self): - self.buffer = '' - -class tmpStderr(tmpStdout): - def write(self, arg): - self.buffer += arg - def flush(self): - pass - def realFlush(self): - self.buffer = '' - - -class NovaManageTestFunctions(unittest.TestCase): - - stdout = None - stdoutBak = None - stderr = None - stderrBak = None - - hostCmds = None - - # 共通の初期化処理 - def setUp(self): - """common init method. """ - - commands.getstatusoutput('cp -f %s/bin/nova-manage %s' % ( NOVA_DIR, self.getNovaManageCopyPath() )) - commands.getstatusoutput('touch %s' % self.getInitpyPath() ) - try : - import bin.novamanagetest - except: - print 'Fail to import nova-manage . 
check bin/nova-manage exists' - raise - - # replace stdout for checking nova-manage output - if self.stdout is None : - self.__class__.stdout = tmpStdout() - self.stdoutBak = sys.stdout - sys.stdout = self.stdout - - # replace stderr for checking nova-manage output - if self.stderr is None: - self.__class__.stderr = tmpStderr() - self.stderrBak = sys.stderr - sys.stderr = self.stderr - - # prepare test data - self.setTestData() - - - def setTestData(self): - import bin.novamanagetest - - if self.hostCmds is None : - self.__class__.hostCmds = bin.novamanagetest.HostCommands() - self.instanceCmds = bin.novamanagetest.InstanceCommands() - - self.host1 = Host() - self.host1.__setitem__('name', 'host1') - - self.host2 = Host() - self.host2.__setitem__('name', 'host2') - - self.instance1 = Instance() - self.instance1.__setitem__('id', 1) - self.instance1.__setitem__('host', 'host1') - self.instance1.__setitem__('hostname', 'i-12345') - self.instance1.__setitem__('state', power_state.NOSTATE) - self.instance1.__setitem__('state_description', 'running') - - self.instance2 = Instance() - self.instance2.__setitem__('id', 2) - self.instance2.__setitem__('host', 'host1') - self.instance2.__setitem__('hostname', 'i-12345') - self.instance2.__setitem__('state', power_state.RUNNING) - self.instance2.__setitem__('state_description', 'pending') - - self.instance3 = Instance() - self.instance3.__setitem__('id', 3) - self.instance3.__setitem__('host', 'host1') - self.instance3.__setitem__('hostname', 'i-12345') - self.instance3.__setitem__('state', power_state.RUNNING) - self.instance3.__setitem__('state_description', 'running') - - db.host_get_all = Mock(return_value=[self.host1, self.host2]) - - def getInitpyPath(self): - return '%s/bin/__init__.py' % NOVA_DIR - - def getNovaManageCopyPath(self): - return '%s/bin/novamanagetest.py' % NOVA_DIR - - # -----> Test for nova-manage host list - - def test01(self): - """01: Got some host lists. """ - - self.hostCmds.list() - - c1 = (2 == self.stdout.buffer.count('\n')) - c2 = (0 <= self.stdout.buffer.find('host1')) - c3 = (0 <= self.stdout.buffer.find('host2')) - self.assertEqual(c1 and c2 and c3, True) - - def test02(self): - """02: Got empty lsit. 
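-           (db.host_get_all is mocked to return an empty list, so the host list
-            command should print nothing)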
""" - - db.host_get_all = Mock(return_value=[]) - self.hostCmds.list() - - # result should be empty - c = (0 == len(self.stdout.buffer) ) - self.assertEqual(c, True) - - def test03(self): - """03: Got notFound """ - - db.host_get_all = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, self.hostCmds.list) - - # --------> Test For nova-manage host show - - def test04(self): - """04: args are not enough(nova-manage host show) """ - self.assertRaises(TypeError, self.hostCmds.show ) - - - def test05(self): - """05: nova-manage host show not-registered-host, and got an error""" - - rpc.call = Mock(return_value={'ret' : False, 'msg': 'ERR'} ) - self.hostCmds.show('host1') - self.assertEqual( self.stdout.buffer[:3]=='ERR', True ) - - - def test06(self): - """06: nova-manage host show registerd-host, and no project uses the host""" - - dic = {'ret': True, - 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, - 'usage': {}} - - rpc.call = Mock(return_value=dic ) - self.hostCmds.show('host1') - - # result should be : - # HOST PROJECT cpu mem(mb) disk(gb) - # host1 1 2 3 - line = self.stdout.buffer.split('\n')[1] - line = re.compile('\t+').sub(' ', line).strip() - c1 = ( 'host1 1 2 3' == line ) - c2 = ( self.stdout.buffer.count('\n') == 2 ) - - self.assertEqual( c1 and c2, True ) - - def test07(self): - """07: nova-manage host show registerd-host, - and some projects use the host - """ - dic = {'ret': True, - 'phy_resource': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, - 'usage': {'p1': {'vcpus':1, 'memory_mb':2, 'local_gb':3}, - 'p2': {'vcpus':1, 'memory_mb':2, 'local_gb':3} }} - - rpc.call = Mock(return_value=dic ) - self.hostCmds.show('host1') - - # result should be : - # HOST PROJECT cpu mem(mb) disk(gb) - # host1 1 2 3 - # host1 p1 1 2 3 - # host1 p2 4 5 6 - line = self.stdout.buffer.split('\n')[1] - ret = re.compile('\t+').sub(' ', line).strip() - c1 = ( 'host1 1 2 3' == ret ) - - line = self.stdout.buffer.split('\n')[2] - line = re.compile('\t+').sub(' ', line).strip() - c2 = ( 'host1 p1 1 2 3' == line ) or ( 'host1 p2 1 2 3' == line ) - - line = self.stdout.buffer.split('\n')[3] - ret = re.compile('\t+').sub(' ', line).strip() - c3 = ( 'host1 p1 1 2 3' == ret ) or ( 'host1 p2 1 2 3' == ret ) - - self.assertEqual( c1 and c2 and c3, True ) - - def test08(self): - """08: nova-manage host show registerd-host, and rpc.call returns None - (unexpected error) - """ - rpc.call = Mock(return_value=None ) - self.hostCmds.show('host1') - c1 = ( 0 <= self.stdout.buffer.find('Unexpected error') ) - self.assertEqual( c1, True ) - - # ----------> Test for bin/nova-manage instance live_migration - - def test09(self): - """09: arguments are not enough(nova-manage instances live_migration) - """ - self.assertRaises(TypeError, self.instanceCmds.live_migration ) - - def test10(self): - """10: arguments are not enough(nova-manage instances live_migration ec2_id) - """ - self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) - - def test11(self): - """11: nova-manage instances live_migration ec2_id(invalid id) host""" - - db.instance_get_by_internal_id = Mock( side_effect=exception.NotFound('ERR') ) - try : - self.instanceCmds.live_migration('i-xxx', 'host1') - except exception.NotFound, e: - c1 = (0 < str(e.args).find('is not found') ) - self.assertTrue(c1, True) - return False - - def test12(self): - """12: nova-manage instances live_migration ec2_id host - and db.instance_get_by_internal_id raises unexpected exceptioin. 
- """ - db.instance_get_by_internal_id = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, self.instanceCmds.live_migration, 'i-xxx' ) - - def test13(self): - """13: nova-manage instances live_migration ec2_id host, - rpc.call raises RemoteError because destination doesnt have enough resource. - """ - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - rpc.call = Mock(return_value = rpc.RemoteError(TypeError, 'val', 'traceback')) - self.assertRaises(rpc.RemoteError, self.instanceCmds.live_migration, 'i-xxx', 'host2' ) - - - def test14(self): - """14: nova-manage instances live_migration ec2_id host, - everything goes well, ang gets success messages. - """ - db.host_get_by_name = Mock(return_value = self.host1) - db.instance_get_by_internal_id = Mock( return_value = self.instance3 ) - rpc.call = Mock(return_value = None) - - self.instanceCmds.live_migration('i-12345', 'host2') - c1 = (0 <= self.stdout.buffer.find('Finished all procedure') ) - self.assertEqual( c1, True ) - - - def tearDown(self): - """common terminating method. """ - commands.getstatusoutput('rm -rf %s' % self.getInitpyPath() ) - commands.getstatusoutput('rm -rf %s' % self.getNovaManageCopyPath() ) - sys.stdout.flush() - sys.stdout = self.stdoutBak - self.stderr.realFlush() - sys.stderr = self.stderrBak - -if __name__ == '__main__': - #unittest.main() - suite = unittest.TestLoader().loadTestsFromTestCase(NovaManageTestFunctions) - unittest.TextTestRunner(verbosity=3).run(suite) - - diff --git a/nova/livemigration_test/UT/schedulerManager.test.py b/nova/livemigration_test/UT/schedulerManager.test.py deleted file mode 100644 index 33a38c660..000000000 --- a/nova/livemigration_test/UT/schedulerManager.test.py +++ /dev/null @@ -1,456 +0,0 @@ -#!/usr/bin/python -# -*- coding: UTF-8 -*- - - -import sys -import os -import unittest -import commands -import re -import libvirt - -from mock import Mock - -# getting /nova-inst-dir -NOVA_DIR = os.path.abspath(sys.argv[0]) -for i in range(4): - NOVA_DIR = os.path.dirname(NOVA_DIR) - -try : - print - print 'Checking %s/bin/nova-manage exists, set the NOVA_DIR properly..' % NOVA_DIR - print - - sys.path.append(NOVA_DIR) - - from nova.scheduler.manager import SchedulerManager - - from nova import context - from nova import db - from nova import exception - from nova import flags - from nova import quota - from nova import utils - from nova.auth import manager - from nova.cloudpipe import pipelib - from nova import rpc - from nova.api.ec2 import cloud - from nova.compute import power_state - - from nova.db.sqlalchemy.models import * - -except: - print 'set correct NOVA_DIR in this script. ' - raise - - -class tmpStdout: - def __init__(self): - self.buffer = "" - def write(self,arg): - self.buffer += arg - def flush(self): - self.buffer = '' - - -class SchedulerTestFunctions(unittest.TestCase): - - manager = None - - # 共通の初期化処理 - def setUp(self): - """common init method. 
""" - - self.host = 'openstack2-api' - if self.manager is None: - self.manager = SchedulerManager(host=self.host) - - self.setTestData() - self.setMocks() - - def setTestData(self): - - self.host1 = Host() - self.host1.__setitem__('name', 'host1') - self.host1.__setitem__('vcpus', 5) - self.host1.__setitem__('memory_mb', 20480) - self.host1.__setitem__('local_gb', 876) - self.host1.__setitem__('cpu_info', 1) - - self.host2 = Host() - self.host2.__setitem__('name', 'host2') - self.host2.__setitem__('vcpus', 5) - self.host2.__setitem__('memory_mb', 20480) - self.host2.__setitem__('local_gb', 876) - self.host2.__setitem__('hypervisor_type', 'QEMU') - self.host2.__setitem__('hypervisor_version', 12003) - xml="x86_64NehalemIntel" - self.host2.__setitem__('cpu_info', xml) - - self.instance1 = Instance() - for key, val in [ ('id', 1), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance1.__setitem__(key, val) - - - self.instance2 = Instance() - for key, val in [ ('id', 2), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ'), - ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance2.__setitem__(key, val) - - - self.instance3 = Instance() - for key, val in [ ('id', 3), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5), - ('internal_id', 123456), ('state', 1), - ('state_description', 'running') ]: - self.instance3.__setitem__(key, val) - - self.instance4 = Instance() - for key, val in [ ('id', 4), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5), - ('internal_id', 123456), ('state', 0), - ('state_description', 'running') ]: - self.instance4.__setitem__(key, val) - - self.instance5 = Instance() - for key, val in [ ('id', 5), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 5), - ('internal_id', 123456), ('state', 1), - ('state_description', 'migrating') ]: - self.instance5.__setitem__(key, val) - - self.instance6 = Instance() - for key, val in [ ('id', 6), ('host', 'host2'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 3), ('memory_mb', 1024), ('local_gb', 5) ]: - self.instance6.__setitem__(key, val) - - self.instance7 = Instance() - for key, val in [ ('id', 7), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), ('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 18432), ('local_gb', 5) ]: - self.instance7.__setitem__(key, val) - - self.instance8 = Instance() - for key, val in [ ('id', 8), ('host', 'host1'), ('hostname', 'i-12345'), - ('state', power_state.RUNNING), - ('state_description', 'running'),('project_id', 'testPJ2'), - ('vcpus', 1), ('memory_mb', 1024), ('local_gb', 866) ]: - self.instance8.__setitem__(key, val) - - self.service1 = Service() - for key, val in [ ('id', 1), ('host', 'host1'), ('binary', 'nova-compute'), - ('topic', 'compute')]: - self.service1.__setitem__(key, val) - - self.service2 = Service() - for key, val in [ ('id', 2), ('host', 'host2'), ('binary', 'nova-compute'), - ('topic', 'compute')]: - self.service1.__setitem__(key, val) - - def setMocks(self): - self.ctxt = 
context.get_admin_context() - # Mocks for has_enough_resource() - db.instance_get = Mock(return_value = self.instance3) - db.host_get_by_name = Mock(return_value = self.host2) - db.instance_get_all_by_host = Mock(return_value = [self.instance4, self.instance5] ) - - # Mocks for live_migration - db.service_get_all_by_topic = Mock(return_value = [self.service1] ) - self.manager.service_ip_up = Mock(return_value = True) - rpc.call = Mock(return_value=1) - db.instance_set_state = Mock(return_value = True) - self.manager.driver.service_is_up = Mock(return_value = True) - - def check_format(self, val): - """check result format of show_host_resource """ - - if dict != type(val) : - sys.stderr.write('return value is not dict') - return False - - if not val.has_key('ret'): - sys.stderr.write('invalid format(missing "ret"). ') - return False - - if not val['ret'] : - if not val.has_key('msg') : - sys.stderr.write( 'invalid format(missing "msg").' ) - return False - - else : - if not val.has_key('phy_resource') : - sys.stderr.write('invalid format(missing "phy_resource"). ') - return False - - if not val.has_key('usage'): - sys.stderr.write('invalid format(missing "usage"). ') - return False - - if not self._check_format(val['phy_resource']): - return False - - for key, dic in val['usage'].items() : - if not self._check_format(dic): - return False - return True - - def _check_format(self, val): - if dict != type(val) : - sys.stderr.write('return value is not dict') - return False - - for key in ['vcpus', 'memory_mb', 'local_gb']: - if not val.has_key(key) : - sys.stderr.write('invalid format(missing "%s"). ' % key ) - return False - - return True - - - # ---> test for nova.scheduler.manager.show_host_resource() - - def test01(self): - """01: get NotFound exception when dest host not found on DB """ - - db.host_get_by_name = Mock( side_effect=exception.NotFound('ERR') ) - result = self.manager.show_host_resource(self.ctxt, 'not-registered-host') - c1 = ( not result['ret'] ) - c2 = ( 0 == result['msg'].find('No such') ) - self.assertEqual(c1 and c2, True) - - def test02(self): - """02: get other exception if unexpected err. """ - - db.host_get_by_name = Mock( side_effect=TypeError('ERR') ) - self.assertRaises(TypeError, self.manager.show_host_resource, self.ctxt, 'host1' ) - - def test03(self): - """03: no instance found on dest host. """ - - db.host_get_by_name = Mock( return_value = self.host1 ) - db.instance_get_all_by_host = Mock( return_value=[]) - ret= self.manager.show_host_resource(self.ctxt, 'host1') - - c1 = self.check_format(ret) - v = ret['phy_resource'] - c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) - c3 = ( 0 == len(ret['usage']) ) - - self.assertEqual(c1 and c2 and c3, True) - - def test04(self): - """04: some instance found on dest host. 
""" - - db.host_get_by_name = Mock( return_value = self.host1 ) - db.instance_get_all_by_host = Mock( return_value=[ self.instance1, - self.instance2, - self.instance3] ) - - db.instance_get_vcpu_sum_by_host_and_project = Mock(return_value=3) - db.instance_get_memory_sum_by_host_and_project = Mock(return_value=1024) - db.instance_get_disk_sum_by_host_and_project = Mock(return_value=5) - - ret= self.manager.show_host_resource(self.ctxt, 'host1') - - c1 = self.check_format(ret) - v = ret['phy_resource'] - c2 = ( (5 == v['vcpus']) and (20480 == v['memory_mb']) and (876 == v['local_gb'])) - c3 = ( 2 == len(ret['usage']) ) - c4 = ( self.instance1['project_id'] in ret['usage'].keys()) - c5 = ( self.instance3['project_id'] in ret['usage'].keys()) - - self.assertEqual(c1 and c2 and c3 and c4 and c5, True) - - - # ---> test for nova.scheduler.manager.has_enough_resource() - def test05(self): - """05: when cpu is exccded some instance found on dest host. """ - - db.instance_get = Mock(return_value = self.instance6) - try : - self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - # dont do e.message.find(), because the below message is occured. - # DeprecationWarning: BaseException.message has been deprecated - # as of Python 2.6 - c1 = ( 0 < str(e.args).find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - - def test06(self): - """06: when memory is exccded some instance found on dest host. """ - - db.instance_get = Mock(return_value = self.instance7) - try : - self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - c1 = ( 0 <= str(e.args).find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - def test07(self): - """07: when hdd is exccded some instance found on dest host. """ - - db.instance_get = Mock(return_value = self.instance8) - try : - self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1') - except exception.NotEmpty, e: - c1 = ( 0 <= str(e.args).find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - - def test08(self): - """08: everything goes well. (instance_get_all_by_host returns list)""" - - ret= self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1') - self.assertEqual(ret, None) - - - def test09(self): - """09: everything goes well(instance_get_all_by_host returns[]). """ - - db.instance_get_all_by_host = Mock(return_value = [] ) - ret= self.manager.driver.has_enough_resource(self.ctxt, 'i-12345', 'host1') - self.assertEqual(ret, None) - - - # ---> test for nova.scheduler.manager.live_migration() - - - def test10(self): - """10: instance_get issues NotFound. """ - - db.instance_get = Mock(side_effect=exception.NotFound("ERR")) - self.assertRaises(exception.NotFound, - self.manager.driver.schedule_live_migration, - self.ctxt, - 'i-12345', - 'host1') - - def test11(self): - """11: instance_get issues Unexpected error. """ - - db.instance_get = Mock(side_effect=TypeError("ERR")) - self.assertRaises(TypeError, - self.manager.driver.schedule_live_migration, - self.ctxt, - 'i-12345', - 'host1') - - def test12(self): - """12: instance state is not power_state.RUNNING. 
""" - - db.instance_get = Mock(return_value=self.instance4) - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host1') - except exception.Invalid, e: - c1 = (0 <= str(e.args).find('is not running')) - self.assertTrue(c1, True) - return False - - def test13(self): - """13: instance state_description is not running. """ - - db.instance_get = Mock(return_value=self.instance5) - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host1') - except exception.Invalid, e: - c1 = (0 <= str(e.args).find('is not running')) - self.assertTrue(c1, True) - return False - - def test14(self): - """14: dest is not compute node. - (dest is not included in the result of db.service_get_all_by_topic) - """ - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2') - except exception.Invalid, e: - c1 = (0 <= str(e.args).find('must be compute node')) - self.assertTrue(c1, True) - return False - - def test15(self): - """ 15: dest is not alive.(service_is up returns False) """ - - self.manager.driver.service_is_up = Mock(return_value=False) - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2') - except exception.Invalid, e: - c1 = (0 <= str(e.args).find('is not alive')) - self.assertTrue(c1, True) - return False - - # Cannot test the case of hypervisor type difference and hypervisor - # version difference, since we cannot set different mocks to same method.. - - def test16(self): - """ 16: stored "cpuinfo" is not string """ - - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2') - except exception.Invalid, e: - c1 = (0 <= str(e.args).find('Unexpected err') ) - self.assertTrue(c1, True) - return False - - - def test17(self): - """17: rpc.call raises RemoteError(Unexpected error occurs when executing compareCPU) """ - rpc.call = Mock(return_value = rpc.RemoteError(libvirt.libvirtError, 'val', 'traceback')) - self.assertRaises(rpc.RemoteError, - self.manager.driver.schedule_live_migration, - self.ctxt, - 'i-12345', - 'host2') - - def test18(self): - """18: rpc.call returns 0 (cpu is not compatible between src and dest) """ - rpc.call = Mock(return_value = 0) - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2') - except exception.Invalid, e: - c1 = ( 0 <= str(e.args).find('doesnt have compatibility to')) - self.assertTrue(c1, True) - return False - - def test19(self): - """19: raise NotEmpty if host doesnt have enough resource. """ - - db.instance_get = Mock(return_value = self.instance8) - try : - self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2') - except exception.NotEmpty, e: - c1 = ( 0 <= str(e.args).find('doesnt have enough resource') ) - self.assertTrue(c1, True) - return False - - - def test20(self): - """20: everything goes well. """ - - #db.instance_get = Mock(return_value = self.instance8) - ret= self.manager.driver.schedule_live_migration(self.ctxt, 'i-12345', 'host2') - self.assertEqual(ret, self.instance8['host']) - - - def tearDown(self): - """common terminating method. 
""" - #sys.stdout = self.stdoutBak - pass - -if __name__ == '__main__': - #unittest.main() - suite = unittest.TestLoader().loadTestsFromTestCase(SchedulerTestFunctions) - unittest.TextTestRunner(verbosity=3).run(suite) - - diff --git a/nova/livemigration_test/UT/testCase_UT.xls b/nova/livemigration_test/UT/testCase_UT.xls deleted file mode 100644 index 2850e70f0..000000000 Binary files a/nova/livemigration_test/UT/testCase_UT.xls and /dev/null differ -- cgit From 193ef47ae87afde18f780c5141a597480845de1e Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 03:08:38 +0000 Subject: Fix Nova not to immediately blow up when talking to Glance: we were using the wrong URL to get the image metadata, and ended up getting the whole image instead (and trying to parse it as json). Also, fix some URLs, all of which were missing the leading slash. --- nova/image/glance.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index cb3936df1..e40468364 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -89,7 +89,7 @@ class ParallaxClient(object): """ try: c = self.connection_type(self.netloc, self.port) - c.request("GET", "images/detail") + c.request("GET", "/images/detail") res = c.getresponse() if res.status == 200: # Parallax returns a JSONified dict(images=image_list) @@ -108,12 +108,12 @@ class ParallaxClient(object): """ try: c = self.connection_type(self.netloc, self.port) - c.request("GET", "images/%s" % image_id) + c.request("HEAD", "/images/%s" % image_id) res = c.getresponse() if res.status == 200: - # Parallax returns a JSONified dict(image=image_info) - data = json.loads(res.read())['image'] - return data + # TODO(ewanmellor): Temporary hack! We should be parsing + # the response from Glance properly. + return { 'url': '/images/%s' % image_id } else: # TODO(jaypipes): log the error? return None @@ -146,7 +146,7 @@ class ParallaxClient(object): try: c = self.connection_type(self.netloc, self.port) body = json.dumps(image_metadata) - c.request("PUT", "images/%s" % image_id, body) + c.request("PUT", "/images/%s" % image_id, body) res = c.getresponse() return res.status == 200 finally: @@ -158,7 +158,7 @@ class ParallaxClient(object): """ try: c = self.connection_type(self.netloc, self.port) - c.request("DELETE", "images/%s" % image_id) + c.request("DELETE", "/images/%s" % image_id) res = c.getresponse() return res.status == 200 finally: -- cgit From df2785fb12d38cf0842921d380de2ed2d1c6bf5b Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 03:10:28 +0000 Subject: Half-finished implementation of the streaming from Glance to a VDI through nova-compute. --- nova/virt/xenapi/vm_utils.py | 158 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 157 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d1b51848..074ca9f87 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" +import glance import logging import pickle import urllib @@ -45,6 +46,8 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} +BUFSIZE = 65536 + class ImageType: """ @@ -206,6 +209,25 @@ class VMHelper(HelperBase): vm_ref, network_ref) return vif_ref + @classmethod + def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): + """Create a VDI record and returns its reference.""" + vdi_ref = session.xenapi.VDI.create( + {'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': []}) + logging.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) + return vdi_ref + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, @@ -257,9 +279,52 @@ class VMHelper(HelperBase): def fetch_image(cls, session, instance_id, image, user, project, type): """ type is interpreted as an ImageType instance + Related flags: + xenapi_image_service = ['glance', 'objectstore'] + glance_address = 'address for glance services' + glance_port = 'port for glance services' """ - url = images.image_url(image) access = AuthManager().get_access_key(user, project) + + if FLAGS.xenapi_image_service == 'glance': + cls._fetch_image_glance(session, instance_id, image, access, type) + else: + cls._fetch_image_objectstore(session, instance_id, image, access, + type) + + #### raw_image=validate_bool(args, 'raw', 'false') + #### add_partition = validate_bool(args, 'add_partition', 'false') + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + sr = find_sr(session) + if sr is None: + raise exception.NotFound('Cannot find SR to write VDI to') + + c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + + raise exception.NotFound("DAM") + + meta, image_file = c.get_image(image) + vdi_size = meta['size'] + + vdi = create_vdi(session, sr, _('Glance image %s') % image, vdi_size, + False) + + def stream(dev): + with open('/dev/%s' % dev, 'wb') as f: + while True: + buf = image_file.read(BUFSIZE) + if not buf: + break + f.write(buf) + with_vdi_attached_here(session, vdi, False, stream) + return session.xenapi.VDI.get_uuid(vdi) + + @classmethod + def _fetch_image_objectstore(cls, session, instance_id, image, access, + type): + url = images.image_url(image) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} @@ -461,3 +526,94 @@ def get_vdi_for_vm_safely(session, vm_ref): vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) return vdi_ref, vdi_rec + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def with_vdi_attached_here(session, vdi, read_only, f): + this_vm_ref = get_this_vm_ref(session) + vbd_rec = {} + vbd_rec['VM'] = this_vm_ref + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only 
and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug(_('Creating VBD for VDI %s ... '), vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug(_('Creating VBD for VDI %s done.'), vdi) + try: + logging.debug(_('Plugging VBD %s ... '), vbd) + session.xenapi.VBD.plug(vbd) + logging.debug(_('Plugging VBD %s done.'), vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug(_('Destroying VBD for VDI %s ... '), vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug(_('Destroying VBD for VDI %s done.'), vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug(_('VBD.unplug successful first time.')) + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug(_('VBD.unplug rejected: retrying...')) + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug(_('VBD.unplug successful eventually.')) + return + else: + logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + e) + return + + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error(_('Ignoring XenAPI.Failure %s'), e) + return None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_this_vm_uuid(): + with file('/sys/hypervisor/uuid') as f: + return f.readline().strip() + + +def get_this_vm_ref(session): + return session.xenapi.VM.get_by_uuid(get_this_vm_uuid()) -- cgit From b23dece0d29d295f6ee140b96230ed27c7fd1268 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Fri, 7 Jan 2011 18:11:41 +0000 Subject: pv/hvm detection with pygrub updated for glance --- nova/virt/xenapi/vm_utils.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 074ca9f87..9d333bcea 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -343,6 +343,14 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): + if FLAGS.xenapi_image_service == 'glance': + cls.lookup_image_glance(session, vdi_ref) + else: + cls.lookup_image_objectstore(session, vdi_ref) + return + + @classmethod + def _lookup_image_objectstore(cls,session,vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) fn = "is_vdi_pv" args = {} @@ -357,6 +365,25 @@ class VMHelper(HelperBase): logging.debug("PV Kernel in VDI:%d", pv) return pv + @classmethod + def _lookup_image_glance(cls,session,vdi_ref): + logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + + def is_vdi_pv(dest): + logging.debug("Running pygrub against %s",dest) + output=os.popen('pygrub -qn %s' % dest) + pv=False + for line in output.readlines(): + #try to find kernel string + m=re.search('(?<=kernel:)/.*(?:>)',line) + if m: + if m.group(0).find('xen')!=-1: + pv=True + logging.debug("PV:%d",pv) + return pv + pv=with_vdi_attached_here(session, vdi_ref, 
False, is_vdi_pv) + return pv + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" -- cgit From e92f0a9352bf7de0f42951b5b6f1bb452a609bf6 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 20:19:59 +0000 Subject: Many fixes to the Glance integration. --- nova/virt/xenapi/vm_utils.py | 75 +++++++++++++++++++------------------------- nova/virt/xenapi_conn.py | 3 ++ 2 files changed, 36 insertions(+), 42 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d333bcea..98427acee 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,8 +19,9 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ -import glance +import glance.client import logging +import os import pickle import urllib from xml.dom import minidom @@ -46,8 +47,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} -BUFSIZE = 65536 - class ImageType: """ @@ -212,7 +211,7 @@ class VMHelper(HelperBase): @classmethod def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): """Create a VDI record and returns its reference.""" - vdi_ref = session.xenapi.VDI.create( + vdi_ref = session.get_xenapi().VDI.create( {'name_label': name_label, 'name_description': '', 'SR': sr_ref, @@ -287,39 +286,35 @@ class VMHelper(HelperBase): access = AuthManager().get_access_key(user, project) if FLAGS.xenapi_image_service == 'glance': - cls._fetch_image_glance(session, instance_id, image, access, type) + return cls._fetch_image_glance(session, instance_id, image, + access, type) else: - cls._fetch_image_objectstore(session, instance_id, image, access, - type) + return cls._fetch_image_objectstore(session, instance_id, image, + access, type) #### raw_image=validate_bool(args, 'raw', 'false') #### add_partition = validate_bool(args, 'add_partition', 'false') @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): + def _fetch_image_glance(cls, session, instance_id, image, access, typ): sr = find_sr(session) if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - raise exception.NotFound("DAM") - meta, image_file = c.get_image(image) vdi_size = meta['size'] - vdi = create_vdi(session, sr, _('Glance image %s') % image, vdi_size, - False) + vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, + vdi_size, False) def stream(dev): with open('/dev/%s' % dev, 'wb') as f: - while True: - buf = image_file.read(BUFSIZE) - if not buf: - break - f.write(buf) + for chunk in image_file: + f.write(chunk) with_vdi_attached_here(session, vdi, False, stream) - return session.xenapi.VDI.get_uuid(vdi) + return session.get_xenapi().VDI.get_uuid(vdi) @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, @@ -344,9 +339,9 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): if FLAGS.xenapi_image_service == 'glance': - cls.lookup_image_glance(session, vdi_ref) + cls._lookup_image_glance(session, vdi_ref) else: - cls.lookup_image_objectstore(session, vdi_ref) + cls._lookup_image_objectstore(session, vdi_ref) return @classmethod @@ -369,19 +364,19 @@ class VMHelper(HelperBase): def _lookup_image_glance(cls,session,vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) - def is_vdi_pv(dest): - 
logging.debug("Running pygrub against %s",dest) - output=os.popen('pygrub -qn %s' % dest) - pv=False + def is_vdi_pv(dev): + logging.debug("Running pygrub against %s", dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + pv = False for line in output.readlines(): #try to find kernel string - m=re.search('(?<=kernel:)/.*(?:>)',line) + m = re.search('(?<=kernel:)/.*(?:>)',line) if m: if m.group(0).find('xen')!=-1: - pv=True + pv = True logging.debug("PV:%d",pv) - return pv - pv=with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) + return pv + pv = with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) return pv @classmethod @@ -556,15 +551,15 @@ def get_vdi_for_vm_safely(session, vm_ref): def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() + host = session.get_xenapi_host() + srs = session.get_xenapi().SR.get_all() for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) + sr_rec = session.get_xenapi().SR.get_record(sr) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) + pbd_rec = session.get_xenapi().PBD.get_record(pbd) if pbd_rec['host'] == host: return sr return None @@ -586,17 +581,17 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug(_('Creating VBD for VDI %s ... '), vdi) - vbd = session.xenapi.VBD.create(vbd_rec) + vbd = session.get_xenapi().VBD.create(vbd_rec) logging.debug(_('Creating VBD for VDI %s done.'), vdi) try: logging.debug(_('Plugging VBD %s ... '), vbd) - session.xenapi.VBD.plug(vbd) + session.get_xenapi().VBD.plug(vbd) logging.debug(_('Plugging VBD %s done.'), vbd) - return f(session.xenapi.VBD.get_device(vbd)) + return f(session.get_xenapi().VBD.get_device(vbd)) finally: logging.debug(_('Destroying VBD for VDI %s ... '), vdi) vbd_unplug_with_retry(session, vbd) - ignore_failure(session.xenapi.VBD.destroy, vbd) + ignore_failure(session.get_xenapi().VBD.destroy, vbd) logging.debug(_('Destroying VBD for VDI %s done.'), vdi) @@ -607,7 +602,7 @@ def vbd_unplug_with_retry(session, vbd): should be dead.""" while True: try: - session.xenapi.VBD.unplug(vbd) + session.get_xenapi().VBD.unplug(vbd) logging.debug(_('VBD.unplug successful first time.')) return except XenAPI.Failure, e: @@ -633,14 +628,10 @@ def ignore_failure(func, *args, **kwargs): return None -def get_this_host(session): - return session.xenapi.session.get_this_host(session.handle) - - def get_this_vm_uuid(): with file('/sys/hypervisor/uuid') as f: return f.readline().strip() def get_this_vm_ref(session): - return session.xenapi.VM.get_by_uuid(get_this_vm_uuid()) + return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c48f5b7cb..3820d3d30 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -84,6 +84,9 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') +flags.DEFINE_string('xenapi_image_service', + 'glance', + 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' 
-- cgit From 5ca8ec42037ed4e2a1475bf29064f61068308687 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 20:26:25 +0000 Subject: Fix pep8 errors. --- nova/image/glance.py | 2 +- nova/virt/xenapi/vm_utils.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index e40468364..3f8982c51 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -113,7 +113,7 @@ class ParallaxClient(object): if res.status == 200: # TODO(ewanmellor): Temporary hack! We should be parsing # the response from Glance properly. - return { 'url': '/images/%s' % image_id } + return {'url': '/images/%s' % image_id} else: # TODO(jaypipes): log the error? return None diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 98427acee..c5bd83b27 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -344,8 +344,8 @@ class VMHelper(HelperBase): cls._lookup_image_objectstore(session, vdi_ref) return - @classmethod - def _lookup_image_objectstore(cls,session,vdi_ref): + @classmethod + def _lookup_image_objectstore(cls, session, vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) fn = "is_vdi_pv" args = {} @@ -360,25 +360,25 @@ class VMHelper(HelperBase): logging.debug("PV Kernel in VDI:%d", pv) return pv - @classmethod - def _lookup_image_glance(cls,session,vdi_ref): + @classmethod + def _lookup_image_glance(cls, session, vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) - + def is_vdi_pv(dev): logging.debug("Running pygrub against %s", dev) output = os.popen('pygrub -qn /dev/%s' % dev) pv = False for line in output.readlines(): #try to find kernel string - m = re.search('(?<=kernel:)/.*(?:>)',line) + m = re.search('(?<=kernel:)/.*(?:>)', line) if m: - if m.group(0).find('xen')!=-1: + if m.group(0).find('xen') != -1: pv = True - logging.debug("PV:%d",pv) + logging.debug("PV:%d", pv) return pv pv = with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) return pv - + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" -- cgit From fedf946c7d04465fb958707e143d8de558ea4321 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 22:28:59 +0000 Subject: Some fixes to _lookup_image_glance: fix the return value from lookup_image, attach the disk read-only before running pygrub, and add some debug logging. 
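This commit's PV detection reduces to scanning pygrub -qn output for a kernel path that mentions Xen, with the VDI now attached read-only while pygrub runs. The snippet below isolates that regex so it can be exercised without a XenServer host; the sample output line is invented for illustration and may not match real pygrub formatting exactly.

import re

def looks_paravirtual(pygrub_output_lines):
    """Return True when a 'kernel:' entry points at a Xen kernel,
    mirroring the is_vdi_pv() helper in the diff below."""
    for line in pygrub_output_lines:
        m = re.search('(?<=kernel:)/.*(?:>)', line)
        if m and m.group(0).find('xen') != -1:
            return True
    return False

# Hypothetical pygrub -qn output for a paravirtual guest:
sample = ['<kernel:/boot/vmlinuz-2.6.18-xen><ramdisk:/boot/initrd.img>']
print(looks_paravirtual(sample))   # True: the kernel path contains "xen"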
--- nova/virt/xenapi/vm_utils.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c5bd83b27..76094f35c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -23,6 +23,7 @@ import glance.client import logging import os import pickle +import re import urllib from xml.dom import minidom @@ -339,10 +340,9 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): if FLAGS.xenapi_image_service == 'glance': - cls._lookup_image_glance(session, vdi_ref) + return cls._lookup_image_glance(session, vdi_ref) else: - cls._lookup_image_objectstore(session, vdi_ref) - return + return cls._lookup_image_objectstore(session, vdi_ref) @classmethod def _lookup_image_objectstore(cls, session, vdi_ref): @@ -367,17 +367,15 @@ class VMHelper(HelperBase): def is_vdi_pv(dev): logging.debug("Running pygrub against %s", dev) output = os.popen('pygrub -qn /dev/%s' % dev) - pv = False for line in output.readlines(): #try to find kernel string m = re.search('(?<=kernel:)/.*(?:>)', line) - if m: - if m.group(0).find('xen') != -1: - pv = True - logging.debug("PV:%d", pv) - return pv - pv = with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) - return pv + if m and m.group(0).find('xen') != -1: + logging.debug("Found Xen kernel %s" % m.group(0)) + return True + logging.debug("No Xen kernel found. Booting HVM.") + return False + return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) @classmethod def lookup(cls, session, i): -- cgit From 6e0665415a65bc800b4c6f2778d66e944cbbe81e Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 22:56:11 +0000 Subject: Fix indentation. --- nova/virt/xenapi/vm_utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 76094f35c..c9c22f7b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -50,16 +50,16 @@ XENAPI_POWER_STATE = { class ImageType: - """ - Enumeration class for distinguishing different image types - 0 - kernel/ramdisk image (goes on dom0's filesystem) - 1 - disk image (local SR, partitioned by objectstore plugin) - 2 - raw disk image (local SR, NOT partitioned by plugin) - """ + """ + Enumeration class for distinguishing different image types + 0 - kernel/ramdisk image (goes on dom0's filesystem) + 1 - disk image (local SR, partitioned by objectstore plugin) + 2 - raw disk image (local SR, NOT partitioned by plugin) + """ - KERNEL_RAMDISK = 0 - DISK = 1 - DISK_RAW = 2 + KERNEL_RAMDISK = 0 + DISK = 1 + DISK_RAW = 2 class VMHelper(HelperBase): -- cgit From 9f8719b334df28521154be8587bd7d30c431a993 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 8 Jan 2011 00:25:54 +0000 Subject: First cut at implementing partition-adding in combination with the Glance streaming. Untested. 
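The partitioning scheme introduced in the diff below reserves a 63-sector gap for the MBR and then asks parted for one primary partition sized to the image. This small standalone sketch shows only the geometry arithmetic; the device name is a placeholder and no parted command is actually executed.

# Geometry used by _write_partition() in the diff below: 512-byte sectors,
# the first 63 sectors reserved for the MBR, and a single primary partition
# covering the image itself.

SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63

def partition_bounds(virtual_size_bytes):
    """Return (first_sector, last_sector) for an image of the given size."""
    first = MBR_SIZE_SECTORS
    last = MBR_SIZE_SECTORS + (virtual_size_bytes // SECTOR_SIZE) - 1
    return first, last

# e.g. a 10 MiB image on a placeholder device:
first, last = partition_bounds(10 * 1024 * 1024)
print('parted --script /dev/XXX mkpart primary %ds %ds' % (first, last))

The // above makes explicit the integer division that the patch gets implicitly from Python 2's / on integers.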
--- nova/virt/xenapi/vm_utils.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c9c22f7b2..e601ccff9 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -49,6 +49,11 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + class ImageType: """ Enumeration class for distinguishing different image types @@ -293,9 +298,6 @@ class VMHelper(HelperBase): return cls._fetch_image_objectstore(session, instance_id, image, access, type) - #### raw_image=validate_bool(args, 'raw', 'false') - #### add_partition = validate_bool(args, 'add_partition', 'false') - @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, typ): sr = find_sr(session) @@ -305,15 +307,27 @@ class VMHelper(HelperBase): c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = c.get_image(image) - vdi_size = meta['size'] + virtual_size = meta['size'] + + vdi_size = virtual_size + if typ == ImageType.DISK: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, vdi_size, False) def stream(dev): + offset = 0 + if typ == ImageType.DISK: + offset = MBR_SIZE_BYTES + _write_partition(virtual_size, dev) + with open('/dev/%s' % dev, 'wb') as f: + f.seek(offset) for chunk in image_file: f.write(chunk) + with_vdi_attached_here(session, vdi, False, stream) return session.get_xenapi().VDI.get_uuid(vdi) @@ -633,3 +647,24 @@ def get_this_vm_uuid(): def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) + + +def _write_partition(virtual_size, dev): + dest = '/dev/%s' % dev + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + def execute(cmd, process_input=None, check_exit_code=True): + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) + + execute('parted --script %s mklabel msdos' % dest) + execute('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + + logging.debug('Writing partition table %s done.', dest) -- cgit From ed84e51475471c5ae37eacdd4d5c93ef91ebcf10 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sun, 9 Jan 2011 01:40:51 +0000 Subject: fixed small glitch in _fetch_image_glance virtual_size = imeta['size'] changed to virtual_size = int(meta['size']) --- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e601ccff9..51418c444 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -307,7 +307,7 @@ class VMHelper(HelperBase): c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = c.get_image(image) - virtual_size = meta['size'] + virtual_size = int(meta['size']) vdi_size = virtual_size if typ == ImageType.DISK: @@ -585,6 +585,7 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' + logging.debug("read_only: %s",str(read_only)) vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False 
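The one-line coercion in the hunk above is load-bearing: the later size arithmetic (adding the MBR bytes, dividing by the sector size) needs an integer, and the int() call is what guarantees it. A tiny illustration, with an invented metadata dict shaped like the one the fix suggests the client returns:

MBR_SIZE_BYTES = 63 * 512

# Assume, as the int() fix implies, that the size arrives as a string.
meta = {'size': '5242880'}

virtual_size = int(meta['size'])     # the fix: coerce before doing math
vdi_size = virtual_size + MBR_SIZE_BYTES

# Without int() here, '5242880' + 32256 raises TypeError instead of
# producing the padded VDI size.
print(vdi_size)                      # 5275136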
-- cgit From 97ff39bd1d83f3cfa412f291087e025a91d147cd Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 10 Jan 2011 18:26:40 +0000 Subject: Can now correctly launch images with external kernels through glance. Further tests and Pep8 fixes to come. --- nova/virt/xenapi/vm_utils.py | 23 ++++++++++++----- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 30 +++++++++++++++++++++- .../xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 4 +-- 3 files changed, 48 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 51418c444..674459841 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -52,7 +52,7 @@ XENAPI_POWER_STATE = { SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE - +KERNEL_DIR = '/boot/guest' class ImageType: """ @@ -299,7 +299,7 @@ class VMHelper(HelperBase): access, type) @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, typ): + def _fetch_image_glance(cls, session, instance_id, image, access, type): sr = find_sr(session) if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') @@ -310,7 +310,8 @@ class VMHelper(HelperBase): virtual_size = int(meta['size']) vdi_size = virtual_size - if typ == ImageType.DISK: + logging.debug("Size for image %s:%d",image,virtual_size) + if type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES @@ -319,7 +320,7 @@ class VMHelper(HelperBase): def stream(dev): offset = 0 - if typ == ImageType.DISK: + if type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) @@ -329,7 +330,18 @@ class VMHelper(HelperBase): f.write(chunk) with_vdi_attached_here(session, vdi, False, stream) - return session.get_xenapi().VDI.get_uuid(vdi) + if (type==ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's content into proper path + fn = "copy_kernel_vdi" + args = {} + args['vdi-ref'] = vdi + args['image-size']=str(vdi_size) + task = session.async_call_plugin('glance', fn, args) + filename=session.wait_for_task(instance_id,task) + #TODO(salvatore-orlando): remove the VDI as it is not needed anymore + return filename + else: + return session.get_xenapi().VDI.get_uuid(vdi) @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, @@ -364,7 +376,6 @@ class VMHelper(HelperBase): fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref - #TODO: Call proper function in plugin task = session.async_call_plugin('objectstore', fn, args) pv_str = session.wait_for_task(task) if pv_str.lower() == 'true': diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 5e648b970..7f0b375e1 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -40,8 +40,35 @@ from pluginlib_nova import * configure_logging('glance') CHUNK_SIZE = 8192 +KERNEL_DIR = '/boot/guest' FILE_SR_PATH = '/var/run/sr-mount' +def copy_kernel_vdi(session,args): + vdi = exists(args, 'vdi-ref') + size = exists(args,'image-size') + #Use the uuid as a filename + vdi_uuid=session.xenapi.VDI.get_uuid(vdi) + copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} + filename=with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev,copy_args)) + return filename + +def _copy_kernel_vdi(dest,copy_args): + vdi_uuid=copy_args['vdi_uuid'] + vdi_size=copy_args['vdi_size'] + logging.debug("copying kernel/ramdisk file from %s to 
/boot/guest/%s",dest,vdi_uuid) + filename=KERNEL_DIR + '/' + vdi_uuid + #read data from /dev/ and write into a file on /boot/guest + of=open(filename,'wb') + f=open(dest,'rb') + data=f.read(vdi_size) + of.write(data) + f.close() + of.close() + logging.debug("Done. Filename: %s",filename) + return filename + def put_vdis(session, args): params = pickle.loads(exists(args, 'params')) vdi_uuids = params["vdi_uuids"] @@ -129,4 +156,5 @@ def find_sr(session): if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': put_vdis}) + XenAPIPlugin.dispatch({'put_vdis': put_vdis, + 'copy_kernel_vdi': copy_kernel_vdi}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index 8e7a829d5..17fcd474e 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -154,7 +154,7 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only): return vdi_ref -def with_vdi_in_dom0(session, vdi, read_only, f): +def with_vdi_in_dom0(session, vdi, read_only, f,args=None): dom0 = get_domain_0(session) vbd_rec = {} vbd_rec['VM'] = dom0 @@ -176,7 +176,7 @@ def with_vdi_in_dom0(session, vdi, read_only, f): logging.debug('Plugging VBD %s ... ', vbd) session.xenapi.VBD.plug(vbd) logging.debug('Plugging VBD %s done.', vbd) - return f(session.xenapi.VBD.get_device(vbd)) + return f(session.xenapi.VBD.get_device(vbd),args) finally: logging.debug('Destroying VBD for VDI %s ... ', vdi) vbd_unplug_with_retry(session, vbd) -- cgit From 6ba35582eec774253d725ab7a6959fdc12cea215 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 11 Jan 2011 01:50:14 +0000 Subject: Now removing kernel/ramdisk VDI after copy Code tested with PV and HVM guests Fixed pep8 errors Could not run tests - test environment broken on dev machine --- nova/tests/test_xenapi.py | 81 +++++++++++++--------- nova/virt/hyperv.py | 2 +- nova/virt/xenapi/vm_utils.py | 20 ++++-- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 3 +- 4 files changed, 65 insertions(+), 41 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ec9462ada..7c256968f 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -206,40 +206,57 @@ class XenAPIVMTestCase(test.TestCase): check() - def test_spawn(self): - instance = self._create_instance() - - def check(): - instances = self.conn.list_instances() - self.assertEquals(instances, [1]) - - # Get Nova record for VM - vm_info = self.conn.get_info(1) + def check_vm_record(self, conn): + instances = conn.list_instances() + self.assertEquals(instances, [1]) + + # Get Nova record for VM + vm_info = conn.get_info(1) + + # Get XenAPI record for VM + vms = fake.get_all('VM') + vm = fake.get_record('VM', vms[0]) + + # Check that m1.large above turned into the right thing. 
+ instance_type = instance_types.INSTANCE_TYPES['m1.large'] + mem_kib = long(instance_type['memory_mb']) << 10 + mem_bytes = str(mem_kib << 10) + vcpus = instance_type['vcpus'] + self.assertEquals(vm_info['max_mem'], mem_kib) + self.assertEquals(vm_info['mem'], mem_kib) + self.assertEquals(vm['memory_static_max'], mem_bytes) + self.assertEquals(vm['memory_dynamic_max'], mem_bytes) + self.assertEquals(vm['memory_dynamic_min'], mem_bytes) + self.assertEquals(vm['VCPUs_max'], str(vcpus)) + self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) + + # Check that the VM is running according to Nova + self.assertEquals(vm_info['state'], power_state.RUNNING) + + # Check that the VM is running according to XenAPI. + self.assertEquals(vm['power_state'], 'Running') + + def _test_spawn(self, image_id, kernel_id, ramdisk_id): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + conn = xenapi_conn.get_connection(False) + instance = db.instance_create(values) + conn.spawn(instance) + self.check_vm_record(conn) - # Get XenAPI record for VM - vms = xenapi_fake.get_all('VM') - vm = xenapi_fake.get_record('VM', vms[0]) - - # Check that m1.large above turned into the right thing. - instance_type = instance_types.INSTANCE_TYPES['m1.large'] - mem_kib = long(instance_type['memory_mb']) << 10 - mem_bytes = str(mem_kib << 10) - vcpus = instance_type['vcpus'] - self.assertEquals(vm_info['max_mem'], mem_kib) - self.assertEquals(vm_info['mem'], mem_kib) - self.assertEquals(vm['memory_static_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_min'], mem_bytes) - self.assertEquals(vm['VCPUs_max'], str(vcpus)) - self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) - - # Check that the VM is running according to Nova - self.assertEquals(vm_info['state'], power_state.RUNNING) - - # Check that the VM is running according to XenAPI. - self.assertEquals(vm['power_state'], 'Running') + def test_spawn_raw(self): + self._test_spawn(1, None, None) - check() + def test_spawn(self): + self._test_spawn(1, 2, 3) def tearDown(self): super(XenAPIVMTestCase, self).tearDown() diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 4b9f6f946..4f0f6f9c7 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -89,7 +89,7 @@ REQ_POWER_STATE = { 'Reboot': 10, 'Reset': 11, 'Paused': 32768, - 'Suspended': 32769 + 'Suspended': 32769, } diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 674459841..4f2c754fa 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -54,6 +54,7 @@ MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE KERNEL_DIR = '/boot/guest' + class ImageType: """ Enumeration class for distinguishing different image types @@ -310,7 +311,7 @@ class VMHelper(HelperBase): virtual_size = int(meta['size']) vdi_size = virtual_size - logging.debug("Size for image %s:%d",image,virtual_size) + logging.debug("Size for image %s:%d", image, virtual_size) if type == ImageType.DISK: # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES @@ -330,15 +331,20 @@ class VMHelper(HelperBase): f.write(chunk) with_vdi_attached_here(session, vdi, False, stream) - if (type==ImageType.KERNEL_RAMDISK): - #we need to invoke a plugin for copying VDI's content into proper path + if (type == ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's + #content into proper path + logging.debug("Copying VDI %s to /boot/guest on dom0", vdi) fn = "copy_kernel_vdi" args = {} args['vdi-ref'] = vdi - args['image-size']=str(vdi_size) + #let the plugin copy the correct number of bytes + args['image-size'] = str(vdi_size) task = session.async_call_plugin('glance', fn, args) - filename=session.wait_for_task(instance_id,task) - #TODO(salvatore-orlando): remove the VDI as it is not needed anymore + filename = session.wait_for_task(instance_id, task) + #remove the VDI as it is not needed anymore + session.get_xenapi().VDI.destroy(vdi) + logging.debug("Kernel/Ramdisk VDI %s destroyed", vdi) return filename else: return session.get_xenapi().VDI.get_uuid(vdi) @@ -596,7 +602,7 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' - logging.debug("read_only: %s",str(read_only)) + logging.debug("read_only: %s", str(read_only)) vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 7f0b375e1..97cf32dcf 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -18,7 +18,7 @@ # under the License. # -# XenAPI plugin for putting images into glance +# XenAPI plugin for managing glance images # import base64 @@ -62,6 +62,7 @@ def _copy_kernel_vdi(dest,copy_args): #read data from /dev/ and write into a file on /boot/guest of=open(filename,'wb') f=open(dest,'rb') + #copy only vdi_size bytes data=f.read(vdi_size) of.write(data) f.close() -- cgit From d47183b2685787cb250fe4025cbeb3f94455de54 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Tue, 11 Jan 2011 03:09:48 +0000 Subject: Add glance to pip-requires, as we're now using the Glance client code from Nova. --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 341043114..c9ce5c31c 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -25,3 +25,4 @@ bzr Twisted>=10.1.0 PasteDeploy paste +glance -- cgit From 5afd9848ad09414c00062ceebdad45bca0604888 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Tue, 11 Jan 2011 18:01:23 +0900 Subject: Add support for EBS volumes to the live migration feature. Currently, only AoE is supported. 
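The EBS/AoE commit below wires volumes into the migration path in three places: nova-manage refuses any volume driver other than AOEDriver, pre_live_migration sets up every attached volume on the destination, and the failure path puts instances and volumes back into their running state. A condensed, self-contained sketch of the destination-side volume setup follows; get_volumes() and setup_compute_volume() are stand-ins invented for illustration, not the real db or volume-manager APIs.

class NotFound(Exception):
    """Stand-in for nova.exception.NotFound."""
    pass

def get_volumes(instance_id):
    # Pretend the instance has two attached volumes; the real code calls
    # db.volume_get_all_by_instance(), which raises NotFound when empty.
    return [{'id': 1}, {'id': 2}]

def setup_compute_volume(volume_id):
    print('re-exporting volume %d on the destination host' % volume_id)

def prepare_volumes_for_migration(instance_id):
    """Mirror of the pre_live_migration loop: set up each attached volume
    on the destination and treat 'no volumes' as a normal case."""
    try:
        for vol in get_volumes(instance_id):
            setup_compute_volume(vol['id'])
    except NotFound:
        print('%s has no volume; nothing to prepare' % instance_id)

prepare_volumes_for_migration('i-12345')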
--- Authors | 1 + bin/nova-manage | 9 +- nova/api/ec2/cloud.py | 13 +- nova/compute/api.py | 3 +- nova/compute/manager.py | 55 +++--- nova/db/api.py | 5 + nova/db/sqlalchemy/api.py | 14 ++ nova/db/sqlalchemy/models.py | 1 - nova/livemigration_test/UT/computeManager.test.py | 12 +- nova/livemigration_test/UT/nova-manage.test.py | 3 + nova/network/manager.py | 1 - nova/scheduler/driver.py | 9 + nova/service.py | 2 +- nova/service.py.THIS | 222 ---------------------- nova/virt/libvirt_conn.py | 45 +++-- nova/volume/driver.py | 26 ++- nova/volume/manager.py | 12 +- 17 files changed, 143 insertions(+), 290 deletions(-) delete mode 100644 nova/service.py.THIS diff --git a/Authors b/Authors index 79b1b6f68..c5a123e22 100644 --- a/Authors +++ b/Authors @@ -26,6 +26,7 @@ Kei Masumoto Matt Dietz Michael Gundlach Monty Taylor +Muneyuki Noguchi Paul Voccio Rick Clark Rick Harris diff --git a/bin/nova-manage b/bin/nova-manage index 7c87d21ff..fa044859d 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -83,8 +83,6 @@ from nova import rpc from nova.cloudpipe import pipelib from nova.api.ec2 import cloud - - FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') @@ -462,6 +460,10 @@ class InstanceCommands(object): def live_migration(self, ec2_id, dest): """live_migration""" + if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver': + raise exception.Error('Only AOEDriver is supported for now. ' + 'Sorry.') + logging.basicConfig() ctxt = context.get_admin_context() @@ -491,7 +493,6 @@ class InstanceCommands(object): class HostCommands(object): """Class for mangaging host(physical nodes).""" - def list(self): """describe host list.""" @@ -502,7 +503,6 @@ class HostCommands(object): for host_ref in host_refs: print host_ref['name'] - def show(self, host): """describe cpu/memory/hdd info for host.""" @@ -546,6 +546,7 @@ CATEGORIES = [ ('instance', InstanceCommands), ('host', HostCommands)] + def lazy_match(name, key_value_tuples): """Finds all objects that have a key that case insensitively contains [name] key_value_tuples is a list of tuples of the form (key, value) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 3d88e5ef3..b8a553a92 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -679,22 +679,13 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - # modified by masumotok - internal_id = \ - floating_ip_ref['fixed_ip']['instance']['internal_id'] + internal_id = floating_ip_ref['fixed_ip']['instance'] ec2_id = internal_id_to_ec2_id(internal_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.user.is_admin(): - # modified by masumotok- b/c proj_id is never inserted - #details = "%s (%s)" % (address_rv['instance_id'], - # floating_ip_ref['project_id']) - if None != address_rv['instance_id']: - status = 'reserved' - else: - status = None details = "%s (%s)" % (address_rv['instance_id'], - status) + floating_ip_ref['project_id']) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} diff --git a/nova/compute/api.py b/nova/compute/api.py index 5a776afa5..da41cc63c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -233,7 +233,8 @@ class ComputeAPI(base.Base): terminated_at=datetime.datetime.utcnow()) host = instance['host'] - logging.error('terminate %s %s %s %s',context, FLAGS.compute_topic, host, self.db.queue_get_for(context, FLAGS.compute_topic, 
host)) + logging.error('terminate %s %s %s %s', context, FLAGS.compute_topic, + host, self.db.queue_get_for(context, FLAGS.compute_topic, host)) if host: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1e8e11d04..a78789e63 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -122,7 +122,7 @@ class ComputeManager(manager.Manager): raise exception.Error(_("Instance has already been created")) self.db.instance_update(context, instance_id, - {'host': self.host, 'launched_on':self.host}) + {'host': self.host, 'launched_on': self.host}) self.db.instance_set_state(context, instance_id, @@ -443,21 +443,10 @@ class ComputeManager(manager.Manager): def pre_live_migration(self, context, instance_id, dest): """Any preparation for live migration at dst host.""" - # Getting volume info ( shlf/slot number ) + # Getting volume info instance_ref = db.instance_get(context, instance_id) ec2_id = instance_ref['hostname'] - volumes = [] - try: - volumes = db.volume_get_by_ec2_id(context, ec2_id) - except exception.NotFound: - logging.info(_('%s has no volume.'), ec2_id) - - shelf_slots = {} - for vol in volumes: - shelf, slot = db.volume_get_shelf_and_blade(context, vol['id']) - shelf_slots[vol.id] = (shelf, slot) - # Getting fixed ips fixed_ip = db.instance_get_fixed_address(context, instance_id) if None == fixed_ip: @@ -466,18 +455,22 @@ class ComputeManager(manager.Manager): tb = ''.join(traceback.format_tb(sys.exc_info()[2])) raise rpc.RemoteError(exc_type, val, tb) - # If any volume is mounted, prepare here. - if 0 != len(shelf_slots): - pass - - # Creating nova-instance-instance-xxx, this is written to libvirt.xml, - # and can be seen when executin "virsh nwfiter-list" On destination host, - # this nwfilter is necessary. - # In addition this method is creating security rule ingress rule onto - # destination host. + # if any volume is mounted, prepare here. + try: + for vol in db.volume_get_all_by_instance(context, instance_id): + self.volume_manager.setup_compute_volume(context, vol['id']) + except exception.NotFound: + logging.info(_("%s has no volume.") % ec2_id) + + # Creating nova-instance-instance-xxx, + # this is written to libvirt.xml, + # and can be seen when executin "virsh nwfiter-list" + # On destination host, this nwfilter is necessary. + # In addition this method is creating security rule ingress rule + # onto destination host. self.driver.setup_nwfilters_for_instance(instance_ref) - # 5. bridge settings + # bridge settings self.network_manager.setup_compute_network(context, instance_id) return True @@ -497,12 +490,23 @@ class ComputeManager(manager.Manager): "args": {'instance_id': instance_id, 'dest': dest}}) + instance_ref = db.instance_get(context, instance_id) + ec2_id = instance_ref['hostname'] if True != ret: logging.error(_('Pre live migration failed(err at %s)'), dest) db.instance_set_state(context, instance_id, power_state.RUNNING, 'running') + + try: + for vol in db.volume_get_all_by_instance(context, instance_id): + db.volume_update(context, + vol['id'], + {'status': 'in-use'}) + except exception.NotFound: + pass + return # Waiting for setting up nwfilter such as, nova-instance-instance-xxx. 
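# For context (not part of the patch): the next hunk adds a synchronous
# rpc.call to the volume service's new check_for_export before the migration
# is executed. On the volume side (see the nova/volume/driver.py changes later
# in this commit) that check scans `vblade-persist ls --no-header` output;
# the patch expects six space-separated fields per line, with shelf and blade
# first and the run state last. A self-contained sketch of the same check,
# with the driver's shell-out helper swapped for the standard subprocess
# module purely for illustration:
import subprocess

def vblade_export_running(shelf_id, blade_id):
    """Return True if vblade-persist reports a running export e<shelf>.<blade>."""
    out = subprocess.check_output(
        ["sudo", "vblade-persist", "ls", "--no-header"]).decode()
    for line in out.splitlines():
        fields = line.split(' ')
        # Mirror the patch's check: six fields, matching shelf/blade, state "run".
        if (len(fields) == 6 and fields[0] == str(shelf_id)
                and fields[1] == str(blade_id) and fields[-1] == "run"):
            return True
    return False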
@@ -523,6 +527,11 @@ class ComputeManager(manager.Manager): logging.error(_('Timeout for pre_live_migration at %s'), dest) return + rpc.call(context, + FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': instance_id}}) + # Executing live migration # live_migration might raises ProcessExecution error, but # nothing must be recovered in this version. diff --git a/nova/db/api.py b/nova/db/api.py index cd35a2bd4..3004f5e6f 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -711,6 +711,11 @@ def volume_get_by_ec2_id(context, ec2_id): return IMPL.volume_get_by_ec2_id(context, ec2_id) +def volume_get_all_by_instance(context, instance_id): + """Get all volumes by instance id or raise if it does not exist.""" + return IMPL.volume_get_all_by_instance(context, instance_id) + + def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b0d1ec1a7..04f60ccce 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -900,6 +900,8 @@ def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): return _instance_get_sum_by_host_and_project(context, 'local_gb', hostname, proj_id) + + @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" @@ -1497,6 +1499,18 @@ def volume_get_by_ec2_id(context, ec2_id): return result +@require_admin_context +def volume_get_all_by_instance(context, instance_id): + session = get_session() + result = session.query(models.Volume).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + all() + if not result: + raise exception.NotFound(_('No volume for instance %s') % instance_id) + return result + + @require_context def volume_ec2_id_exists(context, ec2_id, session=None): if not session: diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 994165ad1..1c1c23239 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -138,7 +138,6 @@ class NovaBase(object): # __tablename__ = 'hosts' # id = Column(String(255), primary_key=True) -# this class is created by masumotok class Host(BASE, NovaBase): """Represents a host where services are running""" __tablename__ = 'hosts' diff --git a/nova/livemigration_test/UT/computeManager.test.py b/nova/livemigration_test/UT/computeManager.test.py index 69ee876d1..829265733 100644 --- a/nova/livemigration_test/UT/computeManager.test.py +++ b/nova/livemigration_test/UT/computeManager.test.py @@ -170,7 +170,8 @@ class ComputeTestFunctions(unittest.TestCase): # mocks for pre_live_migration self.ctxt = context.get_admin_context() db.instance_get = Mock(return_value=self.instance1) - db.volume_get_by_ec2_id = Mock(return_value=[self.vol1, self.vol2]) + db.volume_get_all_by_instance \ + = Mock(return_value=[self.vol1, self.vol2]) db.volume_get_shelf_and_blade = Mock(return_value=(3, 4)) db.instance_get_fixed_address = Mock(return_value=self.fixed_ip1) db.security_group_get_by_instance \ @@ -199,7 +200,7 @@ class ComputeTestFunctions(unittest.TestCase): def test02(self): """02: NotAuthrized occurs on finding volume on DB. 
""" - db.volume_get_by_ec2_id \ + db.volume_get_all_by_instance \ = Mock(side_effect=exception.NotAuthorized('ERR')) self.assertRaises(exception.NotAuthorized, @@ -211,7 +212,7 @@ class ComputeTestFunctions(unittest.TestCase): def test03(self): """03: Unexpected exception occurs on finding volume on DB. """ - db.volume_get_by_ec2_id = Mock(side_effect=TypeError('ERR')) + db.volume_get_all_by_instance = Mock(side_effect=TypeError('ERR')) self.assertRaises(TypeError, self.manager.pre_live_migration, @@ -222,7 +223,6 @@ class ComputeTestFunctions(unittest.TestCase): def test04(self): """04: no volume and fixed ip found on DB, """ - db.volume_get_by_ec2_id = Mock(side_effect=exception.NotFound('ERR')) db.instance_get_fixed_address = Mock(return_value=None) self.assertRaises(rpc.RemoteError, @@ -230,10 +230,6 @@ class ComputeTestFunctions(unittest.TestCase): self.ctxt, 'dummy_ec2_id', 'host2') - - c1 = (0 <= sys.stderr.buffer.find('has no volume')) - - self.assertEqual(c1, True) def test05(self): """05: volume found and no fixed_ip found on DB. """ diff --git a/nova/livemigration_test/UT/nova-manage.test.py b/nova/livemigration_test/UT/nova-manage.test.py index 6db15cea0..257728386 100644 --- a/nova/livemigration_test/UT/nova-manage.test.py +++ b/nova/livemigration_test/UT/nova-manage.test.py @@ -97,6 +97,9 @@ class NovaManageTestFunctions(unittest.TestCase): # prepare test data self.setTestData() + # only AoE is supported for now + FLAGS.volume_driver = 'nova.volume.driver.AOEDriver' + def setTestData(self): import bin.novamanagetest diff --git a/nova/network/manager.py b/nova/network/manager.py index 5ebc52e77..13c9b2d8c 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -474,7 +474,6 @@ class VlanManager(NetworkManager): """Returns a fixed ip to the pool.""" self.db.fixed_ip_update(context, address, {'allocated': False}) - #def setup_compute_network(self, context, instance_id): def setup_compute_network(self, context, instance_id, network_ref=None): """Sets up matching network for compute hosts.""" if network_ref is None: diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 04061e38e..106c6ab7d 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -160,6 +160,15 @@ class Scheduler(object): power_state.PAUSED, 'migrating') + # Changing volume state + try: + for vol in db.volume_get_all_by_instance(context, instance_id): + db.volume_update(context, + vol['id'], + {'status': 'migrating'}) + except exception.NotFound: + pass + # Requesting live migration. return src diff --git a/nova/service.py b/nova/service.py index 04a60877e..7132a67b5 100644 --- a/nova/service.py +++ b/nova/service.py @@ -141,7 +141,7 @@ class Service(object): 'local_gb': local_gb, 'hypervisor_type': hypervisor, 'hypervisor_version': version, - 'cpu_info':cpu_xml }) + 'cpu_info': cpu_xml}) return host_ref def __getattr__(self, key): diff --git a/nova/service.py.THIS b/nova/service.py.THIS deleted file mode 100644 index 416448faa..000000000 --- a/nova/service.py.THIS +++ /dev/null @@ -1,222 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A service is a very thin wrapper around a Manager object. It exposes the -manager's public methods to other components of the system via rpc. It will -report state periodically to the database and is responsible for initiating -any periodic tasts that need to be executed on a given host. - -This module contains Service, a generic baseclass for all workers. -""" - -import inspect -import logging -import os - -from twisted.internet import defer -from twisted.internet import task -from twisted.application import service - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import rpc -from nova import utils - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('report_interval', 10, - 'seconds between nodes reporting state to datastore', - lower_bound=1) - -flags.DEFINE_integer('periodic_interval', 60, - 'seconds between running periodic tasks', - lower_bound=1) - - -class Service(object, service.Service): - """Base class for workers that run on hosts.""" - - def __init__(self, host, binary, topic, manager, report_interval=None, - periodic_interval=None, *args, **kwargs): - self.host = host - self.binary = binary - self.topic = topic - self.manager_class_name = manager - self.report_interval = report_interval - self.periodic_interval = periodic_interval - super(Service, self).__init__(*args, **kwargs) - self.saved_args, self.saved_kwargs = args, kwargs - - def startService(self): # pylint: disable-msg C0103 - manager_class = utils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, *self.saved_args, - **self.saved_kwargs) - self.manager.init_host() - self.model_disconnected = False - ctxt = context.get_admin_context() - - try: - host_ref = db.host_get_by_name(ctxt, self.host) - except exception.NotFound: - host_ref = db.host_create(ctxt, {'name': self.host}) - host_ref = self._update_host_ref(ctxt, host_ref) - - try: - service_ref = db.service_get_by_args(ctxt, - self.host, - self.binary) - self.service_id = service_ref['id'] - except exception.NotFound: - self._create_service_ref(ctxt) - - conn = rpc.Connection.instance() - if self.report_interval: - consumer_all = rpc.AdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - consumer_node = rpc.AdapterConsumer( - connection=conn, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - - consumer_all.attach_to_twisted() - consumer_node.attach_to_twisted() - - pulse = task.LoopingCall(self.report_state) - pulse.start(interval=self.report_interval, now=False) - - if self.periodic_interval: - pulse = task.LoopingCall(self.periodic_tasks) - pulse.start(interval=self.periodic_interval, now=False) - - def _create_service_ref(self, context): - service_ref = db.service_create(context, - {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0}) - self.service_id = service_ref['id'] - - def _update_host_ref(self, context, host_ref): - - if 0 <= self.manager_class_name.find('ComputeManager'): - vcpu = self.manager.driver.get_vcpu_number() - memory_mb = self.manager.get_memory_mb() - local_gb = 
self.manager.get_local_gb() - hypervisor = self.manager.driver.get_hypervisor_type() - version = self.manager.driver.get_hypervisor_version() - cpu_xml = self.manager.driver.get_cpu_xml() - - db.host_update(context, - host_ref['id'], - {'vcpus': vcpu, - 'memory_mb': memory_mb, - 'local_gb': local_gb, - 'hypervisor_type': hypervisor, - 'hypervisor_version': version, - 'cpu_info':cpu_xml }) - return host_ref - - def __getattr__(self, key): - manager = self.__dict__.get('manager', None) - return getattr(manager, key) - - @classmethod - def create(cls, - host=None, - binary=None, - topic=None, - manager=None, - report_interval=None, - periodic_interval=None): - """Instantiates class and passes back application object. - - Args: - host, defaults to FLAGS.host - binary, defaults to basename of executable - topic, defaults to bin_name - "nova-" part - manager, defaults to FLAGS._manager - report_interval, defaults to FLAGS.report_interval - periodic_interval, defaults to FLAGS.periodic_interval - """ - if not host: - host = FLAGS.host - if not binary: - binary = os.path.basename(inspect.stack()[-1][1]) - if not topic: - topic = binary.rpartition("nova-")[2] - if not manager: - manager = FLAGS.get('%s_manager' % topic, None) - if not report_interval: - report_interval = FLAGS.report_interval - if not periodic_interval: - periodic_interval = FLAGS.periodic_interval - logging.warn("Starting %s node", topic) - service_obj = cls(host, binary, topic, manager, - report_interval, periodic_interval) - - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals. - application = service.Application(binary) - service_obj.setServiceParent(application) - return application - - def kill(self): - """Destroy the service object in the datastore""" - try: - db.service_destroy(context.get_admin_context(), self.service_id) - except exception.NotFound: - logging.warn("Service killed that has no database entry") - - @defer.inlineCallbacks - def periodic_tasks(self): - """Tasks to be run at a periodic interval""" - yield self.manager.periodic_tasks(context.get_admin_context()) - - @defer.inlineCallbacks - def report_state(self): - """Update the state of this service in the datastore.""" - ctxt = context.get_admin_context() - try: - try: - service_ref = db.service_get(ctxt, self.service_id) - except exception.NotFound: - logging.debug("The service database object disappeared, " - "Recreating it.") - self._create_service_ref(ctxt) - service_ref = db.service_get(ctxt, self.service_id) - - db.service_update(ctxt, - self.service_id, - {'report_count': service_ref['report_count'] + 1}) - - # TODO(termie): make this pattern be more elegant. - if getattr(self, "model_disconnected", False): - self.model_disconnected = False - logging.error("Recovered model server connection!") - - # TODO(vish): this should probably only catch connection errors - except Exception: # pylint: disable-msg=W0702 - if not getattr(self, "model_disconnected", False): - self.model_disconnected = True - logging.exception("model server went away") - yield diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d1a53f275..044e6584c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -695,8 +695,9 @@ class LibvirtConnection(object): xmlstr = self._conn.getCapabilities() xml = libxml2.parseDoc(xmlstr) nodes = xml.xpathEval('//cpu') - if 1 != len(nodes): - msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' 
% len(nodes) + if 1 != len(nodes): + msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' \ + % len(nodes) msg += '\n' + xml.serialize() raise exception.Invalid(_(msg)) cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize()) @@ -735,8 +736,8 @@ class LibvirtConnection(object): except libvirt.libvirtError: return False - def compareCPU(self, xml): - """ + def compareCPU(self, xml): + """ Check the host cpu is compatible to a cpu given by xml. "xml" must be a part of libvirt.openReadonly().getCapabilities(). return values follows by virCPUCompareResult. @@ -747,9 +748,9 @@ class LibvirtConnection(object): return self._conn.compareCPU(xml, 0) def live_migration(self, context, instance_ref, dest): - """ - Just spawning live_migration operation for - distributing high-load. + """ + Just spawning live_migration operation for + distributing high-load. """ greenthread.spawn(self._live_migration, context, instance_ref, dest) @@ -757,14 +758,21 @@ class LibvirtConnection(object): """ Do live migration.""" # Do live migration. - try: + try: uri = FLAGS.live_migration_uri % dest out, err = utils.execute("sudo virsh migrate --live %s %s" % (instance_ref.name, uri)) - except exception.ProcessExecutionError: + except exception.ProcessExecutionError: id = instance_ref['id'] db.instance_set_state(context, id, power_state.RUNNING, 'running') - raise + try: + for volume in db.volume_get_all_by_instance(context, id): + db.volume_update(context, + volume['id'], + {'status': 'in-use'}) + except exception.NotFound: + pass + raise exception.ProcessExecutionError # Waiting for completion of live_migration. timer = utils.LoopingCall(f=None) @@ -781,7 +789,7 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) def _post_live_migration(self, context, instance_ref, dest): - """ + """ Post operations for live migration. Mainly, database updating. """ @@ -808,13 +816,14 @@ class LibvirtConnection(object): db.network_update(context, network_ref['id'], {'host': dest}) try: - floating_ip = db.instance_get_floating_address(context, instance_id) + floating_ip \ + = db.instance_get_floating_address(context, instance_id) # Not return if floating_ip is not found, otherwise, # instance never be accessible.. 
if None == floating_ip: logging.error('floating_ip is not found for %s ' % ec2_id) - else: - floating_ip_ref = db.floating_ip_get_by_address(context, + else: + floating_ip_ref = db.floating_ip_get_by_address(context, floating_ip) db.floating_ip_update(context, floating_ip_ref['address'], @@ -832,6 +841,14 @@ class LibvirtConnection(object): 'state': power_state.RUNNING, 'host': dest}) + try: + for volume in db.volume_get_all_by_instance(context, instance_id): + db.volume_update(context, + volume['id'], + {'status': 'in-use'}) + except exception.NotFound: + pass + logging.info(_('Live migrating %s to %s finishes successfully') % (ec2_id, dest)) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 8353b9712..aa40922e4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -118,7 +118,7 @@ class VolumeDriver(object): """Removes an export for a logical volume.""" raise NotImplementedError() - def discover_volume(self, volume): + def discover_volume(self, context, volume): """Discover volume on a remote host.""" raise NotImplementedError() @@ -180,15 +180,35 @@ class AOEDriver(VolumeDriver): self._try_execute("sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) - def discover_volume(self, _volume): + def discover_volume(self, context, volume): """Discover volume on a remote host.""" self._execute("sudo aoe-discover") self._execute("sudo aoe-stat", check_exit_code=False) + shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, + volume['id']) + return "/dev/etherd/e%s.%s" % (shelf_id, blade_id) def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" pass + def check_for_export(self, context, volume_id): + """Make sure whether volume is exported.""" + (shelf_id, + blade_id) = self.db.volume_get_shelf_and_blade(context, + volume_id) + (out, _err) = self._execute("sudo vblade-persist ls --no-header") + exists = False + for line in out.split('\n'): + param = line.split(' ') + if len(param) == 6 and param[0] == str(shelf_id) \ + and param[1] == str(blade_id) and param[-1] == "run": + exists = True + break + if not exists: + logging.warning(_("vblade process for e%s.%s isn't running.") + % (shelf_id, blade_id)) + class FakeAOEDriver(AOEDriver): """Logs calls instead of executing.""" @@ -272,7 +292,7 @@ class ISCSIDriver(VolumeDriver): iscsi_portal = location.split(",")[0] return (iscsi_name, iscsi_portal) - def discover_volume(self, volume): + def discover_volume(self, _context, volume): """Discover volume on a remote host.""" iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], volume['host']) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 966334c50..03b757d81 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -137,7 +137,7 @@ class VolumeManager(manager.Manager): if volume_ref['host'] == self.host and FLAGS.use_local_volumes: path = self.driver.local_path(volume_ref) else: - path = self.driver.discover_volume(volume_ref) + path = self.driver.discover_volume(context, volume_ref) return path def remove_compute_volume(self, context, volume_id): @@ -148,3 +148,13 @@ class VolumeManager(manager.Manager): return True else: self.driver.undiscover_volume(volume_ref) + + def check_for_export(self, context, instance_id): + """Make sure whether volume is exported.""" + if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver': + try: + for vol in self.db.volume_get_all_by_instance(context, + instance_id): + self.driver.check_for_export(context, vol['id']) + except exception.NotFound: + pass -- 
cgit From a96c12f13421a7c27e7cb1459f73ca4bd5cdf917 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 11 Jan 2011 16:11:08 +0000 Subject: fixed issue in pluginlib_nova.py --- plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index 17fcd474e..8e7a829d5 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -154,7 +154,7 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only): return vdi_ref -def with_vdi_in_dom0(session, vdi, read_only, f,args=None): +def with_vdi_in_dom0(session, vdi, read_only, f): dom0 = get_domain_0(session) vbd_rec = {} vbd_rec['VM'] = dom0 @@ -176,7 +176,7 @@ def with_vdi_in_dom0(session, vdi, read_only, f,args=None): logging.debug('Plugging VBD %s ... ', vbd) session.xenapi.VBD.plug(vbd) logging.debug('Plugging VBD %s done.', vbd) - return f(session.xenapi.VBD.get_device(vbd),args) + return f(session.xenapi.VBD.get_device(vbd)) finally: logging.debug('Destroying VBD for VDI %s ... ', vdi) vbd_unplug_with_retry(session, vbd) -- cgit From 32316a4ed6b4c175af8dd8ad3c345e54f161fd1c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 11 Jan 2011 17:07:24 -0800 Subject: use by-path instead of custom udev script --- nova/volume/driver.py | 2 +- tools/iscsidev.sh | 41 ----------------------------------------- 2 files changed, 1 insertion(+), 42 deletions(-) delete mode 100755 tools/iscsidev.sh diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 477e0abf4..30d42af04 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -282,7 +282,7 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo iscsiadm -m node -T %s -p %s --op update " "-n node.startup -v automatic" % (iscsi_name, iscsi_portal)) - return "/dev/iscsi/%s" % volume['name'] + return "/dev/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, iscsi_name) def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" diff --git a/tools/iscsidev.sh b/tools/iscsidev.sh deleted file mode 100755 index 6f5b572df..000000000 --- a/tools/iscsidev.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(vish): This script helps udev create common names for discovered iscsi -# volumes under /dev/iscsi. To use it, create /dev/iscsi and add -# a file to /etc/udev/rules.d like so: -# mkdir /dev/iscsi -# echo 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/path/to/iscsidev.sh -# %b",SYMLINK+="iscsi/%c%n"' > /etc/udev/rules.d/55-openiscsi.rules - -BUS=${1} -HOST=${BUS%%:*} - -if [ ! 
-e /sys/class/iscsi_host ]; then - exit 1 -fi - -file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname" - -target_name=$(cat ${file}) - -if [ -z "${target_name}" ]; then - exit 1 -fi - -echo "${target_name##*:}" -- cgit From 0cef0fe332859ce8b46d5d8d1745a63d377e1052 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 11 Jan 2011 17:21:39 -0800 Subject: more useful prefix and fix typo in string --- nova/volume/driver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 30d42af04..94e2aacf4 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -48,7 +48,7 @@ flags.DEFINE_integer('iscsi_num_targets', 'Number of iscsi target ids per host') flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:', 'prefix for iscsi volumes') -flags.DEFINE_string('iscsi_ip_prefix', '127.0', +flags.DEFINE_string('iscsi_ip_prefix', '$my_ip', 'discover volumes on the ip that starts with this prefix') @@ -282,7 +282,8 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo iscsiadm -m node -T %s -p %s --op update " "-n node.startup -v automatic" % (iscsi_name, iscsi_portal)) - return "/dev/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, iscsi_name) + return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, + iscsi_name) def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" -- cgit From 2f9ac0fd02115ff9af2e96f5a92f3442d273c6b0 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 02:41:44 +0000 Subject: Fixed test environments. Fixed bugs in _fetch_image_objecstore and _lookup_image_objcestore (objectstore was broken!) Added tests for glance NEED TO: - add SR & PBD records to fake xenapi session for find_sr to work - fake somehow stream in _fetch_image_glance --- nova/tests/test_xenapi.py | 26 ++++++++++++++++++++++---- nova/tests/xenapi/stubs.py | 11 +++++++++++ nova/virt/xenapi/vm_utils.py | 22 +++++++++++++--------- nova/virt/xenapi/vmops.py | 7 ++++++- 4 files changed, 52 insertions(+), 14 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 7c256968f..5829fa452 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -48,6 +48,7 @@ class XenAPIVolumeTestCase(test.TestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' db_fakes.stub_out_db_instance_api(self.stubs) + stubs.stubout_glance_client(self.stubs) stubs.stub_out_get_target(self.stubs) xenapi_fake.reset() self.values = {'name': 1, 'id': 1, @@ -104,6 +105,7 @@ class XenAPIVolumeTestCase(test.TestCase): def test_attach_volume(self): """ This shows how to test Ops classes' methods """ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) + stubs.stubout_glance_client(self.stubs) conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) @@ -126,6 +128,7 @@ class XenAPIVolumeTestCase(test.TestCase): """ This shows how to test when exceptions are raised """ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeFailedTests) + stubs.stubout_glance_client(self.stubs) conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) @@ -159,6 +162,7 @@ class XenAPIVMTestCase(test.TestCase): db_fakes.stub_out_db_instance_api(self.stubs) xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + 
stubs.stubout_glance_client(self.stubs) self.conn = xenapi_conn.get_connection(False) def test_list_instances_0(self): @@ -214,8 +218,8 @@ class XenAPIVMTestCase(test.TestCase): vm_info = conn.get_info(1) # Get XenAPI record for VM - vms = fake.get_all('VM') - vm = fake.get_record('VM', vms[0]) + vms = xenapi_fake.get_all('VM') + vm = xenapi_fake.get_record('VM', vms[0]) # Check that m1.large above turned into the right thing. instance_type = instance_types.INSTANCE_TYPES['m1.large'] @@ -238,7 +242,9 @@ class XenAPIVMTestCase(test.TestCase): def _test_spawn(self, image_id, kernel_id, ramdisk_id): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + stubs.stubout_glance_client(self.stubs) values = {'name': 1, + 'id':1, 'project_id': self.project.id, 'user_id': self.user.id, 'image_id': image_id, @@ -252,12 +258,24 @@ class XenAPIVMTestCase(test.TestCase): conn.spawn(instance) self.check_vm_record(conn) - def test_spawn_raw(self): + def test_spawn_raw_objectstore(self): + FLAGS.xenapi_image_service='objectstore' self._test_spawn(1, None, None) - def test_spawn(self): + def test_spawn_objectstore(self): + FLAGS.xenapi_image_service='objectstore' self._test_spawn(1, 2, 3) + def test_spawn_raw_glance(self): + xenapi_fake._create_sr('SR',['','',{'other_config':{'i18n-key':'local-storage'}},'', + '','','iscsi']) + FLAGS.xenapi_image_service='glance' + self._test_spawn(1, None, None) + + def test_spawn_glance(self): + FLAGS.xenapi_image_service='glance' + self._test_spawn(1, 2, 3) + def tearDown(self): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 55f751f11..1b6cf1182 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -17,6 +17,7 @@ """Stubouts, mocks and fixtures for the test suite""" from nova.virt import xenapi_conn +from nova.virt.xenapi import vmops from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils @@ -69,6 +70,16 @@ def stubout_instance_snapshot(stubs): stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) +def stubout_glance_client(stubs): + """Stubs out glance import method for importing fake client""" + def fake_import(self): + """Stubs out get_imported_xenapi of XenAPISession""" + fake_module = 'nova.tests.glance.fake_client' + from_list = ['fake_client'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(vmops.VMOps, '_get_imported_glance',fake_import) + def stubout_session(stubs, cls): """Stubs out two methods from XenAPISession""" def fake_import(self): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4f2c754fa..76b58b247 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ -import glance.client import logging import os import pickle @@ -73,6 +72,8 @@ class VMHelper(HelperBase): The class that wraps the helper methods together. """ + Glance = None + @classmethod def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False): """Create a VM record. 
Returns a Deferred that gives the new @@ -297,7 +298,7 @@ class VMHelper(HelperBase): access, type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, type) + user,access, type) @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, type): @@ -305,7 +306,7 @@ class VMHelper(HelperBase): if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') - c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + c = cls.Glance.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = c.get_image(image) virtual_size = int(meta['size']) @@ -350,8 +351,8 @@ class VMHelper(HelperBase): return session.get_xenapi().VDI.get_uuid(vdi) @classmethod - def _fetch_image_objectstore(cls, session, instance_id, image, access, - type): + def _fetch_image_objectstore(cls, session, instance_id, image, + user,access,type): url = images.image_url(image) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' @@ -370,20 +371,21 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, vdi_ref): + def lookup_image(cls, session, instance_id,vdi_ref): if FLAGS.xenapi_image_service == 'glance': return cls._lookup_image_glance(session, vdi_ref) else: - return cls._lookup_image_objectstore(session, vdi_ref) + return cls._lookup_image_objectstore(session, instance_id,vdi_ref) @classmethod - def _lookup_image_objectstore(cls, session, vdi_ref): + def _lookup_image_objectstore(cls, session, instance_id,vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task) + pv_str = session.wait_for_task(instance_id,task) + pv = None if pv_str.lower() == 'true': pv = True elif pv_str.lower() == 'false': @@ -580,10 +582,12 @@ def get_vdi_for_vm_safely(session, vm_ref): def find_sr(session): + logging.warning("IN find_sr") host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() for sr in srs: sr_rec = session.get_xenapi().SR.get_record(sr) + logging.warning("HERE: %s",sr_rec['uuid']) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b6d620782..cec8ecdcc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -42,6 +42,11 @@ class VMOps(object): self.XenAPI = session.get_imported_xenapi() self._session = session VMHelper.XenAPI = self.XenAPI + VMHelper.Glance = self._get_imported_glance() + + def _get_imported_glance(self): + """Stubout point. This can be replaced with a mock glance module.""" + return __import__('glance') def list_instances(self): """List VM instances""" @@ -77,7 +82,7 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id,vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, -- cgit From 6f9408d7ac38d5c857e1e1cdd92c49e000742734 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Wed, 12 Jan 2011 11:08:08 +0000 Subject: Added unit tests for the xenapi-glance integration. 
This adds a glance simulator that can stub in place of glance.client.Client, and enhances the xapi simulator to add the additional calls that the Glance-specific path requires. The test itself is just the spawn test, but now we run through with xenapi_image_service set to "objectstore", and then again set to "glance". --- nova/tests/glance/__init__.py | 20 ++++++++++ nova/tests/glance/stubs.py | 37 +++++++++++++++++++ nova/tests/test_xenapi.py | 28 ++++++++++---- nova/tests/xenapi/stubs.py | 24 +++++++++--- nova/virt/xenapi/fake.py | 86 ++++++++++++++++++++++++++++++++++++++----- nova/virt/xenapi/vm_utils.py | 35 +++++++++--------- 6 files changed, 191 insertions(+), 39 deletions(-) create mode 100644 nova/tests/glance/__init__.py create mode 100644 nova/tests/glance/stubs.py diff --git a/nova/tests/glance/__init__.py b/nova/tests/glance/__init__.py new file mode 100644 index 000000000..ef9fa05a7 --- /dev/null +++ b/nova/tests/glance/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`glance` -- Stubs for Glance +================================= +""" diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py new file mode 100644 index 000000000..2ac5653bb --- /dev/null +++ b/nova/tests/glance/stubs.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import StringIO + +import glance.client + + +def stubout_glance_client(stubs, cls): + """Stubs out glance.client.Client""" + stubs.Set(glance.client, 'Client', + lambda *args, **kwargs: cls(*args, **kwargs)) + + +class FakeGlance(object): + def __init__(self, host, port=None, use_ssl=False): + pass + + def get_image(self, image): + meta = { + 'size': 0, + } + image_file = StringIO.StringIO('') + return meta, image_file diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ec9462ada..4ddfe6829 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -33,6 +33,7 @@ from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs +from nova.tests.glance import stubs as glance_stubs FLAGS = flags.FLAGS @@ -107,18 +108,16 @@ class XenAPIVolumeTestCase(test.TestCase): conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) - xenapi_fake.create_vm(instance.name, 'Running') + vm = xenapi_fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it - # Get XenAPI reference for the VM - vms = xenapi_fake.get_all('VM') # Get XenAPI record for VBD vbds = xenapi_fake.get_all('VBD') vbd = xenapi_fake.get_record('VBD', vbds[0]) vm_ref = vbd['VM'] - self.assertEqual(vm_ref, vms[0]) + self.assertEqual(vm_ref, vm) check() @@ -156,9 +155,14 @@ class XenAPIVMTestCase(test.TestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' xenapi_fake.reset() + xenapi_fake.create_local_srs() db_fakes.stub_out_db_instance_api(self.stubs) xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + stubs.stubout_get_this_vm_uuid(self.stubs) + stubs.stubout_stream_disk(self.stubs) + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) def test_list_instances_0(self): @@ -206,7 +210,15 @@ class XenAPIVMTestCase(test.TestCase): check() - def test_spawn(self): + def test_spawn_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn() + + def test_spawn_objectstore(self): + FLAGS.xenapi_image_service = 'objectstore' + self._test_spawn() + + def _test_spawn(self): instance = self._create_instance() def check(): @@ -217,8 +229,10 @@ class XenAPIVMTestCase(test.TestCase): vm_info = self.conn.get_info(1) # Get XenAPI record for VM - vms = xenapi_fake.get_all('VM') - vm = xenapi_fake.get_record('VM', vms[0]) + vms = [rec for ref, rec + in xenapi_fake.get_all_records('VM').iteritems() + if not rec['is_control_domain']] + vm = vms[0] # Check that m1.large above turned into the right thing. 
instance_type = instance_types.INSTANCE_TYPES['m1.large'] diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 55f751f11..ffbca9560 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -91,6 +91,21 @@ def stub_out_get_target(stubs): stubs.Set(volume_utils, '_get_target', fake_get_target) +def stubout_get_this_vm_uuid(stubs): + def f(): + vms = [rec['uuid'] for ref, rec + in fake.get_all_records('VM').iteritems() + if rec['is_control_domain']] + return vms[0] + stubs.Set(vm_utils, 'get_this_vm_uuid', f) + + +def stubout_stream_disk(stubs): + def f(_): + pass + stubs.Set(vm_utils, '_stream_disk', f) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): @@ -100,7 +115,10 @@ class FakeSessionForVMTests(fake.SessionBase): return self.xenapi.network.get_all_records() def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + return '%s' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) @@ -135,10 +153,6 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, ref): - rec = fake.get_record('VBD', ref) - rec['currently-attached'] = True - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): valid_vdi = False diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index aa4026f97..cd7c96b22 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -74,6 +74,7 @@ def reset(): for c in _CLASSES: _db_content[c] = {} create_host('fake') + create_vm('fake', 'Running', is_a_template=False, is_control_domain=True) def create_host(name_label): @@ -134,14 +135,20 @@ def create_vdi(name_label, read_only, sr_ref, sharable): def create_vbd(vm_ref, vdi_ref): - vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref} + vbd_rec = { + 'VM': vm_ref, + 'VDI': vdi_ref, + 'currently_attached': False, + } vbd_ref = _create_object('VBD', vbd_rec) after_VBD_create(vbd_ref, vbd_rec) return vbd_ref def after_VBD_create(vbd_ref, vbd_rec): - """Create backref from VM to VBD when VBD is created""" + """Create read-only fields and backref from VM to VBD when VBD is created""" + vbd_rec['currently_attached'] = False + vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] vm_rec['VBDs'] = [vbd_ref] @@ -150,9 +157,10 @@ def after_VBD_create(vbd_ref, vbd_rec): vbd_rec['vm_name_label'] = vm_name_label -def create_pbd(config, sr_ref, attached): +def create_pbd(config, host_ref, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'host': host_ref, 'SR': sr_ref, 'currently-attached': attached, }) @@ -165,6 +173,33 @@ def create_task(name_label): }) +def create_local_srs(): + """Create an SR that looks like the one created on the local disk by + default by the XenServer installer. 
Do this one per host.""" + for host_ref in _db_content['host'].keys(): + _create_local_sr(host_ref) + + +def _create_local_sr(host_ref): + sr_ref = _create_object('SR', { + 'name_label': 'Local storage', + 'type': 'lvm', + 'content_type': 'user', + 'shared': False, + 'physical_size': str(1 << 30), + 'physical_utilisation': str(0), + 'virtual_allocation': str(0), + 'other_config': { + 'i18n-original-value-name_label': 'Local storage', + 'i18n-key': 'local-storage', + }, + 'VDIs': [] + }) + pbd_ref = create_pbd('', host_ref, sr_ref, True) + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + return sr_ref + + def _create_object(table, obj): ref = str(uuid.uuid4()) obj['uuid'] = str(uuid.uuid4()) @@ -177,9 +212,10 @@ def _create_sr(table, obj): # Forces fake to support iscsi only if sr_type != 'iscsi': raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + host_ref = _db_content['host'].keys()[0] sr_ref = _create_object(table, obj[2]) vdi_ref = create_vdi('', False, sr_ref, False) - pbd_ref = create_pbd('', sr_ref, True) + pbd_ref = create_pbd('', host_ref, sr_ref, True) _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] _db_content['VDI'][vdi_ref]['SR'] = sr_ref @@ -231,6 +267,20 @@ class SessionBase(object): def __init__(self, uri): self._session = None + def VBD_plug(self, _1, ref): + rec = get_record('VBD', ref) + if rec['currently_attached']: + raise Failure(['DEVICE_ALREADY_ATTACHED', ref]) + rec['currently_attached'] = True + rec['device'] = rec['userdevice'] + + def VBD_unplug(self, _1, ref): + rec = get_record('VBD', ref) + if not rec['currently_attached']: + raise Failure(['DEVICE_ALREADY_DETACHED', ref]) + rec['currently_attached'] = False + rec['device'] = '' + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) @@ -287,6 +337,8 @@ class SessionBase(object): return lambda *params: self._getter(name, params) elif self._is_create(name): return lambda *params: self._create(name, params) + elif self._is_destroy(name): + return lambda *params: self._destroy(name, params) else: return None @@ -297,10 +349,16 @@ class SessionBase(object): bits[1].startswith(getter and 'get_' or 'set_')) def _is_create(self, name): + return self._is_method(name, 'create') + + def _is_destroy(self, name): + return self._is_method(name, 'destroy') + + def _is_method(self, name, meth): bits = name.split('.') return (len(bits) == 2 and bits[0] in _CLASSES and - bits[1] == 'create') + bits[1] == meth) def _getter(self, name, params): self._check_session(params) @@ -368,10 +426,9 @@ class SessionBase(object): _create_sr(cls, params) or _create_object(cls, params[1]) # Call hook to provide any fixups needed (ex. 
creating backrefs) - try: - globals()["after_%s_create" % cls](ref, params[1]) - except KeyError: - pass + after_hook = 'after_%s_create' % cls + if after_hook in globals(): + globals()[after_hook](ref, params[1]) obj = get_record(cls, ref) @@ -381,6 +438,15 @@ class SessionBase(object): return ref + def _destroy(self, name, params): + self._check_session(params) + self._check_arg_count(params, 2) + table, _ = name.split('.') + ref = params[1] + if ref not in _db_content[table]: + raise Failure(['HANDLE_INVALID', table, ref]) + del _db_content[table][ref] + def _async(self, name, params): task_ref = create_task(name) task = _db_content['task'][task_ref] @@ -418,7 +484,7 @@ class SessionBase(object): try: return result[0] except IndexError: - return None + raise Failure(['UUID_INVALID', v, result, recs, k]) return result diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 674459841..63f25f76c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -296,7 +296,7 @@ class VMHelper(HelperBase): access, type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, type) + access, user.secret, type) @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, type): @@ -318,18 +318,7 @@ class VMHelper(HelperBase): vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, vdi_size, False) - def stream(dev): - offset = 0 - if type == ImageType.DISK: - offset = MBR_SIZE_BYTES - _write_partition(virtual_size, dev) - - with open('/dev/%s' % dev, 'wb') as f: - f.seek(offset) - for chunk in image_file: - f.write(chunk) - - with_vdi_attached_here(session, vdi, False, stream) + with_vdi_attached_here(session, vdi, False, _stream_disk) if (type==ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's content into proper path fn = "copy_kernel_vdi" @@ -345,14 +334,14 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, - type): + secret, type): url = images.image_url(image) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = user.secret + args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' if type != ImageType.KERNEL_RAMDISK: @@ -629,7 +618,7 @@ def vbd_unplug_with_retry(session, vbd): session.get_xenapi().VBD.unplug(vbd) logging.debug(_('VBD.unplug successful first time.')) return - except XenAPI.Failure, e: + except VMHelper.XenAPI.Failure, e: if (len(e.details) > 0 and e.details[0] == 'DEVICE_DETACH_REJECTED'): logging.debug(_('VBD.unplug rejected: retrying...')) @@ -647,7 +636,7 @@ def vbd_unplug_with_retry(session, vbd): def ignore_failure(func, *args, **kwargs): try: return func(*args, **kwargs) - except XenAPI.Failure, e: + except VMHelper.XenAPI.Failure, e: logging.error(_('Ignoring XenAPI.Failure %s'), e) return None @@ -661,6 +650,18 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) +def _stream_disk(dev): + offset = 0 + if type == ImageType.DISK: + offset = MBR_SIZE_BYTES + _write_partition(virtual_size, dev) + + with open('/dev/%s' % dev, 'wb') as f: + f.seek(offset) + for chunk in image_file: + f.write(chunk) + + def _write_partition(virtual_size, dev): dest = '/dev/%s' % dev mbr_last = MBR_SIZE_SECTORS - 1 -- cgit From ba0f974c126c2a24ca6b1464ccc4a06be071b04e Mon Sep 17 00:00:00 2001 From: 
Ewan Mellor Date: Wed, 12 Jan 2011 11:54:58 +0000 Subject: PEP8 fixes, and switch to using the new LOG in vm_utils, matching what's just come in from trunk. --- nova/tests/glance/stubs.py | 2 +- nova/tests/test_xenapi.py | 14 +++++------ nova/virt/xenapi/fake.py | 3 ++- nova/virt/xenapi/vm_utils.py | 55 +++++++++++++++++++++----------------------- nova/virt/xenapi/vmops.py | 3 ++- 5 files changed, 37 insertions(+), 40 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 2ac5653bb..f182b857a 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -28,7 +28,7 @@ def stubout_glance_client(stubs, cls): class FakeGlance(object): def __init__(self, host, port=None, use_ssl=False): pass - + def get_image(self, image): meta = { 'size': 0, diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 19e550636..ce0bc002a 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -245,7 +245,7 @@ class XenAPIVMTestCase(test.TestCase): def _test_spawn(self, image_id, kernel_id, ramdisk_id): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) values = {'name': 1, - 'id':1, + 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, 'image_id': image_id, @@ -260,23 +260,21 @@ class XenAPIVMTestCase(test.TestCase): self.check_vm_record(conn) def test_spawn_raw_objectstore(self): - FLAGS.xenapi_image_service='objectstore' + FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, None, None) def test_spawn_objectstore(self): - FLAGS.xenapi_image_service='objectstore' + FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, 2, 3) def test_spawn_raw_glance(self): - xenapi_fake._create_sr('SR',['','',{'other_config':{'i18n-key':'local-storage'}},'', - '','','iscsi']) - FLAGS.xenapi_image_service='glance' + FLAGS.xenapi_image_service = 'glance' self._test_spawn(1, None, None) def test_spawn_glance(self): - FLAGS.xenapi_image_service='glance' + FLAGS.xenapi_image_service = 'glance' self._test_spawn(1, 2, 3) - + def tearDown(self): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 96333a58f..4bfaf4b57 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -148,7 +148,8 @@ def create_vbd(vm_ref, vdi_ref): def after_VBD_create(vbd_ref, vbd_rec): - """Create read-only fields and backref from VM to VBD when VBD is created""" + """Create read-only fields and backref from VM to VBD when VBD is + created.""" vbd_rec['currently_attached'] = False vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4e32c880e..7df00111e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" +import os import pickle import re import urllib @@ -229,8 +230,8 @@ class VMHelper(HelperBase): 'other_config': {}, 'sm_config': {}, 'tags': []}) - logging.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, - name_label, virtual_size, read_only, sr_ref) + LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) return vdi_ref @classmethod @@ -308,7 +309,7 @@ class VMHelper(HelperBase): virtual_size = int(meta['size']) vdi_size = virtual_size - logging.debug("Size for image %s:%d", image, virtual_size) + LOG.debug(_("Size for image %s:%d"), image, virtual_size) if type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES @@ -320,7 +321,7 @@ class VMHelper(HelperBase): if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path - logging.debug("Copying VDI %s to /boot/guest on dom0", vdi) + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) fn = "copy_kernel_vdi" args = {} args['vdi-ref'] = vdi @@ -330,7 +331,7 @@ class VMHelper(HelperBase): filename = session.wait_for_task(instance_id, task) #remove the VDI as it is not needed anymore session.get_xenapi().VDI.destroy(vdi) - logging.debug("Kernel/Ramdisk VDI %s destroyed", vdi) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) return filename else: return session.get_xenapi().VDI.get_uuid(vdi) @@ -339,7 +340,6 @@ class VMHelper(HelperBase): def _fetch_image_objectstore(cls, session, instance_id, image, access, secret, type): url = images.image_url(image) - access = AuthManager().get_access_key(user, project) LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} @@ -357,7 +357,7 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, instance_id,vdi_ref): + def lookup_image(cls, session, instance_id, vdi_ref): if FLAGS.xenapi_image_service == 'glance': return cls._lookup_image_glance(session, vdi_ref) else: @@ -370,7 +370,7 @@ class VMHelper(HelperBase): args = {} args['vdi-ref'] = vdi_ref task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(instance_id,task) + pv_str = session.wait_for_task(instance_id, task) pv = None if pv_str.lower() == 'true': pv = True @@ -381,18 +381,18 @@ class VMHelper(HelperBase): @classmethod def _lookup_image_glance(cls, session, vdi_ref): - logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) def is_vdi_pv(dev): - logging.debug("Running pygrub against %s", dev) + LOG.debug(_("Running pygrub against %s"), dev) output = os.popen('pygrub -qn /dev/%s' % dev) for line in output.readlines(): #try to find kernel string m = re.search('(?<=kernel:)/.*(?:>)', line) if m and m.group(0).find('xen') != -1: - logging.debug("Found Xen kernel %s" % m.group(0)) + LOG.debug(_("Found Xen kernel %s") % m.group(0)) return True - logging.debug("No Xen kernel found. Booting HVM.") + LOG.debug(_("No Xen kernel found. 
Booting HVM.")) return False return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) @@ -566,12 +566,10 @@ def get_vdi_for_vm_safely(session, vm_ref): def find_sr(session): - logging.warning("IN find_sr") host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() for sr in srs: sr_rec = session.get_xenapi().SR.get_record(sr) - logging.warning("HERE: %s",sr_rec['uuid']) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue @@ -590,7 +588,6 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' - logging.debug("read_only: %s", str(read_only)) vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False @@ -598,19 +595,19 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug(_('Creating VBD for VDI %s ... '), vdi) + LOG.debug(_('Creating VBD for VDI %s ... '), vdi) vbd = session.get_xenapi().VBD.create(vbd_rec) - logging.debug(_('Creating VBD for VDI %s done.'), vdi) + LOG.debug(_('Creating VBD for VDI %s done.'), vdi) try: - logging.debug(_('Plugging VBD %s ... '), vbd) + LOG.debug(_('Plugging VBD %s ... '), vbd) session.get_xenapi().VBD.plug(vbd) - logging.debug(_('Plugging VBD %s done.'), vbd) + LOG.debug(_('Plugging VBD %s done.'), vbd) return f(session.get_xenapi().VBD.get_device(vbd)) finally: - logging.debug(_('Destroying VBD for VDI %s ... '), vdi) + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) vbd_unplug_with_retry(session, vbd) ignore_failure(session.get_xenapi().VBD.destroy, vbd) - logging.debug(_('Destroying VBD for VDI %s done.'), vdi) + LOG.debug(_('Destroying VBD for VDI %s done.'), vdi) def vbd_unplug_with_retry(session, vbd): @@ -621,19 +618,19 @@ def vbd_unplug_with_retry(session, vbd): while True: try: session.get_xenapi().VBD.unplug(vbd) - logging.debug(_('VBD.unplug successful first time.')) + LOG.debug(_('VBD.unplug successful first time.')) return except VMHelper.XenAPI.Failure, e: if (len(e.details) > 0 and e.details[0] == 'DEVICE_DETACH_REJECTED'): - logging.debug(_('VBD.unplug rejected: retrying...')) + LOG.debug(_('VBD.unplug rejected: retrying...')) time.sleep(1) elif (len(e.details) > 0 and e.details[0] == 'DEVICE_ALREADY_DETACHED'): - logging.debug(_('VBD.unplug successful eventually.')) + LOG.debug(_('VBD.unplug successful eventually.')) return else: - logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), e) return @@ -642,7 +639,7 @@ def ignore_failure(func, *args, **kwargs): try: return func(*args, **kwargs) except VMHelper.XenAPI.Failure, e: - logging.error(_('Ignoring XenAPI.Failure %s'), e) + LOG.error(_('Ignoring XenAPI.Failure %s'), e) return None @@ -673,8 +670,8 @@ def _write_partition(virtual_size, dev): primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 - logging.debug('Writing partition table %d %d to %s...', - primary_first, primary_last, dest) + LOG.debug(_('Writing partition table %d %d to %s...'), + primary_first, primary_last, dest) def execute(cmd, process_input=None, check_exit_code=True): return utils.execute(cmd=cmd, @@ -685,4 +682,4 @@ def _write_partition(virtual_size, dev): execute('parted --script %s mkpart primary %ds %ds' % (dest, primary_first, primary_last)) - 
logging.debug('Writing partition table %s done.', dest) + LOG.debug(_('Writing partition table %s done.'), dest) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 34e3f9c9f..9ed8896b6 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -80,7 +80,8 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, instance.id, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, -- cgit From 7d56986366a349f5636f8de6018fb52e9befd440 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 14:17:22 +0000 Subject: Fix for _stream_disk --- nova/virt/xenapi/vm_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 7df00111e..4b8cec97b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -307,7 +307,6 @@ class VMHelper(HelperBase): meta, image_file = c.get_image(image) virtual_size = int(meta['size']) - vdi_size = virtual_size LOG.debug(_("Size for image %s:%d"), image, virtual_size) if type == ImageType.DISK: @@ -317,7 +316,8 @@ class VMHelper(HelperBase): vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, vdi_size, False) - with_vdi_attached_here(session, vdi, False, _stream_disk) + with_vdi_attached_here(session, vdi, False, + lambda dev:_stream_disk(dev,image_file)) if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path @@ -652,7 +652,7 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev): +def _stream_disk(dev,image_file): offset = 0 if type == ImageType.DISK: offset = MBR_SIZE_BYTES -- cgit From 32eac05776d18dcbde49aa022f149fd597907cbe Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 14:28:50 +0000 Subject: Fixing the stub for _stream_disk as well --- nova/tests/xenapi/stubs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 36a984f0e..9add7e592 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -125,7 +125,7 @@ def stubout_get_this_vm_uuid(stubs): def stubout_stream_disk(stubs): - def f(_): + def f(_1,_2): pass stubs.Set(vm_utils, '_stream_disk', f) -- cgit From f3dba791b9f10fec759dce0fe4e2abc214e3fd61 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 14:37:03 +0000 Subject: pep8 fixes --- nova/tests/xenapi/stubs.py | 2 +- nova/virt/xenapi/vm_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 9add7e592..0c0fe5bc9 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -125,7 +125,7 @@ def stubout_get_this_vm_uuid(stubs): def stubout_stream_disk(stubs): - def f(_1,_2): + def f(_1, _2): pass stubs.Set(vm_utils, '_stream_disk', f) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4b8cec97b..6f19f5970 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -317,7 +317,7 @@ class VMHelper(HelperBase): vdi_size, False) with_vdi_attached_here(session, vdi, False, - lambda dev:_stream_disk(dev,image_file)) + lambda dev: _stream_disk(dev, image_file)) if 
(type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path @@ -652,7 +652,7 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev,image_file): +def _stream_disk(dev, image_file): offset = 0 if type == ImageType.DISK: offset = MBR_SIZE_BYTES -- cgit From c71d5d41bb6e5d7a046a76563eed75a4d6e77e90 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 17:05:40 +0000 Subject: Fixed another issue in _stream_disk, as it did never execute _write_partition. Fixed fake method accordingly. Fixed pep8 errors. --- nova/tests/xenapi/stubs.py | 2 +- nova/virt/xenapi/vm_utils.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 0c0fe5bc9..624995ada 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -125,7 +125,7 @@ def stubout_get_this_vm_uuid(stubs): def stubout_stream_disk(stubs): - def f(_1, _2): + def f(_1, _2, _3, _4): pass stubs.Set(vm_utils, '_stream_disk', f) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6f19f5970..2c9d53858 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -317,7 +317,9 @@ class VMHelper(HelperBase): vdi_size, False) with_vdi_attached_here(session, vdi, False, - lambda dev: _stream_disk(dev, image_file)) + lambda dev: + _stream_disk(dev, type, + virtual_size, image_file)) if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path @@ -652,7 +654,7 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev, image_file): +def _stream_disk(dev, type, virtual_size, image_file): offset = 0 if type == ImageType.DISK: offset = MBR_SIZE_BYTES -- cgit From 4f5c0c64ec9d397048dfd7b8d5c007ec0fa39ec5 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 12 Jan 2011 16:57:04 -0800 Subject: add support for database migration --- bin/nova-manage | 20 +- nova/db/migration.py | 38 ++ nova/db/sqlalchemy/__init__.py | 28 -- nova/db/sqlalchemy/migrate_repo/README | 4 + nova/db/sqlalchemy/migrate_repo/__init__.py | 0 nova/db/sqlalchemy/migrate_repo/manage.py | 4 + nova/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../migrate_repo/versions/001_first_database.py | 547 +++++++++++++++++++++ .../migrate_repo/versions/002_update_to_trunk.py | 125 +++++ .../sqlalchemy/migrate_repo/versions/__init__.py | 0 nova/db/sqlalchemy/migration.py | 72 +++ nova/db/sqlalchemy/models.py | 45 -- nova/service.py | 13 - run_tests.py | 9 +- tools/pip-requires | 1 + 15 files changed, 838 insertions(+), 88 deletions(-) create mode 100644 nova/db/migration.py create mode 100644 nova/db/sqlalchemy/migrate_repo/README create mode 100644 nova/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 nova/db/sqlalchemy/migrate_repo/manage.py create mode 100644 nova/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 nova/db/sqlalchemy/migration.py diff --git a/bin/nova-manage b/bin/nova-manage index 3e290567c..c441fa7f2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -82,6 +82,7 @@ from nova import quota from nova import utils from nova.auth import manager from nova.cloudpipe 
import pipelib +from nova.db import migration logging.basicConfig() @@ -515,6 +516,22 @@ class LogCommands(object): print re.sub('#012', "\n", "\n".join(lines)) +class DbCommands(object): + """Class for managing the database.""" + + def __init__(self): + pass + + def sync(self, version=None): + """adds role to user + if project is specified, adds project specific role + arguments: user, role [project]""" + return migration.db_sync(version) + + def version(self): + print migration.db_version() + + CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -524,7 +541,8 @@ CATEGORIES = [ ('floating', FloatingIpCommands), ('network', NetworkCommands), ('service', ServiceCommands), - ('log', LogCommands)] + ('log', LogCommands), + ('db', DbCommands)] def lazy_match(name, key_value_tuples): diff --git a/nova/db/migration.py b/nova/db/migration.py new file mode 100644 index 000000000..e54b90cd8 --- /dev/null +++ b/nova/db/migration.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Database setup and migration commands.""" + +from nova import flags +from nova import utils + +FLAGS = flags.FLAGS +flags.DECLARE('db_backend', 'nova.db.api') + + +IMPL = utils.LazyPluggable(FLAGS['db_backend'], + sqlalchemy='nova.db.sqlalchemy.migration') + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 501373942..747015af5 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -15,31 +15,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -""" -SQLAlchemy database backend -""" -import time - -from sqlalchemy.exc import OperationalError - -from nova import flags -from nova import log as logging -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.db.sqlalchemy') - - -for i in xrange(FLAGS.sql_max_retries): - if i > 0: - time.sleep(FLAGS.sql_retry_interval) - - try: - models.register_models() - break - except OperationalError: - LOG.exception(_("Data store %s is unreachable." - " Trying again in %d seconds."), - FLAGS.sql_connection, FLAGS.sql_retry_interval) diff --git a/nova/db/sqlalchemy/migrate_repo/README b/nova/db/sqlalchemy/migrate_repo/README new file mode 100644 index 000000000..6218f8cac --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. 
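The new db commands stay deliberately thin: nova-manage db sync and db version call straight into nova.db.migration, which resolves the backend lazily through utils.LazyPluggable so that merely importing the module does not drag in SQLAlchemy at flag-parsing time. The class below is an illustrative stand-in for that lazy dispatch, not the real utils.LazyPluggable (its implementation is not part of this patch); the 'json' target is used only so the sketch runs outside the nova tree, where nova would key on FLAGS.db_backend and load 'nova.db.sqlalchemy.migration'.

    # Illustrative stand-in for utils.LazyPluggable: the backend module is
    # imported only when one of its attributes is first used.
    import importlib


    class LazyBackend(object):
        def __init__(self, module_name):
            self._module_name = module_name
            self._module = None

        def __getattr__(self, attr):
            if self._module is None:
                self._module = importlib.import_module(self._module_name)
            return getattr(self._module, attr)


    # nova points this at 'nova.db.sqlalchemy.migration' keyed on
    # FLAGS.db_backend; 'json' is used here only to keep the sketch runnable.
    IMPL = LazyBackend('json')


    def db_sync_sketch(version=None):
        # Mirrors db_sync(): forward to whatever backend IMPL resolves to.
        return IMPL.dumps({'requested_version': version})


    print db_sync_sketch(2)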
+ +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/nova/db/sqlalchemy/migrate_repo/__init__.py b/nova/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/db/sqlalchemy/migrate_repo/manage.py b/nova/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 000000000..74c09ae4a --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/nova/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 000000000..2c75fb763 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=nova + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py b/nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py new file mode 100644 index 000000000..8a60bd890 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py @@ -0,0 +1,547 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
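The version scripts that follow are long but mechanical: every table is declared against a module-level MetaData, and upgrade()/downgrade() bind that metadata to the engine sqlalchemy-migrate passes in before creating or dropping tables. Below is a minimal sketch of the same shape; the widgets table is hypothetical and exists only to show the pattern.

    # Minimal sqlalchemy-migrate version script, same shape as the 001/002
    # scripts in this commit; the 'widgets' table is purely illustrative.
    from sqlalchemy import (Boolean, Column, DateTime, Integer, MetaData,
                            String, Table)

    meta = MetaData()

    widgets = Table('widgets', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('name', String(length=255)),
            )


    def upgrade(migrate_engine):
        # Bind the engine handed in by sqlalchemy-migrate; never build one here.
        meta.bind = migrate_engine
        widgets.create()


    def downgrade(migrate_engine):
        meta.bind = migrate_engine
        widgets.drop()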
+ +## Table code mostly autogenerated by genmodel.py +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', Boolean(create_constraint=True, name=None)), + ) + + +floating_ips = Table('floating_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), 
+ Column('admin_pass', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + 
Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + +quotas = Table('quotas', meta, + Column('created_at', 
DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + +security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +security_group_inst_assoc = Table('security_group_instance_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + +security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('protocol', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + +services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), 
nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + +users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + +user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + +user_project_role_association = Table('user_project_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + +user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + +volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, 
name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py b/nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py new file mode 100644 index 000000000..f9468f005 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py @@ -0,0 +1,125 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey to succeed +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# +instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('error', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + +certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances_availability_zone = Column( + 'availability_zone', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +instances_locked = Column('locked', + Boolean(create_constraint=True, name=None)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (instance_actions, certificates): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + auth_tokens.c.user_id.alter(type=String(length=None, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + instances_availability_zone.create(table=instances) + instances_locked.create(table=instances) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py new file mode 100644 index 000000000..33d14827b --- /dev/null +++ b/nova/db/sqlalchemy/migration.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from nova import flags + +import sqlalchemy +from migrate.versioning import api as versioning_api +from migrate.versioning import exceptions as versioning_exceptions + +FLAGS = flags.FLAGS + + +def db_sync(version=None): + db_version() + repo_path = _find_migrate_repo() + return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version) + + +def db_version(): + repo_path = _find_migrate_repo() + try: + return versioning_api.db_version(FLAGS.sql_connection, repo_path) + except versioning_exceptions.DatabaseNotControlledError: + # If we aren't version controlled we may already have the database + # in the state from before we started version control, check for that + # and set up version_control appropriately + meta = sqlalchemy.MetaData() + engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False) + meta.reflect(bind=engine) + try: + for table in ('auth_tokens', 'export_devices', 'fixed_ips', + 'floating_ips', 'instances', 'iscsi_targets', + 'key_pairs', 'networks', 'projects', 'quotas', + 'security_group_rules', + 'security_group_instance_association', 'services', + 'users', 'user_project_association', + 'user_project_role_association', 'volumes'): + assert table in meta.tables + return db_version_control(1) + except AssertionError: + return db_version_control(0) + + +def db_version_control(version=None): + repo_path = _find_migrate_repo() + versioning_api.version_control(FLAGS.sql_connection, repo_path, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + return path diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 1dc46fe78..6f0a00b3b 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -94,51 +94,6 @@ 
class NovaBase(object): return iter(self) -# TODO(vish): Store images in the database instead of file system -#class Image(BASE, NovaBase): -# """Represents an image in the datastore""" -# __tablename__ = 'images' -# id = Column(Integer, primary_key=True) -# ec2_id = Column(String(12), unique=True) -# user_id = Column(String(255)) -# project_id = Column(String(255)) -# image_type = Column(String(255)) -# public = Column(Boolean, default=False) -# state = Column(String(255)) -# location = Column(String(255)) -# arch = Column(String(255)) -# default_kernel_id = Column(String(255)) -# default_ramdisk_id = Column(String(255)) -# -# @validates('image_type') -# def validate_image_type(self, key, image_type): -# assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw']) -# -# @validates('state') -# def validate_state(self, key, state): -# assert(state in ['available', 'pending', 'disabled']) -# -# @validates('default_kernel_id') -# def validate_kernel_id(self, key, val): -# if val != 'machine': -# assert(val is None) -# -# @validates('default_ramdisk_id') -# def validate_ramdisk_id(self, key, val): -# if val != 'machine': -# assert(val is None) -# -# -# TODO(vish): To make this into its own table, we need a good place to -# create the host entries. In config somwhere? Or the first -# time any object sets host? This only becomes particularly -# important if we need to store per-host data. -#class Host(BASE, NovaBase): -# """Represents a host where services are running""" -# __tablename__ = 'hosts' -# id = Column(String(255), primary_key=True) - - class Service(BASE, NovaBase): """Represents a running service on a host.""" diff --git a/nova/service.py b/nova/service.py index 8b2a22ce0..efc08fd63 100644 --- a/nova/service.py +++ b/nova/service.py @@ -209,19 +209,6 @@ class Service(object): self.model_disconnected = True logging.exception(_("model server went away")) - try: - # NOTE(vish): This is late-loaded to make sure that the - # database is not created before flags have - # been loaded. - from nova.db.sqlalchemy import models - models.register_models() - except OperationalError: - logging.exception(_("Data store %s is unreachable." - " Trying again in %d seconds.") % - (FLAGS.sql_connection, - FLAGS.sql_retry_interval)) - time.sleep(FLAGS.sql_retry_interval) - def serve(*services): FLAGS(sys.argv) diff --git a/run_tests.py b/run_tests.py index 5b8617f63..fbca3cbe3 100644 --- a/run_tests.py +++ b/run_tests.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- +import gettext import os import unittest import sys @@ -26,6 +26,10 @@ from nose import config from nose import result from nose import core +gettext.install('nova', unicode=1) + +from nova.db import migration + class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): @@ -61,6 +65,9 @@ if __name__ == '__main__': c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3) + + migration.db_sync() + runner = NovaTestRunner(stream=c.stream, verbosity=c.verbosity, diff --git a/tools/pip-requires b/tools/pip-requires index 341043114..e30b51a92 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -25,3 +25,4 @@ bzr Twisted>=10.1.0 PasteDeploy paste +sqlalchemy-migrate -- cgit From 5e304292a99162c7d5f5aa88a111a9dcada0ed10 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 12 Jan 2011 16:57:39 -0800 Subject: fix indentation --- nova/db/sqlalchemy/migrate_repo/manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/migrate_repo/manage.py b/nova/db/sqlalchemy/migrate_repo/manage.py index 74c09ae4a..09e340f44 100644 --- a/nova/db/sqlalchemy/migrate_repo/manage.py +++ b/nova/db/sqlalchemy/migrate_repo/manage.py @@ -1,4 +1,4 @@ #!/usr/bin/env python from migrate.versioning.shell import main if __name__ == '__main__': - main(debug='False', repository='.') + main(debug='False', repository='.') -- cgit From c57ccba743c54786e28317194000bcf22dc5b69e Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 14 Jan 2011 08:26:25 +0900 Subject: checking based on pep8 --- bin/nova-manage | 1 - nova/compute/manager.py | 10 ++++++---- nova/scheduler/driver.py | 6 +++--- nova/scheduler/manager.py | 2 -- nova/virt/libvirt_conn.py | 36 +++++++++++++++++------------------- 5 files changed, 26 insertions(+), 29 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index fb6b06694..b8a181343 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -468,7 +468,6 @@ class InstanceCommands(object): def live_migration(self, ec2_id, dest): """live_migration""" - if FLAGS.connection_type != 'libvirt': raise exception.Error('Only KVM is supported for now. ' 'Sorry.') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 00de85828..5db756362 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -561,7 +561,7 @@ class ComputeManager(manager.Manager): self.network_manager.setup_compute_network(context, instance_id) # Creating filters to hypervisors and firewalls. - # An example is that nova-instance-instance-xxx, + # An example is that nova-instance-instance-xxx, # which is written to libvirt.xml( check "virsh nwfilter-list ) # On destination host, this nwfilter is necessary. # In addition, this method is creating filtering rule @@ -575,8 +575,8 @@ class ComputeManager(manager.Manager): instance_ref = db.instance_get(context, instance_id) ec2_id = instance_ref['hostname'] - try: - # Checking volume node is working correctly when any volumes + try: + # Checking volume node is working correctly when any volumes # are attached to instances. rpc.call(context, FLAGS.volume_topic, @@ -584,7 +584,9 @@ class ComputeManager(manager.Manager): "args": {'instance_id': instance_id}}) # Asking dest host to preparing live migration. 
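The handshake in this hunk relies on per-host message topics: db.queue_get_for(context, FLAGS.compute_topic, dest) yields a topic that only the destination compute service consumes, so the rpc.call blocks until that specific host has finished pre_live_migration (or raised). The sketch below only illustrates the topic convention; the '<topic>.<host>' layout is an assumption based on how queue_get_for is used here, and the rpc plumbing is hinted at in comments rather than reimplemented.

    # Sketch of the per-host topic convention behind db.queue_get_for();
    # the '<topic>.<host>' layout is assumed, not shown in this patch.
    def queue_get_for_sketch(context, topic, physical_node_id):
        return '%s.%s' % (topic, physical_node_id)


    compute_topic = queue_get_for_sketch(None, 'compute', 'dest-host-01')
    print compute_topic                    # -> compute.dest-host-01

    # The compute manager then blocks on the destination host, roughly:
    # rpc.call(context, compute_topic,
    #          {"method": "pre_live_migration",
    #           "args": {"instance_id": instance_id}})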
- compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest) + compute_topic = db.queue_get_for(context, + FLAGS.compute_topic, + dest) rpc.call(context, compute_topic, diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index d44a3ae44..699462b12 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -130,16 +130,16 @@ class Scheduler(object): msg = _('Unexpected err: not found cpu_info for %s on DB.hosts') raise exception.Invalid(msg % orighost) - try : + try: rpc.call(context, db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": 'compare_cpu', "args": {'xml': cpuinfo}}) - except rpc.RemoteError, e: + except rpc.RemoteError, e: msg = '%s doesnt have compatibility to %s(where %s launching at)\n' msg += 'result:%s \n' - logging.error( _(msg) % (dest, src, ec2_id, ret)) + logging.error(_(msg) % (dest, src, ec2_id, ret)) raise e # Checking dst host still has enough capacities. diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 308fcffa2..b6627453d 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -69,7 +69,6 @@ class SchedulerManager(manager.Manager): "args": kwargs}) LOG.debug(_("Casting to %s %s for %s"), topic, host, method) - # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin. # Based on bear design summit discussion, # just put this here for bexar release. @@ -112,4 +111,3 @@ class SchedulerManager(manager.Manager): 'local_gb': hdd} return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource} - diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 3024515b8..f3f837153 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -96,7 +96,7 @@ flags.DEFINE_string('live_migration_uri', flags.DEFINE_string('live_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", 'Define live migration behavior.') -flags.DEFINE_integer('live_migration_bandwidth', 0, +flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') flags.DEFINE_string('live_migration_timeout_sec', 10, 'Timeout second for pre_live_migration is completed.') @@ -817,7 +817,6 @@ class LibvirtConnection(object): def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) - def compare_cpu(self, xml): """ Check the host cpu is compatible to a cpu given by xml. @@ -827,9 +826,8 @@ class LibvirtConnection(object): 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' """ - ret = self._conn.compareCPU(xml, 0) - if ret <= 0 : + if ret <= 0: url = 'http://libvirt.org/html/libvirt-libvirt.html' url += '#virCPUCompareResult\n' msg = 'CPU does not have compativility.\n' @@ -837,22 +835,22 @@ class LibvirtConnection(object): msg += 'Refer to %s' msg = _(msg) raise exception.Invalid(msg % (ret, url)) - return + return def ensure_filtering_rules_for_instance(self, instance_ref): - """ Setting up inevitable filtering rules on compute node, - and waiting for its completion. + """ Setting up inevitable filtering rules on compute node, + and waiting for its completion. To migrate an instance, filtering rules to hypervisors and firewalls are inevitable on destination host. - ( Waiting only for filterling rules to hypervisor, + ( Waiting only for filterling rules to hypervisor, since filtering rules to firewall rules can be set faster). Concretely, the below method must be called. - setup_basic_filtering (for nova-basic, etc.) 
- prepare_instance_filter(for nova-instance-instance-xxx, etc.) - + to_xml may have to be called since it defines PROJNET, PROJMASK. - but libvirt migrates those value through migrateToURI(), + but libvirt migrates those value through migrateToURI(), so , no need to be called. Don't use thread for this method since migration should @@ -879,7 +877,7 @@ class LibvirtConnection(object): msg = _('Timeout migrating for %s(%s)') raise exception.Error(msg % (ec2_id, instance_ref.name)) time.sleep(0.5) - + def live_migration(self, context, instance_ref, dest): """ Just spawning live_migration operation for @@ -895,21 +893,21 @@ class LibvirtConnection(object): duri = FLAGS.live_migration_uri % dest flaglist = FLAGS.live_migration_flag.split(',') - flagvals = [ getattr(libvirt, x.strip()) for x in flaglist ] - logical_sum = reduce(lambda x,y: x|y, flagvals) + flagvals = [getattr(libvirt, x.strip()) for x in flaglist] + logical_sum = reduce(lambda x, y: x | y, flagvals) bandwidth = FLAGS.live_migration_bandwidth - - if self.read_only: + + if self.read_only: tmpconn = self._connect(self.libvirt_uri, False) dom = tmpconn.lookupByName(instance_ref.name) dom.migrateToURI(duri, logical_sum, None, bandwidth) tmpconn.close() - else : + else: dom = self._conn.lookupByName(instance_ref.name) dom.migrateToURI(duri, logical_sum, None, bandwidth) - - except Exception, e: + + except Exception, e: id = instance_ref['id'] db.instance_set_state(context, id, power_state.RUNNING, 'running') try: @@ -950,7 +948,7 @@ class LibvirtConnection(object): # Releasing security group ingress rule. if FLAGS.firewall_driver == \ 'nova.virt.libvirt_conn.IptablesFirewallDriver': - try : + try: self.firewall_driver.remove_instance(instance_ref) except KeyError, e: pass -- cgit From fa5024b384953d30e91117a0c2874560e086aa58 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 14 Jan 2011 08:55:56 +0900 Subject: remove ">>>MERGE" iin nova/db/sqlalchemy/api.py --- nova/db/sqlalchemy/api.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b38a08a83..4ea85a094 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2117,4 +2117,3 @@ def console_get(context, console_id, instance_id=None): {'instance': idesc, 'console_id': console_id}) return result ->>>>>>> MERGE-SOURCE -- cgit From 715b83a299a933468f01206fe6f771a51d00c3f5 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 13 Jan 2011 19:03:07 -0800 Subject: update migration script to add new tables since merge --- .../sqlalchemy/migrate_repo/versions/001_austin.py | 547 +++++++++++++++++++++ .../migrate_repo/versions/001_first_database.py | 547 --------------------- .../sqlalchemy/migrate_repo/versions/002_bexar.py | 189 +++++++ .../migrate_repo/versions/002_update_to_trunk.py | 125 ----- 4 files changed, 736 insertions(+), 672 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/001_austin.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py new file mode 100644 index 000000000..8a60bd890 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -0,0 +1,547 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as 
represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +## Table code mostly autogenerated by genmodel.py +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', Boolean(create_constraint=True, name=None)), + ) + + +floating_ips = Table('floating_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + 
ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), + Column('admin_pass', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + 
Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + 
String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + +quotas = Table('quotas', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + +security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +security_group_inst_assoc = Table('security_group_instance_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + +security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('protocol', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + +services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + 
Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + +users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + +user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + +user_project_role_association = Table('user_project_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + +user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', 
Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + +volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py b/nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py deleted file mode 100644 index 8a60bd890..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_first_database.py +++ /dev/null @@ -1,547 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -## Table code mostly autogenerated by genmodel.py -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -auth_tokens = Table('auth_tokens', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('token_hash', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('user_id', Integer()), - Column('server_manageent_url', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('storage_url', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('cdn_management_url', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -export_devices = Table('export_devices', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('shelf_id', Integer()), - Column('blade_id', Integer()), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=True), - ) - - -fixed_ips = Table('fixed_ips', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('network_id', - Integer(), - ForeignKey('networks.id'), - nullable=True), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - 
nullable=True), - Column('allocated', Boolean(create_constraint=True, name=None)), - Column('leased', Boolean(create_constraint=True, name=None)), - Column('reserved', Boolean(create_constraint=True, name=None)), - ) - - -floating_ips = Table('floating_ips', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('address', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('fixed_ip_id', - Integer(), - ForeignKey('fixed_ips.id'), - nullable=True), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -instances = Table('instances', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('internal_id', Integer()), - Column('admin_pass', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('image_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('kernel_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('ramdisk_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('launch_index', Integer()), - Column('key_name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('key_data', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('state', Integer()), - Column('state_description', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('memory_mb', Integer()), - Column('vcpus', Integer()), - Column('local_gb', Integer()), - Column('hostname', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_type', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_data', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('reservation_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('mac_address', - 
String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('scheduled_at', DateTime(timezone=False)), - Column('launched_at', DateTime(timezone=False)), - Column('terminated_at', DateTime(timezone=False)), - Column('display_name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -iscsi_targets = Table('iscsi_targets', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('target_num', Integer()), - Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('volume_id', - Integer(), - ForeignKey('volumes.id'), - nullable=True), - ) - - -key_pairs = Table('key_pairs', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('fingerprint', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('public_key', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -networks = Table('networks', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('injected', Boolean(create_constraint=True, name=None)), - Column('cidr', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('netmask', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('bridge', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('gateway', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('broadcast', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('dns', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vlan', Integer()), - Column('vpn_public_address', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vpn_public_port', Integer()), - Column('vpn_private_address', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, 
_warn_on_bytestring=False)), - Column('dhcp_start', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -projects = Table('projects', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('description', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_manager', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('users.id')), - ) - - -quotas = Table('quotas', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instances', Integer()), - Column('cores', Integer()), - Column('volumes', Integer()), - Column('gigabytes', Integer()), - Column('floating_ips', Integer()), - ) - - -security_groups = Table('security_groups', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('description', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -security_group_inst_assoc = Table('security_group_instance_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('security_group_id', - Integer(), - ForeignKey('security_groups.id')), - Column('instance_id', Integer(), ForeignKey('instances.id')), - ) - - -security_group_rules = Table('security_group_rules', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - 
Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('parent_group_id', - Integer(), - ForeignKey('security_groups.id')), - Column('protocol', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('from_port', Integer()), - Column('to_port', Integer()), - Column('cidr', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('group_id', - Integer(), - ForeignKey('security_groups.id')), - ) - - -services = Table('services', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('binary', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('topic', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('report_count', Integer(), nullable=False), - Column('disabled', Boolean(create_constraint=True, name=None)), - ) - - -users = Table('users', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('access_key', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('secret_key', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('is_admin', Boolean(create_constraint=True, name=None)), - ) - - -user_project_association = Table('user_project_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('users.id'), - primary_key=True, - nullable=False), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('projects.id'), - primary_key=True, - nullable=False), - ) - - -user_project_role_association = Table('user_project_role_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - 
Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('role', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - ForeignKeyConstraint(['user_id', - 'project_id'], - ['user_project_association.user_id', - 'user_project_association.project_id']), - ) - - -user_role_association = Table('user_role_association', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - ForeignKey('users.id'), - primary_key=True, - nullable=False), - Column('role', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - ) - - -volumes = Table('volumes', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('ec2_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('size', Integer()), - Column('availability_zone', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=True), - Column('mountpoint', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('attach_time', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('status', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('attach_status', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('scheduled_at', DateTime(timezone=False)), - Column('launched_at', DateTime(timezone=False)), - Column('terminated_at', DateTime(timezone=False)), - Column('display_name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, iscsi_targets, key_pairs, networks, - projects, quotas, security_groups, security_group_inst_assoc, - security_group_rules, services, users, - user_project_association, user_project_role_association, - user_role_association, volumes): - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, iscsi_targets, key_pairs, networks, - projects, quotas, security_groups, security_group_inst_assoc, - security_group_rules, services, users, - user_project_association, user_project_role_association, - user_role_association, volumes): - table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py new file mode 100644 index 000000000..e93efab58 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +services = Table('services', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# +certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +consoles = Table('consoles', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_name', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', Integer()), + Column('password', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('port', Integer(), nullable=True), + Column('pool_id', + Integer(), + ForeignKey('console_pools.id')), + ) + + +console_pools = Table('console_pools', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('console_type', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_hostname', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('compute_host', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, 
_warn_on_bytestring=False)), + Column('error', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances_availability_zone = Column( + 'availability_zone', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +instances_locked = Column('locked', + Boolean(create_constraint=True, name=None)) + + +services_availability_zone = Column( + 'availability_zone', + String(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (certificates, consoles, console_pools, instance_actions): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + auth_tokens.c.user_id.alter(type=String(length=None, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + instances.create_column(instances_availability_zone) + instances.create_column(instances_locked) + services.create_column(services_availability_zone) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py b/nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py deleted file mode 100644 index f9468f005..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_update_to_trunk.py +++ /dev/null @@ -1,125 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -# Just for the ForeignKey to succeed -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -# -# New Tables -# -instance_actions = Table('instance_actions', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_id', - Integer(), - ForeignKey('instances.id')), - Column('action', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('error', - Text(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - -certificates = Table('certificates', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('file_name', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -# -# Tables to alter -# -auth_tokens = Table('auth_tokens', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('token_hash', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - primary_key=True, - nullable=False), - Column('user_id', Integer()), - Column('server_manageent_url', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('storage_url', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('cdn_management_url', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -instances_availability_zone = Column( - 'availability_zone', - String(length=None, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - - -instances_locked = Column('locked', - Boolean(create_constraint=True, name=None)) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - for table in (instance_actions, certificates): - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise - - auth_tokens.c.user_id.alter(type=String(length=None, - convert_unicode=False, - assert_unicode=None, - unicode_error=None, - _warn_on_bytestring=False)) - instances_availability_zone.create(table=instances) - instances_locked.create(table=instances) -- cgit From 380a279809bdba00734286950c7d3dd085241ad1 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 13 Jan 2011 19:04:24 -0800 Subject: pep8 fixes --- nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index e93efab58..53da35233 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -183,7 +183,7 @@ def upgrade(migrate_engine): assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)) - + instances.create_column(instances_availability_zone) instances.create_column(instances_locked) services.create_column(services_availability_zone) -- cgit From 47a2dc24b08ca4be7d114d95b42dc4faf19d9fad Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 14 Jan 2011 02:24:57 -0800 Subject: use .local and .rescue for disk images so they don't make app-armor puke --- nova/virt/libvirt.xml.template | 10 +++++----- nova/virt/libvirt_conn.py | 18 +++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index de06a1eb0..8139c3620 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -18,10 +18,10 @@ #set $disk_prefix = 'vd' #set $disk_bus = 'virtio' hvm - #end if + #end if #if $getVar('rescue', False) - ${basepath}/rescue-kernel - ${basepath}/rescue-ramdisk + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue #else #if $getVar('kernel', None) ${kernel} @@ -47,7 +47,7 @@ #if $getVar('rescue', False) - + @@ -64,7 +64,7 @@ #if $getVar('local', False) - + #end if diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 073a8e5bb..4223defd5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -350,7 +350,7 @@ class LibvirtConnection(object): rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, 'rescue-', rescue_images) + self._create_image(instance, xml, '.rescue', rescue_images) self._conn.createXML(xml, 0) timer = utils.LoopingCall(f=None) @@ -532,23 +532,23 @@ class LibvirtConnection(object): utils.execute('truncate %s -s %dG' % (target, local_gb)) # TODO(vish): should we format disk by default? 
- def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None): + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): # syntactic nicety - def basepath(fname='', prefix=prefix): + def basepath(fname='', suffix=suffix): return os.path.join(FLAGS.instances_path, inst['name'], - prefix + fname) + fname + suffix) # ensure directories exist and are writable - utils.execute('mkdir -p %s' % basepath(prefix='')) - utils.execute('chmod 0777 %s' % basepath(prefix='')) + utils.execute('mkdir -p %s' % basepath(suffix='')) + utils.execute('chmod 0777 %s' % basepath(suffix='')) LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() - # NOTE(vish): No need add the prefix to console.log + # NOTE(vish): No need add the suffix to console.log os.close(os.open(basepath('console.log', ''), os.O_CREAT | os.O_WRONLY, 0660)) @@ -577,7 +577,7 @@ class LibvirtConnection(object): root_fname = disk_images['image_id'] size = FLAGS.minimum_root_size - if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': + if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue': size = None root_fname += "_sm" @@ -593,7 +593,7 @@ class LibvirtConnection(object): if type_data['local_gb']: self._cache_image(fn=self._create_local, - target=basepath('local'), + target=basepath('disk.local'), fname="local_%s" % type_data['local_gb'], cow=FLAGS.use_cow_images, local_gb=type_data['local_gb']) -- cgit From 525544e689334346305ecc11552105fc1b32a5dd Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sun, 16 Jan 2011 14:54:35 +0900 Subject: merged to rev 561 and fixed based on reviewer's comment --- bin/nova-manage | 30 ++++---- nova/compute/manager.py | 65 +++++++++++----- nova/db/api.py | 30 -------- nova/db/sqlalchemy/api.py | 111 +++++---------------------- nova/db/sqlalchemy/models.py | 40 +++++----- nova/network/api.py | 1 + nova/scheduler/driver.py | 173 +++++++++++++++++++++++++++---------------- nova/scheduler/manager.py | 25 ++++--- nova/service.py | 29 +------- nova/virt/fake.py | 31 ++++++++ nova/virt/libvirt_conn.py | 86 ++++++++++++++++----- nova/virt/xenapi_conn.py | 30 ++++++++ nova/volume/manager.py | 9 +-- 13 files changed, 365 insertions(+), 295 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b8a181343..6bd6aef64 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -62,6 +62,7 @@ import time import IPy + # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), @@ -468,17 +469,19 @@ class InstanceCommands(object): def live_migration(self, ec2_id, dest): """live_migration""" + ctxt = context.get_admin_context() + instance_id = cloud.ec2_id_to_id(ec2_id) + if FLAGS.connection_type != 'libvirt': - raise exception.Error('Only KVM is supported for now. ' - 'Sorry.') + msg = _('Only KVM is supported for now. Sorry!') + raise exception.Error(msg) if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver': - raise exception.Error('Only AOEDriver is supported for now. ' - 'Sorry.') - - logging.basicConfig() - ctxt = context.get_admin_context() - instance_id = cloud.ec2_id_to_id(ec2_id) + instance_ref = db.instance_get(instance_id) + if len(instance_ref['volumes']) != 0: + msg = _(("""Volumes attached by ISCSIDriver""" + """ are not supported. 
Sorry!""")) + raise exception.Error(msg) rpc.call(ctxt, FLAGS.scheduler_topic, @@ -501,16 +504,15 @@ class HostCommands(object): # To supress msg: No handlers could be found for logger "amqplib" logging.basicConfig() - host_refs = db.host_get_all(context.get_admin_context()) - for host_ref in host_refs: - print host_ref['name'] + service_refs = db.service_get_all(context.get_admin_context()) + hosts = [ h['host'] for h in service_refs] + hosts = list(set(hosts)) + for host in hosts: + print host def show(self, host): """describe cpu/memory/hdd info for host.""" - # To supress msg: No handlers could be found for logger "amqplib" - logging.basicConfig() - result = rpc.call(context.get_admin_context(), FLAGS.scheduler_topic, {"method": "show_host_resource", diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5db756362..9c8cb363c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -39,6 +39,7 @@ import logging import socket import functools +from nova import context from nova import db from nova import exception from nova import flags @@ -117,6 +118,37 @@ class ComputeManager(manager.Manager): """ self.driver.init_host() + + def update_service(self, ctxt, host, binary): + """Insert compute node specific information to DB.""" + + try: + service_ref = db.service_get_by_args(ctxt, + host, + binary) + except exception.NotFound: + msg = _(("""Cannot insert compute manager specific info""" + """Because no service record found.""")) + raise exception.invalid(msg) + + # Updating host information + vcpu = self.driver.get_vcpu_number() + memory_mb = self.driver.get_memory_mb() + local_gb = self.driver.get_local_gb() + hypervisor = self.driver.get_hypervisor_type() + version = self.driver.get_hypervisor_version() + cpu_info = self.driver.get_cpu_info() + + db.service_update(ctxt, + service_ref['id'], + {'vcpus': vcpu, + 'memory_mb': memory_mb, + 'local_gb': local_gb, + 'hypervisor_type': hypervisor, + 'hypervisor_version': version, + 'cpu_info': cpu_info}) + + def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" # FIXME(ja): include other fields from state? @@ -530,9 +562,9 @@ class ComputeManager(manager.Manager): self.db.volume_detached(context, volume_id) return True - def compare_cpu(self, context, xml): + def compare_cpu(self, context, cpu_info): """ Check the host cpu is compatible to a cpu given by xml.""" - return self.driver.compare_cpu(xml) + return self.driver.compare_cpu(cpu_info) def pre_live_migration(self, context, instance_id, dest): """Any preparation for live migration at dst host.""" @@ -548,11 +580,11 @@ class ComputeManager(manager.Manager): raise exception.NotFound(msg) # If any volume is mounted, prepare here. - try: - for vol in db.volume_get_all_by_instance(context, instance_id): - self.volume_manager.setup_compute_volume(context, vol['id']) - except exception.NotFound: + if len(instance_ref['volumes']) == 0: logging.info(_("%s has no volume.") % ec2_id) + else: + for v in instance_ref['volumes']: + self.volume_manager.setup_compute_volume(context, v['id']) # Bridge settings # call this method prior to ensure_filtering_rules_for_instance, @@ -578,16 +610,16 @@ class ComputeManager(manager.Manager): try: # Checking volume node is working correctly when any volumes # are attached to instances. 
- rpc.call(context, - FLAGS.volume_topic, - {"method": "check_for_export", - "args": {'instance_id': instance_id}}) + if len(instance_ref['volumes']) != 0: + rpc.call(context, + FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': instance_id}}) # Asking dest host to preparing live migration. compute_topic = db.queue_get_for(context, FLAGS.compute_topic, dest) - rpc.call(context, compute_topic, {"method": "pre_live_migration", @@ -602,13 +634,10 @@ class ComputeManager(manager.Manager): power_state.RUNNING, 'running') - try: - for vol in db.volume_get_all_by_instance(context, instance_id): - db.volume_update(context, - vol['id'], - {'status': 'in-use'}) - except exception.NotFound: - pass + for v in instance_ref['volumes']: + db.volume_update(context, + v['id'], + {'status': 'in-use'}) # e should be raised. just calling "raise" may raise NotFound. raise e diff --git a/nova/db/api.py b/nova/db/api.py index aee5d1bb7..8c1e0d54d 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -730,11 +730,6 @@ def volume_get_by_ec2_id(context, ec2_id): return IMPL.volume_get_by_ec2_id(context, ec2_id) -def volume_get_all_by_instance(context, instance_id): - """Get all volumes by instance id or raise if it does not exist.""" - return IMPL.volume_get_all_by_instance(context, instance_id) - - def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) @@ -952,31 +947,6 @@ def host_get_networks(context, host): return IMPL.host_get_networks(context, host) -def host_create(context, value): - """Create a host from the values dictionary.""" - return IMPL.host_create(context, value) - - -def host_get(context, host_id): - """Get an host or raise if it does not exist.""" - return IMPL.host_get(context, host_id) - - -def host_get_all(context, session=None): - """Get all hosts or raise if it does not exist.""" - return IMPL.host_get_all(context) - - -def host_get_by_name(context, host): - """Get an host or raise if it does not exist.""" - return IMPL.host_get_by_name(context, host) - - -def host_update(context, host, values): - """Set the given properties on an host and update it.""" - return IMPL.host_update(context, host, values) - - ################## diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 4ea85a094..9843b7edb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -864,10 +864,10 @@ def instance_get_all_by_host(context, hostname): if not session: session = get_session() - result = session.query(models.Instance - ).filter_by(host=hostname - ).filter_by(deleted=can_read_deleted(context) - ).all() + result = session.query(models.Instance).\ + filter_by(host=hostname).\ + filter_by(deleted=can_read_deleted(context)).\ + all() if not result: return [] return result @@ -877,11 +877,11 @@ def instance_get_all_by_host(context, hostname): def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): session = get_session() - result = session.query(models.Instance - ).filter_by(host=hostname - ).filter_by(project_id=proj_id - ).filter_by(deleted=can_read_deleted(context) - ).value(column) + result = session.query(models.Instance).\ + filter_by(host=hostname).\ + filter_by(project_id=proj_id).\ + filter_by(deleted=can_read_deleted(context)).\ + value(column) if not result: return 0 return result @@ -889,20 +889,26 @@ def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): @require_context def 
instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - return _instance_get_sum_by_host_and_project(context, 'vcpus', hostname, + return _instance_get_sum_by_host_and_project(context, + 'vcpus', + hostname, proj_id) @require_context def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - return _instance_get_sum_by_host_and_project(context, 'memory_mb', - hostname, proj_id) + return _instance_get_sum_by_host_and_project(context, + 'memory_mb', + hostname, + proj_id) @require_context def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - return _instance_get_sum_by_host_and_project(context, 'local_gb', - hostname, proj_id) + return _instance_get_sum_by_host_and_project(context, + 'local_gb', + hostname, + proj_id) @require_context @@ -1470,18 +1476,6 @@ def volume_get_all_by_project(context, project_id): all() -@require_admin_context -def volume_get_all_by_instance(context, instance_id): - session = get_session() - result = session.query(models.Volume).\ - filter_by(instance_id=instance_id).\ - filter_by(deleted=False).\ - all() - if not result: - raise exception.NotFound(_('No volume for instance %s') % instance_id) - return result - - @require_admin_context def volume_get_instance(context, volume_id): session = get_session() @@ -1946,71 +1940,6 @@ def host_get_networks(context, host): all() -@require_admin_context -def host_create(context, values): - host_ref = models.Host() - for (key, value) in values.iteritems(): - host_ref[key] = value - host_ref.save() - return host_ref - - -@require_admin_context -def host_get(context, host_id, session=None): - if not session: - session = get_session() - - result = session.query(models.Host - ).filter_by(deleted=False - ).filter_by(id=host_id - ).first() - - if not result: - raise exception.NotFound('No host for id %s' % host_id) - - return result - - -@require_admin_context -def host_get_all(context, session=None): - if not session: - session = get_session() - - result = session.query(models.Host - ).filter_by(deleted=False - ).all() - - if not result: - raise exception.NotFound('No host record found .') - - return result - - -@require_admin_context -def host_get_by_name(context, host, session=None): - if not session: - session = get_session() - - result = session.query(models.Host - ).filter_by(deleted=False - ).filter_by(name=host - ).first() - - if not result: - raise exception.NotFound('No host for name %s' % host) - - return result - - -@require_admin_context -def host_update(context, host_id, values): - session = get_session() - with session.begin(): - host_ref = host_get(context, host_id, session=session) - for (key, value) in values.iteritems(): - host_ref[key] = value - - ################## diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 61f8b3cc9..add37fe19 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -138,38 +138,38 @@ class NovaBase(object): # __tablename__ = 'hosts' # id = Column(String(255), primary_key=True) -class Host(BASE, NovaBase): - """Represents a host where services are running""" - __tablename__ = 'hosts' - id = Column(Integer, primary_key=True) - name = Column(String(255)) - vcpus = Column(Integer, nullable=False, default=-1) - memory_mb = Column(Integer, nullable=False, default=-1) - local_gb = Column(Integer, nullable=False, default=-1) - hypervisor_type = Column(String(128)) - hypervisor_version = Column(Integer, nullable=False, default=-1) - cpu_info = Column(String(1024)) - deleted = 
Column(Boolean, default=False) - # C: when calling service_create() - # D: never deleted. instead of deleting cloumn "deleted" is true - # when host is down - # b/c Host.id is foreign key of service, and records - # of the "service" table are not deleted. - # R: Column "deleted" is true when calling hosts_up() and host is down. - class Service(BASE, NovaBase): """Represents a running service on a host.""" __tablename__ = 'services' id = Column(Integer, primary_key=True) - host = Column(String(255)) # , ForeignKey('hosts.id')) + #host_id = Column(Integer, ForeignKey('hosts.id'), nullable=True) + #host = relationship(Host, backref=backref('services')) + host = Column(String(255)) binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) availability_zone = Column(String(255), default='nova') + # The below items are compute node only. + # -1 or None is inserted for other service. + vcpus = Column(Integer, nullable=False, default=-1) + memory_mb = Column(Integer, nullable=False, default=-1) + local_gb = Column(Integer, nullable=False, default=-1) + hypervisor_type = Column(String(128)) + hypervisor_version = Column(Integer, nullable=False, default=-1) + # Note(masumotok): Expected Strings example: + # + # '{"arch":"x86_64", "model":"Nehalem", + # "topology":{"sockets":1, "threads":2, "cores":3}, + # features:[ "tdtscp", "xtpr"]}' + # + # Points are "json translatable" and it must have all + # dictionary keys above. + cpu_info = Column(String(512)) + class Certificate(BASE, NovaBase): """Represents a an x509 certificate""" diff --git a/nova/network/api.py b/nova/network/api.py index 09d20b57e..bf43acb51 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -67,6 +67,7 @@ class API(base.Base): floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) # NOTE(vish): Perhaps we should just pass this on to compute and # let compute communicate with network. + host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), {"method": "associate_floating_ip", diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 699462b12..4ab1e2fbf 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -75,110 +75,159 @@ class Scheduler(object): instance_ref = db.instance_get(context, instance_id) ec2_id = instance_ref['hostname'] - # Checking instance state. + # Checking instance. + self._live_migration_src_check(context, instance_ref) + + # Checking destination host. + self._live_migration_dest_check(context, instance_ref, dest) + + # Common checking. + self._live_migration_common_check(context, instance_ref, dest) + + # Changing instance_state. + db.instance_set_state(context, + instance_id, + power_state.PAUSED, + 'migrating') + + # Changing volume state + for v in instance_ref['volumes']: + db.volume_update(context, + v['id'], + {'status': 'migrating'}) + + # Return value is necessary to send request to src + # Check _schedule() in detail. + src = instance_ref['host'] + return src + + def _live_migration_src_check(self, context, instance_ref): + """Live migration check routine (for src host)""" + + # Checking instance is running. 
if power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']: msg = _('Instance(%s) is not running') + ec2_id = instance_ref['hostname'] raise exception.Invalid(msg % ec2_id) - # Checking destination host exists - dhost_ref = db.host_get_by_name(context, dest) + # Checing volume node is running when any volumes are mounted to the instance. + if len(instance_ref['volumes']) != 0: + services = db.service_get_all_by_topic(context, 'volume') + if len(services) < 1 or not self.service_is_up(services[0]): + msg = _('volume node is not alive(time synchronize problem?)') + raise exception.Invalid(msg) - # Checking whether The host where instance is running - # and dest is not same. + # Checking src host is alive. src = instance_ref['host'] - if dest == src: - msg = _('%s is where %s is running now. choose other host.') - raise exception.Invalid(msg % (dest, ec2_id)) - - # Checking dest is compute node. services = db.service_get_all_by_topic(context, 'compute') - if dest not in [service.host for service in services]: + services = [service for service in services if service.host == src] + if len(services) < 1 or not self.service_is_up(services[0]): + msg = _('%s is not alive(time synchronize problem?)') + raise exception.Invalid(msg % src) + + + def _live_migration_dest_check(self, context, instance_ref, dest): + """Live migration check routine (for destination host)""" + + # Checking dest exists and compute node. + dservice_refs = db.service_get_all_by_host(context, dest) + if len(dservice_refs) <= 0 : + msg = _('%s does not exists.') + raise exception.Invalid(msg % dest) + + dservice_ref = dservice_refs[0] + if dservice_ref['topic'] != 'compute': msg = _('%s must be compute node') raise exception.Invalid(msg % dest) # Checking dest host is alive. - service = [service for service in services if service.host == dest] - service = service[0] - if not self.service_is_up(service): + if not self.service_is_up(dservice_ref): msg = _('%s is not alive(time synchronize problem?)') raise exception.Invalid(msg % dest) - # NOTE(masumotok): Below pre-checkings are followed by - # http://wiki.libvirt.org/page/TodoPreMigrationChecks + # Checking whether The host where instance is running + # and dest is not same. + src = instance_ref['host'] + if dest == src: + ec2_id = instance_ref['hostname'] + msg = _('%s is where %s is running now. choose other host.') + raise exception.Invalid(msg % (dest, ec2_id)) + + # Checking dst host still has enough capacities. + self.has_enough_resource(context, instance_ref, dest) - # Checking hypervisor is same. + def _live_migration_common_check(self, context, instance_ref, dest): + """ + Live migration check routine. + Below pre-checkings are followed by + http://wiki.libvirt.org/page/TodoPreMigrationChecks + + """ + + # Checking dest exists. + dservice_refs = db.service_get_all_by_host(context, dest) + if len(dservice_refs) <= 0 : + msg = _('%s does not exists.') + raise exception.Invalid(msg % dest) + dservice_ref = dservice_refs[0] + + # Checking original host( where instance was launched at) exists. orighost = instance_ref['launched_on'] - ohost_ref = db.host_get_by_name(context, orighost) + oservice_refs = db.service_get_all_by_host(context, orighost) + if len(oservice_refs) <= 0 : + msg = _('%s(where instance was launched at) does not exists.') + raise exception.Invalid(msg % orighost) + oservice_ref = oservice_refs[0] - otype = ohost_ref['hypervisor_type'] - dtype = dhost_ref['hypervisor_type'] + # Checking hypervisor is same. 
+ otype = oservice_ref['hypervisor_type'] + dtype = dservice_ref['hypervisor_type'] if otype != dtype: msg = _('Different hypervisor type(%s->%s)') raise exception.Invalid(msg % (otype, dtype)) # Checkng hypervisor version. - oversion = ohost_ref['hypervisor_version'] - dversion = dhost_ref['hypervisor_version'] + oversion = oservice_ref['hypervisor_version'] + dversion = dservice_ref['hypervisor_version'] if oversion > dversion: msg = _('Older hypervisor version(%s->%s)') raise exception.Invalid(msg % (oversion, dversion)) # Checking cpuinfo. - cpuinfo = ohost_ref['cpu_info'] - if str != type(cpuinfo): - msg = _('Unexpected err: not found cpu_info for %s on DB.hosts') - raise exception.Invalid(msg % orighost) - + cpu_info = oservice_ref['cpu_info'] try: rpc.call(context, - db.queue_get_for(context, FLAGS.compute_topic, dest), - {"method": 'compare_cpu', - "args": {'xml': cpuinfo}}) + db.queue_get_for(context, FLAGS.compute_topic, dest), + {"method": 'compare_cpu', + "args": {'cpu_info': cpu_info}}) except rpc.RemoteError, e: - msg = '%s doesnt have compatibility to %s(where %s launching at)\n' - msg += 'result:%s \n' - logging.error(_(msg) % (dest, src, ec2_id, ret)) + msg = _('%s doesnt have compatibility to %s(where %s launching at)') + ec2_id = instance_ref['hostname'] + src = instance_ref['host'] + logging.error(msg % (dest, src, ec2_id)) raise e - # Checking dst host still has enough capacities. - self.has_enough_resource(context, instance_id, dest) - - # Changing instance_state. - db.instance_set_state(context, - instance_id, - power_state.PAUSED, - 'migrating') - - # Changing volume state - try: - for vol in db.volume_get_all_by_instance(context, instance_id): - db.volume_update(context, - vol['id'], - {'status': 'migrating'}) - except exception.NotFound: - pass - - # Return value is necessary to send request to src - # Check _schedule() in detail. 
- return src - - def has_enough_resource(self, context, instance_id, dest): + def has_enough_resource(self, context, instance_ref, dest): """ Check if destination host has enough resource for live migration""" # Getting instance information - instance_ref = db.instance_get(context, instance_id) ec2_id = instance_ref['hostname'] vcpus = instance_ref['vcpus'] mem = instance_ref['memory_mb'] hdd = instance_ref['local_gb'] # Gettin host information - host_ref = db.host_get_by_name(context, dest) - total_cpu = int(host_ref['vcpus']) - total_mem = int(host_ref['memory_mb']) - total_hdd = int(host_ref['local_gb']) + service_refs = db.service_get_all_by_host(context, dest) + if len(service_refs) <= 0 : + msg = _('%s does not exists.') + raise exception.Invalid(msg % dest) + service_ref = service_refs[0] + + total_cpu = int(service_ref['vcpus']) + total_mem = int(service_ref['memory_mb']) + total_hdd = int(service_ref['local_gb']) instances_ref = db.instance_get_all_by_host(context, dest) for i_ref in instances_ref: @@ -196,4 +245,4 @@ class Scheduler(object): msg = '%s doesnt have enough resource for %s' % (dest, ec2_id) raise exception.NotEmpty(msg) - logging.debug(_('%s has enough resource for %s') % (dest, ec2_id)) + logging.debug(_('%s has_enough_resource() for %s') % (dest, ec2_id)) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index b6627453d..56ffbf221 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -76,20 +76,27 @@ class SchedulerManager(manager.Manager): """ show the physical/usage resource given by hosts.""" try: - host_ref = db.host_get_by_name(context, host) + services = db.service_get_all_by_host(context, host) except exception.NotFound: return {'ret': False, 'msg': 'No such Host'} except: raise + compute = [ s for s in services if s['topic'] == 'compute'] + if 0 == len(compute): + service_ref = services[0] + else: + service_ref = compute[0] + # Getting physical resource information - h_resource = {'vcpus': host_ref['vcpus'], - 'memory_mb': host_ref['memory_mb'], - 'local_gb': host_ref['local_gb']} + h_resource = {'vcpus': service_ref['vcpus'], + 'memory_mb': service_ref['memory_mb'], + 'local_gb': service_ref['local_gb']} # Getting usage resource information u_resource = {} - instances_ref = db.instance_get_all_by_host(context, host_ref['name']) + instances_ref = db.instance_get_all_by_host(context, + service_ref['host']) if 0 == len(instances_ref): return {'ret': True, 'phy_resource': h_resource, 'usage': {}} @@ -98,11 +105,11 @@ class SchedulerManager(manager.Manager): project_ids = list(set(project_ids)) for p_id in project_ids: vcpus = db.instance_get_vcpu_sum_by_host_and_project(context, - host, - p_id) + host, + p_id) mem = db.instance_get_memory_sum_by_host_and_project(context, - host, - p_id) + host, + p_id) hdd = db.instance_get_disk_sum_by_host_and_project(context, host, p_id) diff --git a/nova/service.py b/nova/service.py index ff44e49a8..7323c7ff1 100644 --- a/nova/service.py +++ b/nova/service.py @@ -81,12 +81,6 @@ class Service(object): self.model_disconnected = False ctxt = context.get_admin_context() - try: - host_ref = db.host_get_by_name(ctxt, self.host) - except exception.NotFound: - host_ref = db.host_create(ctxt, {'name': self.host}) - host_ref = self._update_host_ref(ctxt, host_ref) - try: service_ref = db.service_get_by_args(ctxt, self.host, @@ -95,6 +89,9 @@ class Service(object): except exception.NotFound: self._create_service_ref(ctxt) + if 'nova-compute' == self.binary: + self.manager.update_service(ctxt, 
self.host, self.binary) + conn1 = rpc.Connection.instance(new=True) conn2 = rpc.Connection.instance(new=True) if self.report_interval: @@ -129,26 +126,6 @@ class Service(object): 'availability_zone': zone}) self.service_id = service_ref['id'] - def _update_host_ref(self, context, host_ref): - - if 0 <= self.manager_class_name.find('ComputeManager'): - vcpu = self.manager.driver.get_vcpu_number() - memory_mb = self.manager.driver.get_memory_mb() - local_gb = self.manager.driver.get_local_gb() - hypervisor = self.manager.driver.get_hypervisor_type() - version = self.manager.driver.get_hypervisor_version() - cpu_xml = self.manager.driver.get_cpu_xml() - - db.host_update(context, - host_ref['id'], - {'vcpus': vcpu, - 'memory_mb': memory_mb, - 'local_gb': local_gb, - 'hypervisor_type': hypervisor, - 'hypervisor_version': version, - 'cpu_info': cpu_xml}) - return host_ref - def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 9186d885e..3b53f714f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -297,6 +297,37 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} + def get_cpu_info(self): + """This method is supported only libvirt. """ + return + + def get_vcpu_number(self): + """This method is supported only libvirt. """ + return -1 + + def get_memory_mb(self): + """This method is supported only libvirt..""" + return -1 + + def get_local_gb(self): + """This method is supported only libvirt..""" + return -1 + + def get_hypervisor_type(self): + """This method is supported only libvirt..""" + return + + def get_hypervisor_version(self): + """This method is supported only libvirt..""" + return -1 + + def compare_cpu(self, xml): + """This method is supported only libvirt..""" + raise NotImplementedError('This method is supported only libvirt.') + + def live_migration(self, context, instance_ref, dest): + """This method is supported only libvirt..""" + raise NotImplementedError('This method is supported only libvirt.') class FakeInstance(object): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f3f837153..93e768ae9 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,6 +36,7 @@ Supports KVM, QEMU, UML, and XEN. 
""" +import json import os import shutil import re @@ -82,6 +83,9 @@ flags.DEFINE_string('injected_network_template', flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') +flags.DEFINE_string('cpuinfo_xml_template', + utils.abspath('virt/cpuinfo.xml.template'), + 'CpuInfo XML Template (used only live migration now)') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -110,6 +114,11 @@ flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt_conn.IptablesFirewallDriver', 'Firewall driver (defaults to iptables)') +class cpuinfo: + arch = '' + vendor = '' + def __init__(self): pass + def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -145,6 +154,7 @@ class LibvirtConnection(object): self.libvirt_uri = self.get_uri() self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() + self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -774,7 +784,7 @@ class LibvirtConnection(object): """ Get hypervisor version """ return self._conn.getVersion() - def get_cpu_xml(self): + def get_cpu_info(self): """ Get cpuinfo information """ xmlstr = self._conn.getCapabilities() xml = libxml2.parseDoc(xmlstr) @@ -784,8 +794,40 @@ class LibvirtConnection(object): % len(nodes) msg += '\n' + xml.serialize() raise exception.Invalid(_(msg)) - cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize()) - return cpuxmlstr + + arch = xml.xpathEval('//cpu/arch')[0].getContent() + model = xml.xpathEval('//cpu/model')[0].getContent() + vendor = xml.xpathEval('//cpu/vendor')[0].getContent() + + topology_node = xml.xpathEval('//cpu/topology')[0].get_properties() + topology = dict() + while topology_node != None: + name = topology_node.get_name() + topology[name] = topology_node.getContent() + topology_node = topology_node.get_next() + + keys = ['cores', 'sockets', 'threads'] + tkeys = topology.keys() + if list(set(tkeys)) != list(set(keys)): + msg = _('Invalid xml: topology(%s) must have %s') + raise exception.Invalid(msg % (str(topology), ', '.join(keys))) + + feature_nodes = xml.xpathEval('//cpu/feature') + features = list() + for nodes in feature_nodes: + feature_name = nodes.get_properties().getContent() + features.append(feature_name) + + template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ + """"topology":{"cores":"%s", "threads":"%s", "sockets":"%s"}, """ + """"features":[%s]}""") + c = topology['cores'] + s = topology['sockets'] + t = topology['threads'] + f = [ '"%s"' % x for x in features] + cpu_info = template % (arch, model, vendor, c, s, t, ', '.join(f)) + return cpu_info + def block_stats(self, instance_name, disk): """ @@ -817,7 +859,7 @@ class LibvirtConnection(object): def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) - def compare_cpu(self, xml): + def compare_cpu(self, cpu_info): """ Check the host cpu is compatible to a cpu given by xml. "xml" must be a part of libvirt.openReadonly().getCapabilities(). 
@@ -826,6 +868,11 @@ class LibvirtConnection(object): 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' """ + dic = json.loads(cpu_info) + print dic + xml = str(Template(self.cpuinfo_xml, searchList=dic)) + msg = _('Checking cpu_info: instance was launched this cpu.\n: %s ') + LOG.info(msg % xml) ret = self._conn.compareCPU(xml, 0) if ret <= 0: url = 'http://libvirt.org/html/libvirt-libvirt.html' @@ -910,13 +957,10 @@ class LibvirtConnection(object): except Exception, e: id = instance_ref['id'] db.instance_set_state(context, id, power_state.RUNNING, 'running') - try: - for volume in db.volume_get_all_by_instance(context, id): - db.volume_update(context, - volume['id'], - {'status': 'in-use'}) - except exception.NotFound: - pass + for v in instance_ref['volumes']: + db.volume_update(context, + v['id'], + {'status': 'in-use'}) raise e @@ -939,6 +983,7 @@ class LibvirtConnection(object): Post operations for live migration. Mainly, database updating. """ + LOG.info('post livemigration operation is started..') # Detaching volumes. # (not necessary in current version ) @@ -949,7 +994,7 @@ class LibvirtConnection(object): if FLAGS.firewall_driver == \ 'nova.virt.libvirt_conn.IptablesFirewallDriver': try: - self.firewall_driver.remove_instance(instance_ref) + self.firewall_driver.unfilter_instance(instance_ref) except KeyError, e: pass @@ -986,22 +1031,25 @@ class LibvirtConnection(object): msg += '%s cannot inherit floating ip.. ' % ec2_id logging.error(_(msg)) + # Restore instance/volume state db.instance_update(context, instance_id, {'state_description': 'running', 'state': power_state.RUNNING, 'host': dest}) - try: - for volume in db.volume_get_all_by_instance(context, instance_id): - db.volume_update(context, - volume['id'], - {'status': 'in-use'}) - except exception.NotFound: - pass + for v in instance_ref['volumes']: + db.volume_update(context, + v['id'], + {'status': 'in-use'}) logging.info(_('Live migrating %s to %s finishes successfully') % (ec2_id, dest)) + msg = _(("""Known error: the below error is nomally occurs.\n""" + """Just check if iinstance is successfully migrated.\n""" + """libvir: QEMU error : Domain not found: no domain """ + """with matching name..""")) + logging.info(msg) class FirewallDriver(object): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 45d0738a5..76862be27 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -201,6 +201,36 @@ class XenAPIConnection(object): 'username': FLAGS.xenapi_connection_username, 'password': FLAGS.xenapi_connection_password} + def get_cpu_info(self): + """This method is supported only libvirt. """ + return + + def get_vcpu_number(self): + """This method is supported only libvirt. 
""" + return -1 + + def get_memory_mb(self): + """This method is supported only libvirt..""" + return -1 + + def get_local_gb(self): + """This method is supported only libvirt..""" + return -1 + + def get_hypervisor_type(self): + """This method is supported only libvirt..""" + return + + def get_hypervisor_version(self): + """This method is supported only libvirt..""" + return -1 + + def compare_cpu(self, xml): + raise NotImplementedError('This method is supported only libvirt.') + + def live_migration(self, context, instance_ref, dest): + raise NotImplementedError('This method is supported only libvirt.') + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" diff --git a/nova/volume/manager.py b/nova/volume/manager.py index b4754c607..a568e75f2 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -153,9 +153,6 @@ class VolumeManager(manager.Manager): def check_for_export(self, context, instance_id): """Make sure whether volume is exported.""" if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver': - try: - for vol in self.db.volume_get_all_by_instance(context, - instance_id): - self.driver.check_for_export(context, vol['id']) - except exception.NotFound: - pass + instance_ref = db.instance_get(instance_id) + for v in instance_ref['volumes']: + self.driver.check_for_export(context, v['id']) -- cgit From 4243440af10f3d682d255f7283618361a6e94d57 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 18 Jan 2011 02:03:52 -0500 Subject: I might have gone overboard with documenting _members. --- nova/virt/fake.py | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index a57a8f43b..4eb42ab85 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -310,6 +310,53 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} + def refresh_security_group_rules(self, security_group_id): + """This method is called after a change to security groups. + + All security groups and their associated rules live in the datastore, + and calling this method should apply the updated rules to instances + running the specified security group. + + An error should be raised if the operation cannot complete. + + """ + return True + + def refresh_security_group_members(self, security_group_id): + """This method is called when a security group is added to an instance. + + This message is sent to the virtualization drivers on hosts that are + running an instance that belongs to a security group that has a rule + that references the security group identified by `security_group_id`. + It is the responsiblity of this method to make sure any rules + that authorize traffic flow with members of the security group are + updated and any new members can communicate, and any removed members + cannot. + + Scenario: + * we are running on host 'H0' and we have an instance 'i-0'. + * instance 'i-0' is a member of security group 'speaks-b' + * group 'speaks-b' has an ingress rule that authorizes group 'b' + * another host 'H1' runs an instance 'i-1' + * instance 'i-1' is a member of security group 'b' + + When 'i-1' launches or terminates we will recieve the message + to update members of group 'b', at which time we will make + any changes needed to the rules for instance 'i-0' to allow + or deny traffic coming from 'i-1', depending on if it is being + added or removed from the group. 
+ + In this scenario, 'i-1' could just as easily have been running on our + host 'H0' and this method would still have been called. The point was + that this method isn't called on the host where instances of that + group are running (as is the case with + :method:`refresh_security_group_rules`) but is called where references + are made to authorizing those instances. + + An error should be raised if the operation cannot complete. + + """ + return True class FakeInstance(object): -- cgit From 11a10dc9a2fcaaf94e9c661fc9162fd5b8bd420e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 18 Jan 2011 02:05:07 -0500 Subject: pep8 --- nova/virt/fake.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 4eb42ab85..f8b3c7807 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -318,7 +318,7 @@ class FakeConnection(object): running the specified security group. An error should be raised if the operation cannot complete. - + """ return True @@ -328,7 +328,7 @@ class FakeConnection(object): This message is sent to the virtualization drivers on hosts that are running an instance that belongs to a security group that has a rule that references the security group identified by `security_group_id`. - It is the responsiblity of this method to make sure any rules + It is the responsiblity of this method to make sure any rules that authorize traffic flow with members of the security group are updated and any new members can communicate, and any removed members cannot. @@ -354,10 +354,11 @@ class FakeConnection(object): are made to authorizing those instances. An error should be raised if the operation cannot complete. - + """ return True + class FakeInstance(object): def __init__(self): -- cgit From a0779f5df2829f91bdc944e7275f44bd831643cc Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 19 Jan 2011 08:49:17 +0900 Subject: fixed based on reviewer's comment --- nova/api/ec2/cloud.py | 2 -- nova/virt/libvirt_conn.py | 1 + nova/volume/driver.py | 9 +++------ nova/volume/manager.py | 2 +- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 061f3f70f..c807bc13c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -729,8 +729,6 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - # modified by masumotok - #instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] instance_id = floating_ip_ref['fixed_ip']['instance']['id'] ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 541432ce3..534227339 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1267,6 +1267,7 @@ class NWFilterFirewall(FirewallDriver): # anyway. 
return + logging.info('ensuring static filters') self._ensure_static_filters() instance_filter_name = self._instance_filter_name(instance) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 09b93ae91..0d7ad37d5 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,7 +122,7 @@ class VolumeDriver(object): """Removes an export for a logical volume.""" raise NotImplementedError() - def discover_volume(self, context, volume): + def discover_volume(self, volume): """Discover volume on a remote host.""" raise NotImplementedError() @@ -184,13 +184,10 @@ class AOEDriver(VolumeDriver): self._try_execute("sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) - def discover_volume(self, context, volume): + def discover_volume(self, volume): """Discover volume on a remote host.""" self._execute("sudo aoe-discover") self._execute("sudo aoe-stat", check_exit_code=False) - shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, - volume['id']) - return "/dev/etherd/e%s.%s" % (shelf_id, blade_id) def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" @@ -296,7 +293,7 @@ class ISCSIDriver(VolumeDriver): iscsi_portal = location.split(",")[0] return (iscsi_name, iscsi_portal) - def discover_volume(self, _context, volume): + def discover_volume(self, volume): """Discover volume on a remote host.""" iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], volume['host']) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index a568e75f2..da750ab42 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -138,7 +138,7 @@ class VolumeManager(manager.Manager): if volume_ref['host'] == self.host and FLAGS.use_local_volumes: path = self.driver.local_path(volume_ref) else: - path = self.driver.discover_volume(context, volume_ref) + path = self.driver.discover_volume(volume_ref) return path def remove_compute_volume(self, context, volume_id): -- cgit From f935cf4c6e679d1c8eed99bcabe0d4515c2ba254 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 19 Jan 2011 09:46:43 +0900 Subject: previous commit breaks volume.driver. fix it.. 
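The signature cleanup in the previous commit dropped the context argument from discover_volume(), but AOEDriver needs that context to look up the volume's shelf/blade pair in the DB, so this change puts the argument back on the base class and on every driver (and uses self.db in check_for_export). A rough, self-contained sketch of the calling convention being restored; the classes below are hypothetical stand-ins for the real drivers and the shelf/blade values are placeholders for the db.volume_get_shelf_and_blade() lookup:

    class BaseDriver(object):
        def discover_volume(self, _context, volume):
            """Discover volume on a remote host."""
            raise NotImplementedError()

    class AOELikeDriver(BaseDriver):
        def discover_volume(self, context, volume):
            # AOE needs the request context to ask the DB which
            # shelf/blade the volume is exported on.
            shelf_id, blade_id = 0, 1   # placeholder for the DB lookup
            return "/dev/etherd/e%s.%s" % (shelf_id, blade_id)

    class SheepdogLikeDriver(BaseDriver):
        def discover_volume(self, _context, volume):
            # Drivers that do not need the context simply ignore it.
            return "sheepdog:%s" % volume['name']

    # VolumeManager.setup_compute_volume() calls every driver the same way:
    for driver in (AOELikeDriver(), SheepdogLikeDriver()):
        print driver.discover_volume({}, {'id': 1, 'name': 'vol-00000001'})
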
--- nova/compute/manager.py | 1 - nova/volume/driver.py | 13 ++++++++----- nova/volume/manager.py | 4 ++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 644f601af..efb5753aa 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -41,7 +41,6 @@ import logging import socket import functools -from nova import context from nova import db from nova import exception from nova import flags diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 0d7ad37d5..cc8809969 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,7 +122,7 @@ class VolumeDriver(object): """Removes an export for a logical volume.""" raise NotImplementedError() - def discover_volume(self, volume): + def discover_volume(self, _context, volume): """Discover volume on a remote host.""" raise NotImplementedError() @@ -184,10 +184,13 @@ class AOEDriver(VolumeDriver): self._try_execute("sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) - def discover_volume(self, volume): + def discover_volume(self, context, volume): """Discover volume on a remote host.""" self._execute("sudo aoe-discover") self._execute("sudo aoe-stat", check_exit_code=False) + shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, + volume['id']) + return "/dev/etherd/e%s.%s" % (shelf_id, blade_id) def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" @@ -293,7 +296,7 @@ class ISCSIDriver(VolumeDriver): iscsi_portal = location.split(",")[0] return (iscsi_name, iscsi_portal) - def discover_volume(self, volume): + def discover_volume(self, _context, volume): """Discover volume on a remote host.""" iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], volume['host']) @@ -381,7 +384,7 @@ class RBDDriver(VolumeDriver): """Removes an export for a logical volume""" pass - def discover_volume(self, volume): + def discover_volume(self, _context, volume): """Discover volume on a remote host""" return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) @@ -430,7 +433,7 @@ class SheepdogDriver(VolumeDriver): """Removes an export for a logical volume""" pass - def discover_volume(self, volume): + def discover_volume(self, _context, volume): """Discover volume on a remote host""" return "sheepdog:%s" % volume['name'] diff --git a/nova/volume/manager.py b/nova/volume/manager.py index da750ab42..1735d79eb 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -138,7 +138,7 @@ class VolumeManager(manager.Manager): if volume_ref['host'] == self.host and FLAGS.use_local_volumes: path = self.driver.local_path(volume_ref) else: - path = self.driver.discover_volume(volume_ref) + path = self.driver.discover_volume(context, volume_ref) return path def remove_compute_volume(self, context, volume_id): @@ -153,6 +153,6 @@ class VolumeManager(manager.Manager): def check_for_export(self, context, instance_id): """Make sure whether volume is exported.""" if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver': - instance_ref = db.instance_get(instance_id) + instance_ref = self.db.instance_get(instance_id) for v in instance_ref['volumes']: self.driver.check_for_export(context, v['id']) -- cgit From e56376d2fc67b7e291745012aafab392aba5aa8c Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 19 Jan 2011 22:18:30 +0900 Subject: .mailmap updated --- .mailmap | 1 + Authors | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 2af2d7cd9..f5147dd04 100644 --- a/.mailmap +++ 
b/.mailmap @@ -16,6 +16,7 @@ + Masumoto diff --git a/Authors b/Authors index 82e07a6b5..aa91124f7 100644 --- a/Authors +++ b/Authors @@ -26,7 +26,7 @@ Josh Durgin Josh Kearney Joshua McKenty Justin Santa Barbara -Kei Masumoto +Kei Masumoto Ken Pepple Koji Iida Lorin Hochstein -- cgit From 4780df11fcaaf29064cb40166701c5bddcc20d73 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 19 Jan 2011 23:35:02 +0900 Subject: .mailmap rewrite --- .mailmap | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index f5147dd04..d13219ab0 100644 --- a/.mailmap +++ b/.mailmap @@ -16,7 +16,8 @@ - Masumoto + + Masumoto -- cgit From 6c7d7aa264c031304904ea349412e9f8da0283a8 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 19 Jan 2011 23:38:52 +0900 Subject: remove blank from Authors --- Authors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Authors b/Authors index aa91124f7..82e07a6b5 100644 --- a/Authors +++ b/Authors @@ -26,7 +26,7 @@ Josh Durgin Josh Kearney Joshua McKenty Justin Santa Barbara -Kei Masumoto +Kei Masumoto Ken Pepple Koji Iida Lorin Hochstein -- cgit From 1dc38833c75d546b1c64d2bcd1f5d9a5bab8836d Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 20 Jan 2011 01:14:23 +0900 Subject: fixed pep8 error --- nova/scheduler/manager.py | 7 +++---- nova/service.py | 8 ++++---- nova/virt/fake.py | 5 +++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index f8e4e1613..1cc767a03 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -79,8 +79,8 @@ class SchedulerManager(manager.Manager): if len(services) == 0: return {'ret': False, 'msg': 'No such Host'} - compute = [ s for s in services if s['topic'] == 'compute'] - if 0 == len(compute): + compute = [s for s in services if s['topic'] == 'compute'] + if 0 == len(compute): service_ref = services[0] else: service_ref = compute[0] @@ -90,10 +90,9 @@ class SchedulerManager(manager.Manager): 'memory_mb': service_ref['memory_mb'], 'local_gb': service_ref['local_gb']} - # Getting usage resource information u_resource = {} - instances_ref = db.instance_get_all_by_host(context, + instances_ref = db.instance_get_all_by_host(context, service_ref['host']) if 0 == len(instances_ref): diff --git a/nova/service.py b/nova/service.py index 1acfe3078..a8d52e93b 100644 --- a/nova/service.py +++ b/nova/service.py @@ -119,11 +119,11 @@ class Service(object): def _create_service_ref(self, context): zone = FLAGS.node_availability_zone service_ref = db.service_create(context, - {'host':self.host, - 'binary':self.binary, - 'topic':self.topic, + {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, 'report_count': 0, - 'availability_zone':zone}) + 'availability_zone': zone}) self.service_id = service_ref['id'] def __getattr__(self, key): diff --git a/nova/virt/fake.py b/nova/virt/fake.py index b931e3638..80ae7f34c 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -312,7 +312,7 @@ class FakeConnection(object): def get_cpu_info(self): """This method is supported only libvirt. """ - return + return def get_vcpu_number(self): """This method is supported only libvirt. 
""" @@ -328,7 +328,7 @@ class FakeConnection(object): def get_hypervisor_type(self): """This method is supported only libvirt..""" - return + return def get_hypervisor_version(self): """This method is supported only libvirt..""" @@ -342,6 +342,7 @@ class FakeConnection(object): """This method is supported only libvirt..""" raise NotImplementedError('This method is supported only libvirt.') + class FakeInstance(object): def __init__(self): -- cgit From d91229f7a3b60095677e1bb76a548668c59ee9e2 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Tue, 18 Jan 2011 11:01:16 -0800 Subject: revert live_migration branch --- .mailmap | 2 - Authors | 2 - bin/nova-manage | 82 +----------- nova/api/ec2/cloud.py | 2 +- nova/compute/manager.py | 118 +---------------- nova/db/api.py | 30 ----- nova/db/sqlalchemy/api.py | 64 --------- nova/db/sqlalchemy/models.py | 26 +--- nova/network/manager.py | 14 +- nova/scheduler/driver.py | 183 -------------------------- nova/scheduler/manager.py | 48 ------- nova/service.py | 4 - nova/virt/cpuinfo.xml.template | 9 -- nova/virt/fake.py | 32 ----- nova/virt/libvirt_conn.py | 287 ----------------------------------------- nova/virt/xenapi_conn.py | 30 ----- nova/volume/driver.py | 30 +---- nova/volume/manager.py | 9 +- setup.py | 1 - 19 files changed, 17 insertions(+), 956 deletions(-) delete mode 100644 nova/virt/cpuinfo.xml.template diff --git a/.mailmap b/.mailmap index d13219ab0..2af2d7cd9 100644 --- a/.mailmap +++ b/.mailmap @@ -16,8 +16,6 @@ - - Masumoto diff --git a/Authors b/Authors index 82e07a6b5..bcb2cd0fb 100644 --- a/Authors +++ b/Authors @@ -26,7 +26,6 @@ Josh Durgin Josh Kearney Joshua McKenty Justin Santa Barbara -Kei Masumoto Ken Pepple Koji Iida Lorin Hochstein @@ -35,7 +34,6 @@ Michael Gundlach Monsyne Dragon Monty Taylor MORITA Kazutaka -Muneyuki Noguchi Nachi Ueno Paul Voccio Rick Clark diff --git a/bin/nova-manage b/bin/nova-manage index 1ad3120b8..b5842b595 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -62,7 +62,6 @@ import time import IPy - # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), @@ -82,9 +81,8 @@ from nova import log as logging from nova import quota from nova import utils from nova.auth import manager -from nova import rpc from nova.cloudpipe import pipelib -from nova.api.ec2 import cloud + logging.basicConfig() FLAGS = flags.FLAGS @@ -467,82 +465,6 @@ class NetworkCommands(object): int(vpn_start), fixed_range_v6) -class InstanceCommands(object): - """Class for mangaging VM instances.""" - - def live_migration(self, ec2_id, dest): - """live_migration""" - - ctxt = context.get_admin_context() - instance_id = cloud.ec2_id_to_id(ec2_id) - - if FLAGS.connection_type != 'libvirt': - msg = _('Only KVM is supported for now. Sorry!') - raise exception.Error(msg) - - if FLAGS.volume_driver != 'nova.volume.driver.AOEDriver': - instance_ref = db.instance_get(ctxt, instance_id) - if len(instance_ref['volumes']) != 0: - msg = _(("""Volumes attached by ISCSIDriver""" - """ are not supported. Sorry!""")) - raise exception.Error(msg) - - rpc.call(ctxt, - FLAGS.scheduler_topic, - {"method": "live_migration", - "args": {"instance_id": instance_id, - "dest": dest, - "topic": FLAGS.compute_topic}}) - - msg = 'Migration of %s initiated. ' % ec2_id - msg += 'Check its progress using euca-describe-instances.' 
- print msg - - -class HostCommands(object): - """Class for mangaging host(physical nodes).""" - - def list(self): - """describe host list.""" - - # To supress msg: No handlers could be found for logger "amqplib" - logging.basicConfig() - - service_refs = db.service_get_all(context.get_admin_context()) - hosts = [h['host'] for h in service_refs] - hosts = list(set(hosts)) - for host in hosts: - print host - - def show(self, host): - """describe cpu/memory/hdd info for host.""" - - result = rpc.call(context.get_admin_context(), - FLAGS.scheduler_topic, - {"method": "show_host_resource", - "args": {"host": host}}) - - # Checking result msg format is necessary, that will have done - # when this feture is included in API. - if type(result) != dict: - print 'Unexpected error occurs' - elif not result['ret']: - print '%s' % result['msg'] - else: - cpu = result['phy_resource']['vcpus'] - mem = result['phy_resource']['memory_mb'] - hdd = result['phy_resource']['local_gb'] - - print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)' - print '%s\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd) - for p_id, val in result['usage'].items(): - print '%s\t%s\t\t%s\t%s\t%s' % (host, - p_id, - val['vcpus'], - val['memory_mb'], - val['local_gb']) - - class ServiceCommands(object): """Enable and disable running services""" @@ -605,8 +527,6 @@ CATEGORIES = [ ('vpn', VpnCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), - ('instance', InstanceCommands), - ('host', HostCommands), ('service', ServiceCommands), ('log', LogCommands)] diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c94540793..57d41ed67 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -729,7 +729,7 @@ class CloudController(object): ec2_id = None if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): - instance_id = floating_ip_ref['fixed_ip']['instance']['id'] + instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} diff --git a/nova/compute/manager.py b/nova/compute/manager.py index efb5753aa..6f09ce674 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -41,7 +41,6 @@ import logging import socket import functools -from nova import db from nova import exception from nova import flags from nova import log as logging @@ -121,35 +120,6 @@ class ComputeManager(manager.Manager): """ self.driver.init_host() - def update_service(self, ctxt, host, binary): - """Insert compute node specific information to DB.""" - - try: - service_ref = db.service_get_by_args(ctxt, - host, - binary) - except exception.NotFound: - msg = _(("""Cannot insert compute manager specific info""" - """Because no service record found.""")) - raise exception.Invalid(msg) - - # Updating host information - vcpu = self.driver.get_vcpu_number() - memory_mb = self.driver.get_memory_mb() - local_gb = self.driver.get_local_gb() - hypervisor = self.driver.get_hypervisor_type() - version = self.driver.get_hypervisor_version() - cpu_info = self.driver.get_cpu_info() - - db.service_update(ctxt, - service_ref['id'], - {'vcpus': vcpu, - 'memory_mb': memory_mb, - 'local_gb': local_gb, - 'hypervisor_type': hypervisor, - 'hypervisor_version': version, - 'cpu_info': cpu_info}) - def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" # FIXME(ja): include other fields from state? 
@@ -208,10 +178,9 @@ class ComputeManager(manager.Manager): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, context=context) - self.db.instance_update(context, instance_id, - {'host': self.host, 'launched_on': self.host}) + {'host': self.host}) self.db.instance_set_state(context, instance_id, @@ -591,88 +560,3 @@ class ComputeManager(manager.Manager): self.volume_manager.remove_compute_volume(context, volume_id) self.db.volume_detached(context, volume_id) return True - - def compare_cpu(self, context, cpu_info): - """ Check the host cpu is compatible to a cpu given by xml.""" - return self.driver.compare_cpu(cpu_info) - - def pre_live_migration(self, context, instance_id, dest): - """Any preparation for live migration at dst host.""" - - # Getting instance info - instance_ref = db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] - - # Getting fixed ips - fixed_ip = db.instance_get_fixed_address(context, instance_id) - if not fixed_ip: - msg = _('%s(%s) doesnt have fixed_ip') % (instance_id, ec2_id) - raise exception.NotFound(msg) - - # If any volume is mounted, prepare here. - if len(instance_ref['volumes']) == 0: - logging.info(_("%s has no volume.") % ec2_id) - else: - for v in instance_ref['volumes']: - self.volume_manager.setup_compute_volume(context, v['id']) - - # Bridge settings - # call this method prior to ensure_filtering_rules_for_instance, - # since bridge is not set up, ensure_filtering_rules_for instance - # fails. - self.network_manager.setup_compute_network(context, instance_id) - - # Creating filters to hypervisors and firewalls. - # An example is that nova-instance-instance-xxx, - # which is written to libvirt.xml( check "virsh nwfilter-list ) - # On destination host, this nwfilter is necessary. - # In addition, this method is creating filtering rule - # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref) - - def live_migration(self, context, instance_id, dest): - """executes live migration.""" - - # Get instance for error handling. - instance_ref = db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] - - try: - # Checking volume node is working correctly when any volumes - # are attached to instances. - if len(instance_ref['volumes']) != 0: - rpc.call(context, - FLAGS.volume_topic, - {"method": "check_for_export", - "args": {'instance_id': instance_id}}) - - # Asking dest host to preparing live migration. - compute_topic = db.queue_get_for(context, - FLAGS.compute_topic, - dest) - rpc.call(context, - compute_topic, - {"method": "pre_live_migration", - "args": {'instance_id': instance_id, - 'dest': dest}}) - - except Exception, e: - msg = _('Pre live migration for %s failed at %s') - logging.error(msg, ec2_id, dest) - db.instance_set_state(context, - instance_id, - power_state.RUNNING, - 'running') - - for v in instance_ref['volumes']: - db.volume_update(context, - v['id'], - {'status': 'in-use'}) - - # e should be raised. just calling "raise" may raise NotFound. - raise e - - # Executing live migration - # live_migration might raises exceptions, but - # nothing must be recovered in this version. 
- self.driver.live_migration(context, instance_ref, dest) diff --git a/nova/db/api.py b/nova/db/api.py index 6277cbac5..f9d561587 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -253,10 +253,6 @@ def floating_ip_get_by_address(context, address): return IMPL.floating_ip_get_by_address(context, address) -def floating_ip_update(context, address, values): - """update floating ip information.""" - return IMPL.floating_ip_update(context, address, values) - #################### @@ -409,32 +405,6 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_all_by_host(context, hostname): - """Get instances by host""" - return IMPL.instance_get_all_by_host(context, hostname) - - -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project""" - return IMPL.instance_get_vcpu_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project """ - return IMPL.instance_get_memory_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project """ - return IMPL.instance_get_disk_sum_by_host_and_project(context, - hostname, - proj_id) - - def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 248a46f65..b63b84bed 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -495,16 +495,6 @@ def floating_ip_get_by_address(context, address, session=None): return result -@require_context -def floating_ip_update(context, address, values): - session = get_session() - with session.begin(): - floating_ip_ref = floating_ip_get_by_address(context, address, session) - for (key, value) in values.iteritems(): - floating_ip_ref[key] = value - floating_ip_ref.save(session=session) - - ################### @@ -868,7 +858,6 @@ def instance_update(context, instance_id, values): return instance_ref -@require_context def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance""" session = get_session() @@ -881,59 +870,6 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) -@require_context -def instance_get_all_by_host(context, hostname): - session = get_session() - if not session: - session = get_session() - - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(deleted=can_read_deleted(context)).\ - all() - if not result: - return [] - return result - - -@require_context -def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id): - session = get_session() - - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=can_read_deleted(context)).\ - value(column) - if not result: - return 0 - return result - - -@require_context -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - return _instance_get_sum_by_host_and_project(context, - 'vcpus', - hostname, - proj_id) - - -@require_context -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - return _instance_get_sum_by_host_and_project(context, 
- 'memory_mb', - hostname, - proj_id) - - -@require_context -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - return _instance_get_sum_by_host_and_project(context, - 'local_gb', - hostname, - proj_id) - - @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b28c64b59..bf5e48b04 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -150,32 +150,13 @@ class Service(BASE, NovaBase): __tablename__ = 'services' id = Column(Integer, primary_key=True) - #host_id = Column(Integer, ForeignKey('hosts.id'), nullable=True) - #host = relationship(Host, backref=backref('services')) - host = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) availability_zone = Column(String(255), default='nova') - # The below items are compute node only. - # -1 or None is inserted for other service. - vcpus = Column(Integer, nullable=False, default=-1) - memory_mb = Column(Integer, nullable=False, default=-1) - local_gb = Column(Integer, nullable=False, default=-1) - hypervisor_type = Column(String(128)) - hypervisor_version = Column(Integer, nullable=False, default=-1) - # Note(masumotok): Expected Strings example: - # - # '{"arch":"x86_64", "model":"Nehalem", - # "topology":{"sockets":1, "threads":2, "cores":3}, - # features:[ "tdtscp", "xtpr"]}' - # - # Points are "json translatable" and it must have all - # dictionary keys above. - cpu_info = Column(String(512)) - class Certificate(BASE, NovaBase): """Represents a an x509 certificate""" @@ -250,9 +231,6 @@ class Instance(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) - # To remember on which host a instance booted. - # An instance may moved to other host by live migraiton. 
- launched_on = Column(String(255)) locked = Column(Boolean) # TODO(vish): see Ewan's email about state improvements, probably @@ -610,7 +588,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console) # , Host, Image + Project, Certificate, ConsolePool, Console) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/network/manager.py b/nova/network/manager.py index 932c77d31..2a043cc6b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -159,7 +159,7 @@ class NetworkManager(manager.Manager): """Called when this host becomes the host for a network.""" raise NotImplementedError() - def setup_compute_network(self, context, instance_id, network_ref=None): + def setup_compute_network(self, context, instance_id): """Sets up matching network for compute hosts.""" raise NotImplementedError() @@ -320,7 +320,7 @@ class FlatManager(NetworkManager): self.db.fixed_ip_update(context, address, {'allocated': False}) self.db.fixed_ip_disassociate(context.elevated(), address) - def setup_compute_network(self, context, instance_id, network_ref=None): + def setup_compute_network(self, context, instance_id): """Network is created manually.""" pass @@ -395,10 +395,9 @@ class FlatDHCPManager(FlatManager): super(FlatDHCPManager, self).init_host() self.driver.metadata_forward() - def setup_compute_network(self, context, instance_id, network_ref=None): + def setup_compute_network(self, context, instance_id): """Sets up matching network for compute hosts.""" - if network_ref is None: - network_ref = db.network_get_by_instance(context, instance_id) + network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_bridge(network_ref['bridge'], FLAGS.flat_interface) @@ -488,10 +487,9 @@ class VlanManager(NetworkManager): """Returns a fixed ip to the pool.""" self.db.fixed_ip_update(context, address, {'allocated': False}) - def setup_compute_network(self, context, instance_id, network_ref=None): + def setup_compute_network(self, context, instance_id): """Sets up matching network for compute hosts.""" - if network_ref is None: - network_ref = db.network_get_by_instance(context, instance_id) + network_ref = db.network_get_by_instance(context, instance_id) self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge']) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 65745093b..66e46c1b9 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -26,9 +26,6 @@ import datetime from nova import db from nova import exception from nova import flags -from nova import log as logging -from nova import rpc -from nova.compute import power_state FLAGS = flags.FLAGS flags.DEFINE_integer('service_down_time', 60, @@ -67,183 +64,3 @@ class Scheduler(object): def schedule(self, context, topic, *_args, **_kwargs): """Must override at least this method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) - - def schedule_live_migration(self, context, instance_id, dest): - """ live migration method """ - - # Whether instance exists and running - instance_ref = db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] - - # Checking instance. - self._live_migration_src_check(context, instance_ref) - - # Checking destination host. 
- self._live_migration_dest_check(context, instance_ref, dest) - - # Common checking. - self._live_migration_common_check(context, instance_ref, dest) - - # Changing instance_state. - db.instance_set_state(context, - instance_id, - power_state.PAUSED, - 'migrating') - - # Changing volume state - for v in instance_ref['volumes']: - db.volume_update(context, - v['id'], - {'status': 'migrating'}) - - # Return value is necessary to send request to src - # Check _schedule() in detail. - src = instance_ref['host'] - return src - - def _live_migration_src_check(self, context, instance_ref): - """Live migration check routine (for src host)""" - - # Checking instance is running. - if power_state.RUNNING != instance_ref['state'] or \ - 'running' != instance_ref['state_description']: - msg = _('Instance(%s) is not running') - ec2_id = instance_ref['hostname'] - raise exception.Invalid(msg % ec2_id) - - # Checing volume node is running when any volumes are mounted - # to the instance. - if len(instance_ref['volumes']) != 0: - services = db.service_get_all_by_topic(context, 'volume') - if len(services) < 1 or not self.service_is_up(services[0]): - msg = _('volume node is not alive(time synchronize problem?)') - raise exception.Invalid(msg) - - # Checking src host is alive. - src = instance_ref['host'] - services = db.service_get_all_by_topic(context, 'compute') - services = [service for service in services if service.host == src] - if len(services) < 1 or not self.service_is_up(services[0]): - msg = _('%s is not alive(time synchronize problem?)') - raise exception.Invalid(msg % src) - - def _live_migration_dest_check(self, context, instance_ref, dest): - """Live migration check routine (for destination host)""" - - # Checking dest exists and compute node. - dservice_refs = db.service_get_all_by_host(context, dest) - if len(dservice_refs) <= 0: - msg = _('%s does not exists.') - raise exception.Invalid(msg % dest) - - dservice_ref = dservice_refs[0] - if dservice_ref['topic'] != 'compute': - msg = _('%s must be compute node') - raise exception.Invalid(msg % dest) - - # Checking dest host is alive. - if not self.service_is_up(dservice_ref): - msg = _('%s is not alive(time synchronize problem?)') - raise exception.Invalid(msg % dest) - - # Checking whether The host where instance is running - # and dest is not same. - src = instance_ref['host'] - if dest == src: - ec2_id = instance_ref['hostname'] - msg = _('%s is where %s is running now. choose other host.') - raise exception.Invalid(msg % (dest, ec2_id)) - - # Checking dst host still has enough capacities. - self.has_enough_resource(context, instance_ref, dest) - - def _live_migration_common_check(self, context, instance_ref, dest): - """ - Live migration check routine. - Below pre-checkings are followed by - http://wiki.libvirt.org/page/TodoPreMigrationChecks - - """ - - # Checking dest exists. - dservice_refs = db.service_get_all_by_host(context, dest) - if len(dservice_refs) <= 0: - msg = _('%s does not exists.') - raise exception.Invalid(msg % dest) - dservice_ref = dservice_refs[0] - - # Checking original host( where instance was launched at) exists. - orighost = instance_ref['launched_on'] - oservice_refs = db.service_get_all_by_host(context, orighost) - if len(oservice_refs) <= 0: - msg = _('%s(where instance was launched at) does not exists.') - raise exception.Invalid(msg % orighost) - oservice_ref = oservice_refs[0] - - # Checking hypervisor is same. 
- otype = oservice_ref['hypervisor_type'] - dtype = dservice_ref['hypervisor_type'] - if otype != dtype: - msg = _('Different hypervisor type(%s->%s)') - raise exception.Invalid(msg % (otype, dtype)) - - # Checkng hypervisor version. - oversion = oservice_ref['hypervisor_version'] - dversion = dservice_ref['hypervisor_version'] - if oversion > dversion: - msg = _('Older hypervisor version(%s->%s)') - raise exception.Invalid(msg % (oversion, dversion)) - - # Checking cpuinfo. - cpu_info = oservice_ref['cpu_info'] - try: - rpc.call(context, - db.queue_get_for(context, FLAGS.compute_topic, dest), - {"method": 'compare_cpu', - "args": {'cpu_info': cpu_info}}) - - except rpc.RemoteError, e: - msg = _(("""%s doesnt have compatibility to %s""" - """(where %s was launched at)""")) - ec2_id = instance_ref['hostname'] - src = instance_ref['host'] - logging.error(msg % (dest, src, ec2_id)) - raise e - - def has_enough_resource(self, context, instance_ref, dest): - """ Check if destination host has enough resource for live migration""" - - # Getting instance information - ec2_id = instance_ref['hostname'] - vcpus = instance_ref['vcpus'] - mem = instance_ref['memory_mb'] - hdd = instance_ref['local_gb'] - - # Gettin host information - service_refs = db.service_get_all_by_host(context, dest) - if len(service_refs) <= 0: - msg = _('%s does not exists.') - raise exception.Invalid(msg % dest) - service_ref = service_refs[0] - - total_cpu = int(service_ref['vcpus']) - total_mem = int(service_ref['memory_mb']) - total_hdd = int(service_ref['local_gb']) - - instances_ref = db.instance_get_all_by_host(context, dest) - for i_ref in instances_ref: - total_cpu -= int(i_ref['vcpus']) - total_mem -= int(i_ref['memory_mb']) - total_hdd -= int(i_ref['local_gb']) - - # Checking host has enough information - logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' % - (dest, total_cpu, total_mem, total_hdd)) - logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' % - (ec2_id, vcpus, mem, hdd)) - - if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd: - msg = '%s doesnt have enough resource for %s' % (dest, ec2_id) - raise exception.NotEmpty(msg) - - logging.debug(_('%s has_enough_resource() for %s') % (dest, ec2_id)) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 1cc767a03..a4d6dd574 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -29,7 +29,6 @@ from nova import log as logging from nova import manager from nova import rpc from nova import utils -from nova import exception LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS @@ -68,50 +67,3 @@ class SchedulerManager(manager.Manager): {"method": method, "args": kwargs}) LOG.debug(_("Casting to %s %s for %s"), topic, host, method) - - # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin. - # Based on bear design summit discussion, - # just put this here for bexar release. 
- def show_host_resource(self, context, host, *args): - """ show the physical/usage resource given by hosts.""" - - services = db.service_get_all_by_host(context, host) - if len(services) == 0: - return {'ret': False, 'msg': 'No such Host'} - - compute = [s for s in services if s['topic'] == 'compute'] - if 0 == len(compute): - service_ref = services[0] - else: - service_ref = compute[0] - - # Getting physical resource information - h_resource = {'vcpus': service_ref['vcpus'], - 'memory_mb': service_ref['memory_mb'], - 'local_gb': service_ref['local_gb']} - - # Getting usage resource information - u_resource = {} - instances_ref = db.instance_get_all_by_host(context, - service_ref['host']) - - if 0 == len(instances_ref): - return {'ret': True, 'phy_resource': h_resource, 'usage': {}} - - project_ids = [i['project_id'] for i in instances_ref] - project_ids = list(set(project_ids)) - for p_id in project_ids: - vcpus = db.instance_get_vcpu_sum_by_host_and_project(context, - host, - p_id) - mem = db.instance_get_memory_sum_by_host_and_project(context, - host, - p_id) - hdd = db.instance_get_disk_sum_by_host_and_project(context, - host, - p_id) - u_resource[p_id] = {'vcpus': vcpus, - 'memory_mb': mem, - 'local_gb': hdd} - - return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource} diff --git a/nova/service.py b/nova/service.py index a8d52e93b..8b2a22ce0 100644 --- a/nova/service.py +++ b/nova/service.py @@ -80,7 +80,6 @@ class Service(object): self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() - try: service_ref = db.service_get_by_args(ctxt, self.host, @@ -89,9 +88,6 @@ class Service(object): except exception.NotFound: self._create_service_ref(ctxt) - if 'nova-compute' == self.binary: - self.manager.update_service(ctxt, self.host, self.binary) - conn1 = rpc.Connection.instance(new=True) conn2 = rpc.Connection.instance(new=True) if self.report_interval: diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template deleted file mode 100644 index 48842b29d..000000000 --- a/nova/virt/cpuinfo.xml.template +++ /dev/null @@ -1,9 +0,0 @@ - - $arch - $model - $vendor - -#for $var in $features - -#end for - diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 80ae7f34c..a57a8f43b 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -310,38 +310,6 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} - def get_cpu_info(self): - """This method is supported only libvirt. """ - return - - def get_vcpu_number(self): - """This method is supported only libvirt. """ - return -1 - - def get_memory_mb(self): - """This method is supported only libvirt..""" - return -1 - - def get_local_gb(self): - """This method is supported only libvirt..""" - return -1 - - def get_hypervisor_type(self): - """This method is supported only libvirt..""" - return - - def get_hypervisor_version(self): - """This method is supported only libvirt..""" - return -1 - - def compare_cpu(self, xml): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') - - def live_migration(self, context, instance_ref, dest): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') - class FakeInstance(object): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 7d1f76b32..f38af5ed8 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,11 +36,8 @@ Supports KVM, QEMU, UML, and XEN. 
""" -import json import os import shutil -import re -import time import random import subprocess import uuid @@ -83,9 +80,6 @@ flags.DEFINE_string('injected_network_template', flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') -flags.DEFINE_string('cpuinfo_xml_template', - utils.abspath('virt/cpuinfo.xml.template'), - 'CpuInfo XML Template (used only live migration now)') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -94,16 +88,6 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -flags.DEFINE_string('live_migration_uri', - "qemu+tcp://%s/system", - 'Define protocol used by live_migration feature') -flags.DEFINE_string('live_migration_flag', - "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", - 'Define live migration behavior.') -flags.DEFINE_integer('live_migration_bandwidth', 0, - 'Define live migration behavior') -flags.DEFINE_string('live_migration_timeout_sec', 10, - 'Timeout second for pre_live_migration is completed.') flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') @@ -162,7 +146,6 @@ class LibvirtConnection(object): self.libvirt_uri = self.get_uri() self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() - self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -835,74 +818,6 @@ class LibvirtConnection(object): return interfaces - def get_vcpu_number(self): - """ Get vcpu number of physical computer. """ - return self._conn.getMaxVcpus(None) - - def get_memory_mb(self): - """Get the memory size of physical computer .""" - meminfo = open('/proc/meminfo').read().split() - idx = meminfo.index('MemTotal:') - # transforming kb to mb. - return int(meminfo[idx + 1]) / 1024 - - def get_local_gb(self): - """Get the hdd size of physical computer .""" - hddinfo = os.statvfs(FLAGS.instances_path) - return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024 - - def get_hypervisor_type(self): - """ Get hypervisor type """ - return self._conn.getType() - - def get_hypervisor_version(self): - """ Get hypervisor version """ - return self._conn.getVersion() - - def get_cpu_info(self): - """ Get cpuinfo information """ - xmlstr = self._conn.getCapabilities() - xml = libxml2.parseDoc(xmlstr) - nodes = xml.xpathEval('//cpu') - if len(nodes) != 1: - msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' 
\ - % len(nodes) - msg += '\n' + xml.serialize() - raise exception.Invalid(_(msg)) - - arch = xml.xpathEval('//cpu/arch')[0].getContent() - model = xml.xpathEval('//cpu/model')[0].getContent() - vendor = xml.xpathEval('//cpu/vendor')[0].getContent() - - topology_node = xml.xpathEval('//cpu/topology')[0].get_properties() - topology = dict() - while topology_node != None: - name = topology_node.get_name() - topology[name] = topology_node.getContent() - topology_node = topology_node.get_next() - - keys = ['cores', 'sockets', 'threads'] - tkeys = topology.keys() - if list(set(tkeys)) != list(set(keys)): - msg = _('Invalid xml: topology(%s) must have %s') - raise exception.Invalid(msg % (str(topology), ', '.join(keys))) - - feature_nodes = xml.xpathEval('//cpu/feature') - features = list() - for nodes in feature_nodes: - feature_name = nodes.get_properties().getContent() - features.append(feature_name) - - template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - c = topology['cores'] - s = topology['sockets'] - t = topology['threads'] - f = ['"%s"' % x for x in features] - cpu_info = template % (arch, model, vendor, c, s, t, ', '.join(f)) - return cpu_info - def block_stats(self, instance_name, disk): """ Note that this function takes an instance name, not an Instance, so @@ -933,208 +848,6 @@ class LibvirtConnection(object): def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) - def compare_cpu(self, cpu_info): - """ - Check the host cpu is compatible to a cpu given by xml. - "xml" must be a part of libvirt.openReadonly().getCapabilities(). - return values follows by virCPUCompareResult. - if 0 > return value, do live migration. - - 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' - """ - msg = _('Checking cpu_info: instance was launched this cpu.\n: %s ') - LOG.info(msg % cpu_info) - dic = json.loads(cpu_info) - xml = str(Template(self.cpuinfo_xml, searchList=dic)) - msg = _('to xml...\n: %s ') - LOG.info(msg % xml) - - url = 'http://libvirt.org/html/libvirt-libvirt.html' - url += '#virCPUCompareResult\n' - msg = 'CPU does not have compativility.\n' - msg += 'result:%d \n' - msg += 'Refer to %s' - msg = _(msg) - - # unknown character exists in xml, then libvirt complains - try: - ret = self._conn.compareCPU(xml, 0) - except libvirt.libvirtError, e: - LOG.error(msg % (ret, url)) - raise e - - if ret <= 0: - raise exception.Invalid(msg % (ret, url)) - - return - - def ensure_filtering_rules_for_instance(self, instance_ref): - """ Setting up inevitable filtering rules on compute node, - and waiting for its completion. - To migrate an instance, filtering rules to hypervisors - and firewalls are inevitable on destination host. - ( Waiting only for filterling rules to hypervisor, - since filtering rules to firewall rules can be set faster). - - Concretely, the below method must be called. - - setup_basic_filtering (for nova-basic, etc.) - - prepare_instance_filter(for nova-instance-instance-xxx, etc.) - - to_xml may have to be called since it defines PROJNET, PROJMASK. - but libvirt migrates those value through migrateToURI(), - so , no need to be called. - - Don't use thread for this method since migration should - not be started when setting-up filtering rules operations - are not completed.""" - - # Tf any instances never launch at destination host, - # basic-filtering must be set here. 
- self.nwfilter.setup_basic_filtering(instance_ref) - # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref) - - # wait for completion - timeout_count = range(FLAGS.live_migration_timeout_sec * 2) - while len(timeout_count) != 0: - try: - filter_name = 'nova-instance-%s' % instance_ref.name - self._conn.nwfilterLookupByName(filter_name) - break - except libvirt.libvirtError: - timeout_count.pop() - if len(timeout_count) == 0: - ec2_id = instance_ref['hostname'] - msg = _('Timeout migrating for %s(%s)') - raise exception.Error(msg % (ec2_id, instance_ref.name)) - time.sleep(0.5) - - def live_migration(self, context, instance_ref, dest): - """ - Just spawning live_migration operation for - distributing high-load. - """ - greenthread.spawn(self._live_migration, context, instance_ref, dest) - - def _live_migration(self, context, instance_ref, dest): - """ Do live migration.""" - - # Do live migration. - try: - duri = FLAGS.live_migration_uri % dest - - flaglist = FLAGS.live_migration_flag.split(',') - flagvals = [getattr(libvirt, x.strip()) for x in flaglist] - logical_sum = reduce(lambda x, y: x | y, flagvals) - - bandwidth = FLAGS.live_migration_bandwidth - - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(duri, logical_sum, None, bandwidth) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(duri, logical_sum, None, bandwidth) - - except Exception, e: - id = instance_ref['id'] - db.instance_set_state(context, id, power_state.RUNNING, 'running') - for v in instance_ref['volumes']: - db.volume_update(context, - v['id'], - {'status': 'in-use'}) - - raise e - - # Waiting for completion of live_migration. - timer = utils.LoopingCall(f=None) - - def wait_for_live_migration(): - - try: - state = self.get_info(instance_ref.name)['state'] - except exception.NotFound: - timer.stop() - self._post_live_migration(context, instance_ref, dest) - - timer.f = wait_for_live_migration - timer.start(interval=0.5, now=True) - - def _post_live_migration(self, context, instance_ref, dest): - """ - Post operations for live migration. - Mainly, database updating. - """ - LOG.info('post livemigration operation is started..') - # Detaching volumes. - # (not necessary in current version ) - - # Releasing vlan. - # (not necessary in current implementation?) - - # Releasing security group ingress rule. - if FLAGS.firewall_driver == \ - 'nova.virt.libvirt_conn.IptablesFirewallDriver': - try: - self.firewall_driver.unfilter_instance(instance_ref) - except KeyError, e: - pass - - # Database updating. - ec2_id = instance_ref['hostname'] - - instance_id = instance_ref['id'] - fixed_ip = db.instance_get_fixed_address(context, instance_id) - # Not return if fixed_ip is not found, otherwise, - # instance never be accessible.. - if None == fixed_ip: - logging.warn('fixed_ip is not found for %s ' % ec2_id) - db.fixed_ip_update(context, fixed_ip, {'host': dest}) - network_ref = db.fixed_ip_get_network(context, fixed_ip) - db.network_update(context, network_ref['id'], {'host': dest}) - - try: - floating_ip \ - = db.instance_get_floating_address(context, instance_id) - # Not return if floating_ip is not found, otherwise, - # instance never be accessible.. 
- if None == floating_ip: - logging.error('floating_ip is not found for %s ' % ec2_id) - else: - floating_ip_ref = db.floating_ip_get_by_address(context, - floating_ip) - db.floating_ip_update(context, - floating_ip_ref['address'], - {'host': dest}) - except exception.NotFound: - logging.debug('%s doesnt have floating_ip.. ' % ec2_id) - except: - msg = 'Live migration: Unexpected error:' - msg += '%s cannot inherit floating ip.. ' % ec2_id - logging.error(_(msg)) - - # Restore instance/volume state - db.instance_update(context, - instance_id, - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': dest}) - - for v in instance_ref['volumes']: - db.volume_update(context, - v['id'], - {'status': 'in-use'}) - - logging.info(_('Live migrating %s to %s finishes successfully') - % (ec2_id, dest)) - msg = _(("""Known error: the below error is nomally occurs.\n""" - """Just check if iinstance is successfully migrated.\n""" - """libvir: QEMU error : Domain not found: no domain """ - """with matching name..""")) - logging.info(msg) - class FirewallDriver(object): def prepare_instance_filter(self, instance): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c10f73fe7..c98310dbc 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -209,36 +209,6 @@ class XenAPIConnection(object): 'username': FLAGS.xenapi_connection_username, 'password': FLAGS.xenapi_connection_password} - def get_cpu_info(self): - """This method is supported only libvirt. """ - return - - def get_vcpu_number(self): - """This method is supported only libvirt. """ - return -1 - - def get_memory_mb(self): - """This method is supported only libvirt..""" - return -1 - - def get_local_gb(self): - """This method is supported only libvirt..""" - return -1 - - def get_hypervisor_type(self): - """This method is supported only libvirt..""" - return - - def get_hypervisor_version(self): - """This method is supported only libvirt..""" - return -1 - - def compare_cpu(self, xml): - raise NotImplementedError('This method is supported only libvirt.') - - def live_migration(self, context, instance_ref, dest): - raise NotImplementedError('This method is supported only libvirt.') - class XenAPISession(object): """The session to invoke XenAPI SDK calls""" diff --git a/nova/volume/driver.py b/nova/volume/driver.py index cc8809969..71fe18a40 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,7 +122,7 @@ class VolumeDriver(object): """Removes an export for a logical volume.""" raise NotImplementedError() - def discover_volume(self, _context, volume): + def discover_volume(self, volume): """Discover volume on a remote host.""" raise NotImplementedError() @@ -184,35 +184,15 @@ class AOEDriver(VolumeDriver): self._try_execute("sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) - def discover_volume(self, context, volume): + def discover_volume(self, _volume): """Discover volume on a remote host.""" self._execute("sudo aoe-discover") self._execute("sudo aoe-stat", check_exit_code=False) - shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, - volume['id']) - return "/dev/etherd/e%s.%s" % (shelf_id, blade_id) def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" pass - def check_for_export(self, context, volume_id): - """Make sure whether volume is exported.""" - (shelf_id, - blade_id) = self.db.volume_get_shelf_and_blade(context, - volume_id) - (out, _err) = self._execute("sudo vblade-persist ls --no-header") - exists = False - for line 
in out.split('\n'): - param = line.split(' ') - if len(param) == 6 and param[0] == str(shelf_id) \ - and param[1] == str(blade_id) and param[-1] == "run": - exists = True - break - if not exists: - logging.warning(_("vblade process for e%s.%s isn't running.") - % (shelf_id, blade_id)) - class FakeAOEDriver(AOEDriver): """Logs calls instead of executing.""" @@ -296,7 +276,7 @@ class ISCSIDriver(VolumeDriver): iscsi_portal = location.split(",")[0] return (iscsi_name, iscsi_portal) - def discover_volume(self, _context, volume): + def discover_volume(self, volume): """Discover volume on a remote host.""" iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'], volume['host']) @@ -384,7 +364,7 @@ class RBDDriver(VolumeDriver): """Removes an export for a logical volume""" pass - def discover_volume(self, _context, volume): + def discover_volume(self, volume): """Discover volume on a remote host""" return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) @@ -433,7 +413,7 @@ class SheepdogDriver(VolumeDriver): """Removes an export for a logical volume""" pass - def discover_volume(self, _context, volume): + def discover_volume(self, volume): """Discover volume on a remote host""" return "sheepdog:%s" % volume['name'] diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 1735d79eb..6348539c5 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -138,7 +138,7 @@ class VolumeManager(manager.Manager): if volume_ref['host'] == self.host and FLAGS.use_local_volumes: path = self.driver.local_path(volume_ref) else: - path = self.driver.discover_volume(context, volume_ref) + path = self.driver.discover_volume(volume_ref) return path def remove_compute_volume(self, context, volume_id): @@ -149,10 +149,3 @@ class VolumeManager(manager.Manager): return True else: self.driver.undiscover_volume(volume_ref) - - def check_for_export(self, context, instance_id): - """Make sure whether volume is exported.""" - if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver': - instance_ref = self.db.instance_get(instance_id) - for v in instance_ref['volumes']: - self.driver.check_for_export(context, v['id']) diff --git a/setup.py b/setup.py index a20802e8b..3608ff805 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,6 @@ if os.path.isdir('.bzr'): version_file.write(vcsversion) - class local_BuildDoc(BuildDoc): def run(self): for builder in ['html', 'man']: -- cgit From 18773e2aa53329511649fed5cdf37f5a80008f5c Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Tue, 18 Jan 2011 11:34:29 -0800 Subject: authors needed for test --- .mailmap | 2 ++ Authors | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.mailmap b/.mailmap index 2af2d7cd9..d13219ab0 100644 --- a/.mailmap +++ b/.mailmap @@ -16,6 +16,8 @@ + + Masumoto diff --git a/Authors b/Authors index bcb2cd0fb..82e07a6b5 100644 --- a/Authors +++ b/Authors @@ -26,6 +26,7 @@ Josh Durgin Josh Kearney Joshua McKenty Justin Santa Barbara +Kei Masumoto Ken Pepple Koji Iida Lorin Hochstein @@ -34,6 +35,7 @@ Michael Gundlach Monsyne Dragon Monty Taylor MORITA Kazutaka +Muneyuki Noguchi Nachi Ueno Paul Voccio Rick Clark -- cgit From 6bef91e9fe67a893bd2a1ba0ef8ef56d4b2bb72d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 18 Jan 2011 12:42:20 -0800 Subject: fix test to respect xml changes --- nova/tests/test_virt.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index afdc89ba2..556fe561c 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ 
-122,10 +122,10 @@ class LibvirtConnTestCase(test.TestCase): if rescue: check = (lambda t: t.find('./os/kernel').text.split('/')[1], - 'rescue-kernel') + 'kernel.rescue') check_list.append(check) check = (lambda t: t.find('./os/initrd').text.split('/')[1], - 'rescue-ramdisk') + 'ramdisk.rescue') check_list.append(check) else: if expect_kernel: @@ -161,13 +161,16 @@ class LibvirtConnTestCase(test.TestCase): if rescue: common_checks += [ (lambda t: t.findall('./devices/disk/source')[0].get( - 'file').split('/')[1], 'rescue-disk'), + 'file').split('/')[1], 'disk.rescue'), (lambda t: t.findall('./devices/disk/source')[1].get( 'file').split('/')[1], 'disk')] else: common_checks += [(lambda t: t.findall( './devices/disk/source')[0].get('file').split('/')[1], 'disk')] + common_checks += [(lambda t: t.findall( + './devices/disk/source')[1].get('file').split('/')[1], + 'disk.local')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type -- cgit From 7e6e3c3895d050dad81c4789bd296d2b910c7c6c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 18 Jan 2011 14:07:03 -0800 Subject: disable ipv6 by default --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 932c77d31..1b353a0d1 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -92,7 +92,7 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False, flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') -flags.DEFINE_bool('use_ipv6', True, +flags.DEFINE_bool('use_ipv6', False, 'use the ipv6') flags.DEFINE_string('network_host', socket.gethostname(), 'Network host to use for ip allocation in flat modes') -- cgit From b6ad755dcb495743c9137b0da441d264420b02b6 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 19 Jan 2011 00:31:51 +0100 Subject: Add an apply_instance_filter method to NWFilter driver. Adjust unit tests for both firewall drivers to actually exercise these code paths. 
--- nova/tests/test_virt.py | 53 ++++++++++++++++++++++++++++++++--------------- nova/virt/libvirt_conn.py | 4 ++++ 2 files changed, 40 insertions(+), 17 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 556fe561c..39dc91df8 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -228,12 +228,6 @@ class IptablesFirewallTestCase(test.TestCase): self.manager.delete_user(self.user) super(IptablesFirewallTestCase, self).tearDown() - def _p(self, *args, **kwargs): - if 'iptables-restore' in args: - print ' '.join(args), kwargs['stdin'] - if 'iptables-save' in args: - return - in_rules = [ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*filter', @@ -255,11 +249,21 @@ class IptablesFirewallTestCase(test.TestCase): '# Completed on Mon Dec 6 11:54:13 2010', ] + in6_rules = [ + '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', + '*filter', + ':INPUT ACCEPT [349155:75810423]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [349256:75777230]', + 'COMMIT', + '# Completed on Tue Jan 18 23:47:56 2011' + ] + def test_static_filters(self): - self.fw.execute = self._p instance_ref = db.instance_create(self.context, {'user_id': 'fake', - 'project_id': 'fake'}) + 'project_id': 'fake', + 'mac_address': '56:12:12:12:12:12'}) ip = '10.11.12.13' network_ref = db.project_get_network(self.context, @@ -304,18 +308,31 @@ class IptablesFirewallTestCase(test.TestCase): secgroup['id']) instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) - self.fw.add_instance(instance_ref) - - out_rules = self.fw.modify_rules(self.in_rules) +# self.fw.add_instance(instance_ref) + def fake_iptables_execute(cmd, process_input=None): + if cmd == 'sudo ip6tables-save -t filter': + return '\n'.join(self.in6_rules), None + if cmd == 'sudo iptables-save -t filter': + return '\n'.join(self.in_rules), None + if cmd == 'sudo iptables-restore': + self.out_rules = process_input.split('\n') + return '', '' + if cmd == 'sudo ip6tables-restore': + self.out6_rules = process_input.split('\n') + return '', '' + self.fw.execute = fake_iptables_execute + + self.fw.prepare_instance_filter(instance_ref) + self.fw.apply_instance_filter(instance_ref) in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) for rule in in_rules: if not 'nova' in rule: - self.assertTrue(rule in out_rules, + self.assertTrue(rule in self.out_rules, 'Rule went missing: %s' % rule) instance_chain = None - for rule in out_rules: + for rule in self.out_rules: # This is pretty crude, but it'll do for now if '-d 10.11.12.13 -j' in rule: instance_chain = rule.split(' ')[-1] @@ -323,7 +340,7 @@ class IptablesFirewallTestCase(test.TestCase): self.assertTrue(instance_chain, "The instance chain wasn't added") security_group_chain = None - for rule in out_rules: + for rule in self.out_rules: # This is pretty crude, but it'll do for now if '-A %s -j' % instance_chain in rule: security_group_chain = rule.split(' ')[-1] @@ -332,16 +349,16 @@ class IptablesFirewallTestCase(test.TestCase): "The security group chain wasn't added") self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \ - security_group_chain in out_rules, + security_group_chain in self.out_rules, "ICMP acceptance rule wasn't added") self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type' - ' 8 -j ACCEPT' % security_group_chain in out_rules, + ' 8 -j ACCEPT' % security_group_chain in self.out_rules, "ICMP Echo Request acceptance rule wasn't added") self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport 
' '--dports 80:81 -j ACCEPT' % security_group_chain \ - in out_rules, + in self.out_rules, "TCP port 80/81 acceptance rule wasn't added") @@ -476,5 +493,7 @@ class NWFilterTestCase(test.TestCase): self.fw.setup_basic_filtering(instance) self.fw.prepare_instance_filter(instance) + self.fw.apply_instance_filter(instance) _ensure_all_called() self.teardown_security_group() + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ab4ceb6a4..03bc9e94e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1408,6 +1408,10 @@ class NWFilterFirewall(FirewallDriver): return + def apply_instance_filter(self, instance): + """No-op. Everything is done in prepare_instance_filter""" + pass + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) -- cgit From 87cca15270e29666cf46adb5ea72e103c284d525 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 19 Jan 2011 00:39:24 +0100 Subject: Enable the use_ipv6 flag in unit tests by default. --- nova/tests/fake_flags.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 7376a11dd..1097488ec 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -40,3 +40,4 @@ FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' +FLAGS.use_ipv6 = True -- cgit From e2f11223e7f8d09ed91636d06184180773195a19 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Tue, 18 Jan 2011 15:59:02 -0800 Subject: add two more columns, set string lengths) --- .../sqlalchemy/migrate_repo/versions/001_austin.py | 148 ++++++++++----------- .../sqlalchemy/migrate_repo/versions/002_bexar.py | 60 ++++++--- 2 files changed, 114 insertions(+), 94 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py index 8a60bd890..a312a7190 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -32,19 +32,19 @@ auth_tokens = Table('auth_tokens', meta, Column('deleted_at', DateTime(timezone=False)), Column('deleted', Boolean(create_constraint=True, name=None)), Column('token_hash', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False), primary_key=True, nullable=False), Column('user_id', Integer()), Column('server_manageent_url', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('storage_url', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('cdn_management_url', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), ) @@ -71,7 +71,7 @@ fixed_ips = Table('fixed_ips', meta, Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True, nullable=False), Column('address', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, 
_warn_on_bytestring=False)), Column('network_id', Integer(), @@ -94,17 +94,17 @@ floating_ips = Table('floating_ips', meta, Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True, nullable=False), Column('address', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('fixed_ip_id', Integer(), ForeignKey('fixed_ips.id'), nullable=True), Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), ) @@ -117,63 +117,63 @@ instances = Table('instances', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('internal_id', Integer()), Column('admin_pass', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('project_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('image_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('kernel_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('ramdisk_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('launch_index', Integer()), Column('key_name', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('key_data', Text(length=None, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('state', Integer()), Column('state_description', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('memory_mb', Integer()), Column('vcpus', Integer()), Column('local_gb', Integer()), Column('hostname', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('instance_type', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, 
_warn_on_bytestring=False)), Column('user_data', Text(length=None, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('reservation_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('mac_address', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('scheduled_at', DateTime(timezone=False)), Column('launched_at', DateTime(timezone=False)), Column('terminated_at', DateTime(timezone=False)), Column('display_name', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('display_description', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), ) @@ -186,7 +186,7 @@ iscsi_targets = Table('iscsi_targets', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('target_num', Integer()), Column('host', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('volume_id', Integer(), @@ -202,13 +202,13 @@ key_pairs = Table('key_pairs', meta, Column('deleted', Boolean(create_constraint=True, name=None)), Column('id', Integer(), primary_key=True, nullable=False), Column('name', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('user_id', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('fingerprint', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('public_key', Text(length=None, convert_unicode=False, assert_unicode=None, @@ -224,39 +224,39 @@ networks = Table('networks', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('injected', Boolean(create_constraint=True, name=None)), Column('cidr', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('netmask', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('bridge', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('gateway', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), Column('broadcast', - String(length=None, convert_unicode=False, assert_unicode=None, + String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)), 
        Column('dns',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('vlan', Integer()),
        Column('vpn_public_address',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('vpn_public_port', Integer()),
        Column('vpn_private_address',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('dhcp_start',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('host',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        )
@@ -267,18 +267,18 @@ projects = Table('projects', meta,
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
        Column('name',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('description',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('project_manager',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               ForeignKey('users.id')),
        )
@@ -291,7 +291,7 @@ quotas = Table('quotas', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('instances', Integer()),
        Column('cores', Integer()),
@@ -308,16 +308,16 @@ security_groups = Table('security_groups', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('name',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('description',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('user_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        )
@@ -345,12 +345,12 @@ security_group_rules = Table('security_group_rules', meta,
               Integer(),
               ForeignKey('security_groups.id')),
        Column('protocol',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('from_port', Integer()),
        Column('to_port', Integer()),
        Column('cidr',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('group_id',
               Integer(),
@@ -365,13 +365,13 @@ services = Table('services', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('host',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('binary',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('topic',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('report_count', Integer(), nullable=False),
        Column('disabled', Boolean(create_constraint=True, name=None)),
@@ -384,18 +384,18 @@ users = Table('users', meta,
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
        Column('name',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('access_key',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('secret_key',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('is_admin', Boolean(create_constraint=True, name=None)),
        )
@@ -407,13 +407,13 @@ user_project_association = Table('user_project_association', meta,
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('user_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               ForeignKey('users.id'),
               primary_key=True,
               nullable=False),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               ForeignKey('projects.id'),
               primary_key=True,
@@ -427,17 +427,17 @@ user_project_role_association = Table('user_project_role_association', meta,
        Column('deleted_at',
               DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('user_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
        Column('role',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
@@ -454,13 +454,13 @@ user_role_association = Table('user_role_association', meta,
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('user_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               ForeignKey('users.id'),
               primary_key=True,
               nullable=False),
        Column('role',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
@@ -474,45 +474,45 @@ volumes = Table('volumes', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('ec2_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('user_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('host',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('size', Integer()),
        Column('availability_zone',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('instance_id', Integer(), ForeignKey('instances.id'),
               nullable=True),
        Column('mountpoint',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('attach_time',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('status',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('attach_status',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('scheduled_at', DateTime(timezone=False)),
        Column('launched_at', DateTime(timezone=False)),
        Column('terminated_at', DateTime(timezone=False)),
        Column('display_name',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('display_description',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        )
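
Note: the length=None to length=255 conversion above is not cosmetic. Some backends, MySQL in particular, refuse to create a VARCHAR column without an explicit length, so an unbounded String() only works on SQLite; bounding every column at 255 is the usual fix. A minimal sketch of the column pattern the migration converges on, in plain SQLAlchemy (illustrative only; the table name below is hypothetical and not part of the patch):

    from sqlalchemy import Column, Integer, MetaData, String, Table

    meta = MetaData()

    # Hypothetical table, shown only for the bounded-String pattern; the extra
    # convert_unicode/assert_unicode keywords seen in the patch are SQLAlchemy
    # 0.6-era defaults and are omitted here for brevity.
    hosts = Table('hosts', meta,
                  Column('id', Integer(), primary_key=True, nullable=False),
                  Column('name', String(length=255)))
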
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
index 53da35233..bd3a3e6f8 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -31,10 +31,17 @@ instances = Table('instances', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )
+
 services = Table('services', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )
+
+networks = Table('networks', meta,
+       Column('id', Integer(), primary_key=True, nullable=False),
+       )
+
+
 #
 # New Tables
 #
@@ -45,13 +52,13 @@ certificates = Table('certificates', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('user_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('project_id',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('file_name',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        )
@@ -63,11 +70,11 @@ consoles = Table('consoles', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('instance_name',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('instance_id', Integer()),
        Column('password',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('port', Integer(), nullable=True),
        Column('pool_id',
@@ -83,25 +90,25 @@ console_pools = Table('console_pools', meta,
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('address',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('username',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('password',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('console_type',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('public_hostname',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('host',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('compute_host',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        )
@@ -116,7 +123,7 @@ instance_actions = Table('instance_actions', meta,
               Integer(),
               ForeignKey('instances.id')),
        Column('action',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('error', Text(length=None, convert_unicode=False, assert_unicode=None,
@@ -133,26 +140,26 @@ auth_tokens = Table('auth_tokens', meta,
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('token_hash',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False),
               primary_key=True,
               nullable=False),
        Column('user_id', Integer()),
        Column('server_manageent_url',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('storage_url',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        Column('cdn_management_url',
-               String(length=None, convert_unicode=False, assert_unicode=None,
+               String(length=255, convert_unicode=False, assert_unicode=None,
                       unicode_error=None, _warn_on_bytestring=False)),
        )

 instances_availability_zone = Column(
     'availability_zone',
-    String(length=None, convert_unicode=False, assert_unicode=None,
+    String(length=255, convert_unicode=False, assert_unicode=None,
            unicode_error=None, _warn_on_bytestring=False))
@@ -160,9 +167,20 @@
 instances_locked = Column('locked',
                           Boolean(create_constraint=True, name=None))
+
+networks_cidr_v6 = Column(
+    'cidr_v6',
+    String(length=255, convert_unicode=False, assert_unicode=None,
+           unicode_error=None, _warn_on_bytestring=False))
+
+networks_ra_server = Column(
+    'ra_server',
+    String(length=255, convert_unicode=False, assert_unicode=None,
+           unicode_error=None, _warn_on_bytestring=False))
+
+
 services_availability_zone = Column(
     'availability_zone',
-    String(length=None, convert_unicode=False, assert_unicode=None,
+    String(length=255, convert_unicode=False, assert_unicode=None,
            unicode_error=None, _warn_on_bytestring=False))
@@ -178,7 +196,7 @@ def upgrade(migrate_engine):
         logging.exception('Exception while creating table')
         raise

-    auth_tokens.c.user_id.alter(type=String(length=None,
+    auth_tokens.c.user_id.alter(type=String(length=255,
                                             convert_unicode=False,
                                             assert_unicode=None,
                                             unicode_error=None,
@@ -186,4 +204,6 @@ def upgrade(migrate_engine):
     instances.create_column(instances_availability_zone)
     instances.create_column(instances_locked)
+    networks.create_column(networks_cidr_v6)
+    networks.create_column(networks_ra_server)
     services.create_column(services_availability_zone)
-- 
cgit
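
Note: the upgrade() additions above lean on sqlalchemy-migrate's changeset extension, which patches Table with a create_column() method so a migration script can emit ALTER TABLE statements. A minimal sketch of that add-column pattern (illustrative only; it assumes the migrate package is installed, and the meta.bind line is the conventional way to attach the live engine, not something taken verbatim from this patch):

    from migrate import *   # noqa: monkeypatches Table with create_column()
    from sqlalchemy import Column, Integer, MetaData, String, Table

    meta = MetaData()
    networks = Table('networks', meta,
                     Column('id', Integer(), primary_key=True, nullable=False))

    # New column objects are declared at module level, then attached in upgrade().
    networks_cidr_v6 = Column('cidr_v6', String(length=255))


    def upgrade(migrate_engine):
        # Bind the metadata to the engine and issue ALTER TABLE ... ADD COLUMN.
        meta.bind = migrate_engine
        networks.create_column(networks_cidr_v6)
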
From 7f352a72333e94c642d8288638c73a166cfb2943 Mon Sep 17 00:00:00 2001
From: Andy Smith
Date: Tue, 18 Jan 2011 17:32:54 -0800
Subject: move db sync into nosetests package-level fixtures so that the
 existing nosetests attempt in hudson will pass

---
 nova/tests/__init__.py |    5 +++++
 run_tests.py           |    7 -------
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 8dc87d0e2..592d5bea9 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -34,3 +34,8 @@
 # The code below enables nosetests to work with i18n _() blocks
 import __builtin__
 setattr(__builtin__, '_', lambda x: x)
+
+
+def setup():
+    from nova.db import migration
+    migration.db_sync()
diff --git a/run_tests.py b/run_tests.py
index fbca3cbe3..7b5e2192a 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -26,10 +26,6 @@
 from nose import config
 from nose import result
 from nose import core

-gettext.install('nova', unicode=1)
-
-from nova.db import migration
-
 class NovaTestResult(result.TextTestResult):
     def __init__(self, *args, **kw):
@@ -65,9 +61,6 @@ if __name__ == '__main__':
     c = config.Config(stream=sys.stdout,
                       env=os.environ,
                       verbosity=3)
-
-    migration.db_sync()
-
     runner = NovaTestRunner(stream=c.stream,
                             verbosity=c.verbosity,
-- 
cgit

From ec5e44eb7bce9429f7861d0cf03bf7bc77c46ae0 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Tue, 18 Jan 2011 18:27:08 -0800
Subject: replace old ec2_id with proper id in describe_addresses

---
 nova/api/ec2/cloud.py |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 57d41ed67..c94540793 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -729,7 +729,7 @@ class CloudController(object):
             ec2_id = None
             if (floating_ip_ref['fixed_ip']
                 and floating_ip_ref['fixed_ip']['instance']):
-                instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
+                instance_id = floating_ip_ref['fixed_ip']['instance']['id']
                 ec2_id = id_to_ec2_id(instance_id)
             address_rv = {'public_ip': address,
                           'instance_id': ec2_id}
-- 
cgit

From c79e72b2a14b5e231724a1e187bf7effe5c64fc4 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Wed, 19 Jan 2011 10:11:55 +0100
Subject: PEP-8 fixes

---
 nova/tests/test_virt.py |    5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 39dc91df8..f6800e3d9 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -352,8 +352,8 @@ class IptablesFirewallTestCase(test.TestCase):
                        security_group_chain in self.out_rules,
                        "ICMP acceptance rule wasn't added")

-        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
-                        ' 8 -j ACCEPT' % security_group_chain in self.out_rules,
+        self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
+                        '8 -j ACCEPT' % security_group_chain in self.out_rules,
                        "ICMP Echo Request acceptance rule wasn't added")

        self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
@@ -496,4 +496,3 @@ class NWFilterTestCase(test.TestCase):
         self.fw.apply_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
-
-- 
cgit
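
Note on the fixture change above: nose treats a setup()/teardown() pair defined in a test package's __init__.py as package-level fixtures, run once before the first test and once after the last one. That is why db_sync() can move out of run_tests.py and still run under a plain nosetests invocation, as the commit message says. A minimal sketch of the convention (names and print statements are illustrative only, not from the patch):

    # tests/__init__.py  (hypothetical package)
    def setup():
        # Runs once, before any test module in this package executes,
        # e.g. create the test database schema here.
        print('package-level setup')


    def teardown():
        # Runs once, after the last test in this package has finished,
        # e.g. drop the test database here.
        print('package-level teardown')
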
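Note on the describe_addresses change above: instances now carry a plain integer primary key, and the EC2-style identifier is derived from it on the way out via id_to_ec2_id(). A sketch of how such a mapping is commonly implemented (the i-%08x template below is an assumption for illustration, not taken from this patch):

    def id_to_ec2_id(instance_id, template='i-%08x'):
        """Format an internal integer id as an EC2-style id (assumed template)."""
        return template % instance_id

    print(id_to_ec2_id(10))   # -> i-0000000a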