From 09bd503a9842857480bd4703d27335e83dd30571 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sat, 11 Jun 2011 19:48:48 +0900 Subject: block migration feature added --- bin/nova-manage | 36 +++++++++- nova/compute/manager.py | 55 ++++++++++++--- nova/db/api.py | 21 ------ nova/db/sqlalchemy/api.py | 39 ----------- nova/db/sqlalchemy/models.py | 16 ++--- nova/exception.py | 5 ++ nova/scheduler/driver.py | 120 +++++++++++++++++++++++++------- nova/scheduler/manager.py | 54 ++++++++------- nova/tests/scheduler/test_scheduler.py | 52 ++++++++++---- nova/tests/test_compute.py | 6 +- nova/tests/test_libvirt.py | 90 ++++++++++++++++++++++++ nova/virt/libvirt/connection.py | 122 +++++++++++++++++++++++++++------ 12 files changed, 449 insertions(+), 167 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b0cd343f5..220f6e77a 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -649,11 +649,12 @@ class VmCommands(object): instance['availability_zone'], instance['launch_index']) - def live_migration(self, ec2_id, dest): + def _migration(self, ec2_id, dest, block_migration=False): """Migrates a running instance to a new machine. :param ec2_id: instance id which comes from euca-describe-instance. :param dest: destination host name. + :param block_migration: if True, do block_migration. """ @@ -676,11 +677,30 @@ class VmCommands(object): {"method": "live_migration", "args": {"instance_id": instance_id, "dest": dest, - "topic": FLAGS.compute_topic}}) + "topic": FLAGS.compute_topic, + "block_migration": block_migration}}) print _('Migration of %s initiated.' 'Check its progress using euca-describe-instances.') % ec2_id + def live_migration(self, ec2_id, dest): + """Migrates a running instance to a new machine. + + :param ec2_id: instance id which comes from euca-describe-instance. + :param dest: destination host name. + + """ + self._migration(ec2_id, dest) + + def block_migration(self, ec2_id, dest): + """Migrates a running instance to a new machine with storage data. + + :param ec2_id: instance id which comes from euca-describe-instance. + :param dest: destination host name. + + """ + self._migration(ec2_id, dest, True) + class ServiceCommands(object): """Enable and disable running services""" @@ -749,9 +769,19 @@ class ServiceCommands(object): mem_u = result['resource']['memory_mb_used'] hdd_u = result['resource']['local_gb_used'] + cpu_sum = 0 + mem_sum = 0 + hdd_sum = 0 print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)' print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd) - print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u) + print '%s(used_now)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u) + for p_id, val in result['usage'].items(): + cpu_sum += val['vcpus'] + mem_sum += val['memory_mb'] + hdd_sum += val['local_gb'] + print '%s(used_max)\t\t\t%s\t%s\t%s' % (host, cpu_sum, + mem_sum, hdd_sum) + for p_id, val in result['usage'].items(): print '%s\t\t%s\t\t%s\t%s\t%s' % (host, p_id, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 245958de7..f7604a253 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -921,11 +921,13 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.update_available_resource(context, self.host) - def pre_live_migration(self, context, instance_id, time=None): + def pre_live_migration(self, context, instance_id, time=None, + block_migration=False, **kwargs): """Preparations for live migration at dest host. 
:param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param block_migration: if true, prepare for block migration """ if not time: @@ -977,17 +979,24 @@ class ComputeManager(manager.SchedulerDependentManager): # onto destination host. self.driver.ensure_filtering_rules_for_instance(instance_ref) - def live_migration(self, context, instance_id, dest): + # Preparation for block migration + if block_migration: + self.driver.pre_block_migration(context, + instance_ref, + kwargs.get('disk')) + + def live_migration(self, context, instance_id, + dest, block_migration=False): """Executing live migration. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param dest: destination host + :param block_migration: if true, do block migration """ # Get instance for error handling. instance_ref = self.db.instance_get(context, instance_id) - i_name = instance_ref.name try: # Checking volume node is working correctly when any volumes @@ -998,13 +1007,20 @@ class ComputeManager(manager.SchedulerDependentManager): {"method": "check_for_export", "args": {'instance_id': instance_id}}) - # Asking dest host to preparing live migration. + args = {} + args['instance_id'] = instance_id + if block_migration: + args['block_migration'] = block_migration + args['disk'] = \ + self.driver.get_instance_disk_info(context, instance_ref) + rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": "pre_live_migration", - "args": {'instance_id': instance_id}}) + "args": args}) except Exception: + i_name = instance_ref.name msg = _("Pre live migration for %(i_name)s failed at %(dest)s") LOG.error(msg % locals()) self.recover_live_migration(context, instance_ref) @@ -1015,9 +1031,11 @@ class ComputeManager(manager.SchedulerDependentManager): # nothing must be recovered in this version. self.driver.live_migration(context, instance_ref, dest, self.post_live_migration, - self.recover_live_migration) + self.recover_live_migration, + block_migration) - def post_live_migration(self, ctxt, instance_ref, dest): + def post_live_migration(self, ctxt, instance_ref, + dest, block_migration=False): """Post operations for live migration. This method is called from live_migration @@ -1068,6 +1086,10 @@ class ComputeManager(manager.SchedulerDependentManager): # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) + # No instance booting at source host, but instance dir + # must be deleted for preparing next block migration + if block_migration: + self.driver.destroy(instance_ref) LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.') % locals()) @@ -1075,14 +1097,20 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): + def recover_live_migration(self, ctxt, instance_ref, host=None, + dest=None, delete=True): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param host: DB column value is updated by this hostname. If none, the host instance currently running is selected. - + :param dest: + This method is called from live migration src host. + This param specifies destination host. 
+ :param delete: + If true, ask destination host to remove instance dir, + since empty disk image was created for block migration """ if not host: host = instance_ref['host'] @@ -1101,6 +1129,15 @@ class ComputeManager(manager.SchedulerDependentManager): if dest: volume_api.remove_from_compute(ctxt, volume_id, dest) + # TODO: Block migration needs empty image at destination host + # before migration starts, so if any failure occurs, + # any empty images has to be deleted. but not sure adding below + # method is appropreate here. for now, admin has to delete manually. + # rpc.call(ctxt, + # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + # {"method": "self.driver.destroy", + # "args": {'instance':instance_ref}) + def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" error_list = super(ComputeManager, self).periodic_tasks(context) diff --git a/nova/db/api.py b/nova/db/api.py index 4e0aa60a2..b4cc110b1 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -483,27 +483,6 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project.""" - return IMPL.instance_get_vcpu_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project.""" - return IMPL.instance_get_memory_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project.""" - return IMPL.instance_get_disk_sum_by_host_and_project(context, - hostname, - proj_id) - - def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 103668b94..00c61bfe3 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1040,45 +1040,6 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) -@require_context -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.vcpus)) - if not result: - return 0 - return result - - -@require_context -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.memory_mb)) - if not result: - return 0 - return result - - -@require_context -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.local_gb)) - if not result: - return 0 - return result - - @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 55efe6886..1e057516b 100644 --- a/nova/db/sqlalchemy/models.py +++ 
b/nova/db/sqlalchemy/models.py
@@ -125,14 +125,14 @@ class ComputeNode(BASE, NovaBase):
                            'ComputeNode.service_id == Service.id,'
                            'ComputeNode.deleted == False)')
 
-    vcpus = Column(Integer, nullable=True)
-    memory_mb = Column(Integer, nullable=True)
-    local_gb = Column(Integer, nullable=True)
-    vcpus_used = Column(Integer, nullable=True)
-    memory_mb_used = Column(Integer, nullable=True)
-    local_gb_used = Column(Integer, nullable=True)
-    hypervisor_type = Column(Text, nullable=True)
-    hypervisor_version = Column(Integer, nullable=True)
+    vcpus = Column(Integer)
+    memory_mb = Column(Integer)
+    local_gb = Column(Integer)
+    vcpus_used = Column(Integer)
+    memory_mb_used = Column(Integer)
+    local_gb_used = Column(Integer)
+    hypervisor_type = Column(Text)
+    hypervisor_version = Column(Integer)
 
     # Note(masumotok): Expected Strings example:
     #
diff --git a/nova/exception.py b/nova/exception.py
index 69b3e0359..80e8293b6 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -211,6 +211,11 @@ class DestinationHypervisorTooOld(Invalid):
             "has been provided.")
 
 
+class DestinationDiskExists(Invalid):
+    message = _("The supplied disk path (%(path)s) already exists, "
+                "it is expected not to exist.")
+
+
 class InvalidDevicePath(Invalid):
     message = _("The supplied device path (%(path)s) is invalid.")
 
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 0b257c5d8..889baa567 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -77,7 +77,8 @@ class Scheduler(object):
         """Must override at least this method for scheduler to work."""
         raise NotImplementedError(_("Must implement a fallback schedule"))
 
-    def schedule_live_migration(self, context, instance_id, dest):
+    def schedule_live_migration(self, context, instance_id, dest,
+                                block_migration=False):
         """Live migration scheduling method.
 
         :param context:
@@ -88,7 +89,6 @@ class Scheduler(object):
             Then scheduler send request that host.
 
         """
-
         # Whether instance exists and is running.
         instance_ref = db.instance_get(context, instance_id)
 
@@ -96,10 +96,12 @@ class Scheduler(object):
         self._live_migration_src_check(context, instance_ref)
 
         # Checking destination host.
-        self._live_migration_dest_check(context, instance_ref, dest)
+        self._live_migration_dest_check(context, instance_ref,
+                                        dest, block_migration)
 
         # Common checking.
-        self._live_migration_common_check(context, instance_ref, dest)
+        self._live_migration_common_check(context, instance_ref,
+                                          dest, block_migration)
 
         # Changing instance_state.
         db.instance_set_state(context,
@@ -147,7 +149,8 @@ class Scheduler(object):
         if not self.service_is_up(services[0]):
             raise exception.ComputeServiceUnavailable(host=src)
 
-    def _live_migration_dest_check(self, context, instance_ref, dest):
+    def _live_migration_dest_check(self, context, instance_ref, dest,
+                                   block_migration):
         """Live migration check routine (for destination host).
 
         :param context: security context
@@ -175,9 +178,11 @@ class Scheduler(object):
 
         # Checking dst host still has enough capacities.
         self.assert_compute_node_has_enough_resources(context,
                                                       instance_ref,
-                                                      dest)
+                                                      dest,
+                                                      block_migration)
 
-    def _live_migration_common_check(self, context, instance_ref, dest):
+    def _live_migration_common_check(self, context, instance_ref, dest,
+                                     block_migration):
         """Live migration common check routine.
        Below checkings are followed by
@@ -186,11 +191,19 @@ class Scheduler(object):
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
+        :param block_migration: if True, check for block migration.
 
        """
        # Checking shared storage connectivity
-        self.mounted_on_same_shared_storage(context, instance_ref, dest)
+        # if block migration, instances_path should not be on shared storage.
+        try:
+            self.mounted_on_same_shared_storage(context, instance_ref, dest)
+            if block_migration:
+                raise
+        except rpc.RemoteError:
+            if not block_migration:
+                raise
 
        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
@@ -229,14 +242,24 @@ class Scheduler(object):
                        "original host %(src)s.") % locals())
            raise
 
-    def assert_compute_node_has_enough_resources(self, context,
-                                                 instance_ref, dest):
+    def assert_compute_node_has_enough_resources(self, context, instance_ref,
+                                                 dest, block_migration):
        """Checks if destination host has enough resource for live migration.
 
-        Currently, only memory checking has been done.
-        If storage migration(block migration, meaning live-migration
-        without any shared storage) will be available, local storage
-        checking is also necessary.
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+        :param block_migration: if True, disk availability is also checked
+
+        """
+        self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
+        if not block_migration:
+            return
+        self.assert_compute_node_has_enough_disk(context, instance_ref, dest)
+
+    def assert_compute_node_has_enough_memory(self, context,
+                                              instance_ref, dest):
+        """Checks if destination host has enough memory for live migration.
 
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
 
        """
 
-        # Getting instance information
-        ec2_id = instance_ref['hostname']
+        # Getting total available memory of host
+        avail = self._get_compute_info(context, dest, 'memory_mb')
 
-        # Getting host information
-        service_refs = db.service_get_all_compute_by_host(context, dest)
-        compute_node_ref = service_refs[0]['compute_node'][0]
+        # Getting total used memory of host
+        # It should be sum of memories that are assigned as max value,
+        # because overcommitting is risky.
+        used = 0
+        instance_refs = db.instance_get_all_by_host(context, dest)
+        used_list = [i['memory_mb'] for i in instance_refs]
+        if used_list:
+            used = reduce(lambda x, y: x + y, used_list)
 
-        mem_total = int(compute_node_ref['memory_mb'])
-        mem_used = int(compute_node_ref['memory_mb_used'])
-        mem_avail = mem_total - mem_used
        mem_inst = instance_ref['memory_mb']
-        if mem_avail <= mem_inst:
-            reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
-                       "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+        avail = avail - used
+        if avail <= mem_inst:
+            ec2_id = instance_ref['hostname']
+            reason = _("Unable to migrate %(ec2_id)s to %(dest)s: Lack of "
+                       "memory(host:%(avail)s <= instance:%(mem_inst)s)")
            raise exception.MigrationError(reason=reason % locals())
 
+    def assert_compute_node_has_enough_disk(self, context,
+                                            instance_ref, dest):
+        """Checks if destination host has enough disk for block migration.
+
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+
+        """
+
+        # Getting total available disk of host
+        avail = self._get_compute_info(context, dest, 'local_gb')
+
+        # Getting total used disk of host
+        # It should be sum of disks that are assigned as max value
+        # because overcommitting is risky.
+        used = 0
+        instance_refs = db.instance_get_all_by_host(context, dest)
+        used_list = [i['local_gb'] for i in instance_refs]
+        if used_list:
+            used = reduce(lambda x, y: x + y, used_list)
+
+        disk_inst = instance_ref['local_gb']
+        avail = avail - used
+        if avail <= disk_inst:
+            ec2_id = instance_ref['hostname']
+            reason = _("Unable to migrate %(ec2_id)s to %(dest)s: Lack of "
+                       "disk(host:%(avail)s <= instance:%(disk_inst)s)")
+            raise exception.MigrationError(reason=reason % locals())
+
+    def _get_compute_info(self, context, host, key):
+        """Get compute node's information specified by key.
+
+        :param context: security context
+        :param host: hostname (must be a compute node)
+        :param key: column name of compute_nodes
+        :return: value specified by key
+
+        """
+        compute_node_ref = db.service_get_all_compute_by_host(context, host)
+        compute_node_ref = compute_node_ref[0]['compute_node'][0]
+        return compute_node_ref[key]
+
    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index bd40e73c0..1f663ca4b 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -97,7 +97,7 @@ class SchedulerManager(manager.Manager):
    # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
    #                    Based on bexar design summit discussion,
    #                    just put this here for bexar release.
-    def show_host_resources(self, context, host, *args):
+    def show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.
 
        :param context: security context
        :param host: hostname.
        :returns: example format is below.
            {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
-            D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
+            D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
+                'vcpus_used': 12, 'memory_mb_used': 10240,
+                'local_gb_used': 64}
 
        """
+        # Getting compute node info and related instances info
        compute_ref = db.service_get_all_compute_by_host(context, host)
        compute_ref = compute_ref[0]
-
-        # Getting physical resource information
-        compute_node_ref = compute_ref['compute_node'][0]
-        resource = {'vcpus': compute_node_ref['vcpus'],
-                    'memory_mb': compute_node_ref['memory_mb'],
-                    'local_gb': compute_node_ref['local_gb'],
-                    'vcpus_used': compute_node_ref['vcpus_used'],
-                    'memory_mb_used': compute_node_ref['memory_mb_used'],
-                    'local_gb_used': compute_node_ref['local_gb_used']}
-
-        # Getting usage resource information
-        usage = {}
        instance_refs = db.instance_get_all_by_host(context,
                                                    compute_ref['host'])
+
+        # Getting total available/used resource
+        compute_ref = compute_ref['compute_node'][0]
+        resource = {'vcpus': compute_ref['vcpus'],
+                    'memory_mb': compute_ref['memory_mb'],
+                    'local_gb': compute_ref['local_gb'],
+                    'vcpus_used': compute_ref['vcpus_used'],
+                    'memory_mb_used': compute_ref['memory_mb_used'],
+                    'local_gb_used': compute_ref['local_gb_used']}
+        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}
 
+        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
-            vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
-                                                                 host,
-                                                                 project_id)
-            mem = db.instance_get_memory_sum_by_host_and_project(context,
-                                                                 host,
-                                                                 project_id)
-            hdd = db.instance_get_disk_sum_by_host_and_project(context,
-                                                               host,
-                                                               project_id)
-            usage[project_id] = {'vcpus': int(vcpus),
-                                 'memory_mb': int(mem),
-                                 'local_gb': int(hdd)}
+            vcpus = [i['vcpus'] for i in instance_refs \
+                if i['project_id'] == project_id]
+
+            mem = [i['memory_mb'] for i in instance_refs \
+                if i['project_id'] == project_id]
+
+            disk = [i['local_gb'] for i in instance_refs \
+                if i['project_id'] == project_id]
+
+            usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus),
+                                 'memory_mb': reduce(lambda x, y: x + y, mem),
+                                 'local_gb': reduce(lambda x, y: x + y, disk)}
 
        return {'resource': resource, 'usage': usage}
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 50b6b52c6..384f6fb00 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -652,10 +652,13 @@ class SimpleDriverTestCase(test.TestCase):
        self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
        self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
        driver_i._live_migration_src_check(nocare, nocare)
-        driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
-        driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+        driver_i._live_migration_dest_check(nocare, nocare,
+                                            i_ref['host'], False)
+        driver_i._live_migration_common_check(nocare, nocare,
+                                              i_ref['host'], False)
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
-        kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+        kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
+                  'block_migration': False}
        rpc.cast(self.context,
                 db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
                 {"method": 'live_migration', "args": kwargs})
 
        self.mox.ReplayAll()
        self.scheduler.live_migration(self.context,
FLAGS.compute_topic, instance_id=instance_id, - dest=i_ref['host']) + dest=i_ref['host'], + block_migration=False) i_ref = db.instance_get(self.context, instance_id) self.assertTrue(i_ref['state_description'] == 'migrating') @@ -744,7 +748,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.ComputeServiceUnavailable, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -757,7 +761,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.UnableToMigrateToSelf, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -765,15 +769,33 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migration_dest_check_service_lack_memory(self): """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + memory_mb=12) i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) + s_ref = self._create_compute_service(host='somewhere') + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere', False) + + db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) + db.service_destroy(self.context, s_ref['id']) + + def test_block_migration_dest_check_service_lack_disk(self): + """Confirms exception raises when dest doesn't have enough disk.""" + instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + local_gb=70) + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere') self.assertRaises(exception.MigrationError, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') + self.context, i_ref, 'somewhere', True) db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) db.service_destroy(self.context, s_ref['id']) def test_live_migration_dest_check_service_works_correctly(self): @@ -785,7 +807,8 @@ class SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_dest_check(self.context, i_ref, - 'somewhere') + 'somewhere', + False) self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -820,7 +843,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.SourceHostUnavailable, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -844,7 +867,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -870,7 +893,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() 
self.assertRaises(exception.DestinationHypervisorTooOld, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -902,7 +925,8 @@ class SimpleDriverTestCase(test.TestCase): try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, - dest) + dest, + False) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4ac2dbc4..1f48a6dce 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -545,7 +545,8 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.recover_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -622,7 +623,8 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.recover_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index b6b36745a..000b383f2 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -20,6 +20,7 @@ import os import re import shutil import sys +import tempfile from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -674,6 +675,95 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + def test_pre_block_migration_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Replace instances_path since this testcase creates tmpfile + tmpdir = tempfile.mkdtemp() + store = FLAGS.instances_path + FLAGS.instances_path = tmpdir + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyjson = '[{"path": "%s/disk", "local_gb": 10, "type": "raw"}]' + + # Preparing mocks + # qemu-img should be mockd since test environment might not have + # large disk space. 
+        self.mox.StubOutWithMock(utils, "execute")
+        utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw',
+                      '%s/%s/disk' % (tmpdir, instance_ref.name), 10)
+
+        self.mox.ReplayAll()
+        conn = connection.LibvirtConnection(False)
+        conn.pre_block_migration(self.context, instance_ref,
+                                 dummyjson % tmpdir)
+
+        self.assertTrue(os.path.exists('%s/%s/libvirt.xml' %
+                                       (tmpdir, instance_ref.name)))
+
+        shutil.rmtree(tmpdir)
+        db.instance_destroy(self.context, instance_ref['id'])
+        # Restore FLAGS.instances_path
+        FLAGS.instances_path = store
+
+    def test_get_instance_disk_info_works_correctly(self):
+        """Confirms get_instance_disk_info works correctly."""
+        # Skip if non-libvirt environment
+        if not self.lazy_load_library_exists():
+            return
+
+        # Test data
+        instance_ref = db.instance_create(self.context, self.test_instance)
+        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+                    "<devices>"
+                    "<disk type='file'><driver name='qemu'/>"
+                    "<source file='/test/disk'/>"
+                    "<target dev='vda' bus='virtio'/></disk>"
+                    "<disk type='file'><driver name='qemu'/>"
+                    "<source file='/test/disk.local'/>"
+                    "<target dev='vdb' bus='virtio'/></disk>"
+                    "</devices></domain>")
+
+        ret = ("image: /test/disk\nfile format: raw\n"
+               "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n")
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "XMLDesc")
+        vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref.name:
+                return vdmock
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+        self.mox.StubOutWithMock(os.path, "getsize")
+        # based on above testdata, one is raw image, so getsize is mocked.
+        os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024)
+        # another is qcow image, so qemu-img should be mocked.
+        self.mox.StubOutWithMock(utils, "execute")
+        utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\
+            AndReturn((ret, ''))
+
+        self.mox.ReplayAll()
+        conn = connection.LibvirtConnection(False)
+        info = conn.get_instance_disk_info(self.context, instance_ref)
+        info = utils.loads(info)
+
+        self.assertTrue(info[0]['type'] == 'raw' and
+                        info[1]['type'] == 'qcow2' and
+                        info[0]['path'] == '/test/disk' and
+                        info[1]['path'] == '/test/disk.local' and
+                        info[0]['local_gb'] == 10 and
+                        info[1]['local_gb'] == 20)
+
+        db.instance_destroy(self.context, instance_ref['id'])
+
     def test_spawn_with_network_info(self):
         # Skip if non-libvirt environment
         if not self.lazy_load_library_exists():
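Review note (not part of the patch): the JSON payload exercised above is the contract between get_instance_disk_info() on the source host and pre_block_migration() on the destination. A standalone sketch of a consumer of that payload, with illustrative path and size values:

    # Parse a disk-info payload shaped like get_instance_disk_info()'s
    # return value and show the qemu-img call the destination would make.
    import json

    disk_info_json = '[{"path": "/test/disk", "type": "raw", "local_gb": 10}]'
    for info in json.loads(disk_info_json):
        # The destination recreates each disk with the same format and size.
        print 'qemu-img create -f %(type)s %(path)s %(local_gb)sG' % info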
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c491418ae..86e388ff7 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri',
 flags.DEFINE_string('live_migration_flag',
                     "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
                     'Define live migration behavior.')
+flags.DEFINE_string('block_migration_flag',
+                    "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
+                    "VIR_MIGRATE_NON_SHARED_DISK",
+                    'Define block migration behavior.')
 flags.DEFINE_integer('live_migration_bandwidth', 0,
                      'Define live migration behavior')
 flags.DEFINE_string('qemu_img', 'qemu-img',
@@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver):
 
         If cow is True, it will make a CoW image instead of a copy.
         """
+
         if not os.path.exists(target):
             base_dir = os.path.join(FLAGS.instances_path, '_base')
             if not os.path.exists(base_dir):
@@ -1458,7 +1463,7 @@ class LibvirtConnection(driver.ComputeDriver):
             time.sleep(1)
 
     def live_migration(self, ctxt, instance_ref, dest,
-                       post_method, recover_method):
+                       post_method, recover_method, block_migration=False):
        """Spawning live_migration operation for distributing high-load.
 
        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
+        :params block_migration: if true, do block migration.
 
        """
 
        greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
-                         post_method, recover_method)
+                         post_method, recover_method, block_migration)
 
-    def _live_migration(self, ctxt, instance_ref, dest,
-                        post_method, recover_method):
+    def _live_migration(self, ctxt, instance_ref, dest, post_method,
+                        recover_method, block_migration=False):
        """Do live migration.
 
        :params ctxt: security context
@@ -1498,24 +1505,18 @@ class LibvirtConnection(driver.ComputeDriver):
 
        # Do live migration.
        try:
-            flaglist = FLAGS.live_migration_flag.split(',')
+            if block_migration:
+                flaglist = FLAGS.block_migration_flag.split(',')
+            else:
+                flaglist = FLAGS.live_migration_flag.split(',')
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)
 
-            if self.read_only:
-                tmpconn = self._connect(self.libvirt_uri, False)
-                dom = tmpconn.lookupByName(instance_ref.name)
-                dom.migrateToURI(FLAGS.live_migration_uri % dest,
-                                 logical_sum,
-                                 None,
-                                 FLAGS.live_migration_bandwidth)
-                tmpconn.close()
-            else:
-                dom = self._conn.lookupByName(instance_ref.name)
-                dom.migrateToURI(FLAGS.live_migration_uri % dest,
-                                 logical_sum,
-                                 None,
-                                 FLAGS.live_migration_bandwidth)
+            dom = self._conn.lookupByName(instance_ref.name)
+            dom.migrateToURI(FLAGS.live_migration_uri % dest,
+                             logical_sum,
+                             None,
+                             FLAGS.live_migration_bandwidth)
 
        except Exception:
            recover_method(ctxt, instance_ref, dest=dest)
@@ -1530,11 +1531,92 @@ class LibvirtConnection(driver.ComputeDriver):
                self.get_info(instance_ref.name)['state']
            except exception.NotFound:
                timer.stop()
-                post_method(ctxt, instance_ref, dest)
+                post_method(ctxt, instance_ref, dest, block_migration)
 
        timer.f = wait_for_live_migration
        timer.start(interval=0.5, now=True)
 
+    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
+        """Preparations for block migration.
+
+        :params ctxt: security context
+        :params instance_ref:
+            nova.db.sqlalchemy.models.Instance object
+            instance object that is migrated.
+        :params disk_info_json:
+            json strings specified in get_instance_disk_info
+
+        """
+        disk_info = utils.loads(disk_info_json)
+
+        # make instance directory
+        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
+        if os.path.exists(instance_dir):
+            raise exception.DestinationDiskExists(path=instance_dir)
+        os.mkdir(instance_dir)
+
+        for disk in disk_info:
+            base = os.path.basename(disk['path'])
+            # Get image type and create empty disk image.
+            instance_disk = os.path.join(instance_dir, base)
+            utils.execute('sudo', 'qemu-img', 'create', '-f', disk['type'],
+                          instance_disk, str(disk['local_gb'])+'G')
+
+        # block migration does not migrate libvirt.xml,
+        # to avoid any confusion of admins, create it now.
+        xml = self.to_xml(instance_ref)
+        f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
+        f.write(xml)
+        f.close()
+
+    def get_instance_disk_info(self, ctxt, instance_ref):
+        """Get disk information of the instance for block migration.
+
+        :params ctxt: security context
+        :params instance_ref:
+            nova.db.sqlalchemy.models.Instance object
+            instance object that is migrated.
+        :return:
+            json string in the following format:
+            "[{'path':'disk', 'type':'raw', 'local_gb':10},...]"
+
+        """
+        disk_info = []
+
+        virt_dom = self._lookup_by_name(instance_ref.name)
+        xml = virt_dom.XMLDesc(0)
+        doc = libxml2.parseDoc(xml)
+        disk_nodes = doc.xpathEval('//devices/disk')
+        path_nodes = doc.xpathEval('//devices/disk/source')
+        driver_nodes = doc.xpathEval('//devices/disk/driver')
+
+        for cnt, path_node in enumerate(path_nodes):
+            disk_type = disk_nodes[cnt].get_properties().getContent()
+            path = path_node.get_properties().getContent()
+
+            if disk_type != 'file':
+                LOG.debug(_('skipping %(path)s since it looks like volume') %
+                          locals())
+                continue
+
+            # xml is generated by kvm, so format is slightly different
+            # from libvirt.xml that nova generated.
+            #disk_type = driver_nodes[cnt].get_properties().getContent()
+            disk_type = \
+                driver_nodes[cnt].get_properties().get_next().getContent()
+            if disk_type == 'raw':
+                size = int(os.path.getsize(path)) / 1024 / 1024 / 1024
+            else:
+                out, err = utils.execute('sudo', 'qemu-img', 'info', path)
+                size = [i.split('(')[1].split()[0] for i in out.split('\n')
+                        if i.strip().find('virtual size') >= 0]
+                size = int(size[0]) / 1024 / 1024 / 1024
+
+            disk_info.append({'type': disk_type, 'path': path,
+                              'local_gb': size})
+
+        return utils.dumps(disk_info)
+
     def unfilter_instance(self, instance_ref):
         """See comments of same method in firewall_driver."""
         self.firewall_driver.unfilter_instance(instance_ref)
-- 
cgit

From c16e1af623ef8baa1098510a8a04adb06f2da81b Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Wed, 15 Jun 2011 01:35:54 +0900
Subject: added kernel/ramdisk migrate support

---
 nova/virt/libvirt/connection.py | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 86e388ff7..73ca6e0d9 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1555,12 +1555,28 @@ class LibvirtConnection(driver.ComputeDriver):
             raise exception.DestinationDiskExists(path=instance_dir)
         os.mkdir(instance_dir)
 
-        for disk in disk_info:
-            base = os.path.basename(disk['path'])
+        for info in disk_info:
+            base = os.path.basename(info['path'])
             # Get image type and create empty disk image.
             instance_disk = os.path.join(instance_dir, base)
-            utils.execute('sudo', 'qemu-img', 'create', '-f', disk['type'],
-                          instance_disk, str(disk['local_gb'])+'G')
+            utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'],
+                          instance_disk, str(info['local_gb']) + 'G')
+
+        # If the image has a kernel and ramdisk, download them
+        # in the usual way.
+        if instance_ref['kernel_id']:
+            user = manager.AuthManager().get_user(instance_ref['user_id'])
+            project = manager.AuthManager().get_project(
+                instance_ref['project_id'])
+            self._fetch_image(os.path.join(instance_dir, 'kernel'),
+                              instance_ref['kernel_id'],
+                              user,
+                              project)
+            if instance_ref['ramdisk_id']:
+                self._fetch_image(os.path.join(instance_dir, 'ramdisk'),
+                                  instance_ref['ramdisk_id'],
+                                  user,
+                                  project)
 
         # block migration does not migrate libvirt.xml,
         # to avoid any confusion of admins, create it now.
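Review note (not part of the patch): with this change, pre_block_migration() leaves the destination with a fully prepared instance directory before any disk data moves. An illustrative layout for an image with kernel and ramdisk — the instances_path prefix shown is the common default, not something the patch mandates:

    # Illustrative destination layout after pre_block_migration():
    expected_files = [
        '/var/lib/nova/instances/instance-0000000a/disk',         # empty, qemu-img create
        '/var/lib/nova/instances/instance-0000000a/disk.local',   # empty, qemu-img create
        '/var/lib/nova/instances/instance-0000000a/kernel',       # _fetch_image()
        '/var/lib/nova/instances/instance-0000000a/ramdisk',      # _fetch_image()
        '/var/lib/nova/instances/instance-0000000a/libvirt.xml',  # self.to_xml()
    ]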
-- cgit From b03b3145a18f8f4717fdc55ab50dc714516d2c54 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 08:32:34 +0900 Subject: fix comments at nova.virt.libvirt.connection --- nova/virt/libvirt/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 313b19194..8fe8148d5 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1617,9 +1617,10 @@ class LibvirtConnection(driver.ComputeDriver): locals()) continue - # xml is generated by kvm, so format is slightly different - # from libvirt.xml that nova generated. - #disk_type = driver_nodes[cnt].get_properties().getContent() + # In case of libvirt.xml, disk type can be obtained + # by the below statement. + # -> disk_type = driver_nodes[cnt].get_properties().getContent() + # but this xml is generated by kvm, format is slightly different. disk_type = \ driver_nodes[cnt].get_properties().get_next().getContent() if disk_type == 'raw': -- cgit From 9b52343f792d83647978c7edbfe700258e3ddae2 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 08:51:25 +0900 Subject: fix pep8 check --- nova/compute/manager.py | 2 +- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f7604a253..4410ff27e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1133,7 +1133,7 @@ class ComputeManager(manager.SchedulerDependentManager): # before migration starts, so if any failure occurs, # any empty images has to be deleted. but not sure adding below # method is appropreate here. for now, admin has to delete manually. - # rpc.call(ctxt, + # rpc.cast(ctxt, # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), # {"method": "self.driver.destroy", # "args": {'instance':instance_ref}) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 8fe8148d5..d0c52c763 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1617,7 +1617,7 @@ class LibvirtConnection(driver.ComputeDriver): locals()) continue - # In case of libvirt.xml, disk type can be obtained + # In case of libvirt.xml, disk type can be obtained # by the below statement. # -> disk_type = driver_nodes[cnt].get_properties().getContent() # but this xml is generated by kvm, format is slightly different. -- cgit From a6d527646184889863de5ab1082695a29f70988a Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 09:43:34 +0900 Subject: nova.virt.libvirt.connection._live_migration is changed --- nova/compute/manager.py | 20 +++++++++----------- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 402d5829e..f32dfe6ab 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1220,8 +1220,7 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, - dest=None, delete=True): + def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context @@ -1231,9 +1230,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param dest: This method is called from live migration src host. 
This param specifies destination host. - :param delete: - If true, ask destination host to remove instance dir, - since empty disk image was created for block migration """ if not host: host = instance_ref['host'] @@ -1254,12 +1250,14 @@ class ComputeManager(manager.SchedulerDependentManager): # TODO: Block migration needs empty image at destination host # before migration starts, so if any failure occurs, - # any empty images has to be deleted. but not sure adding below - # method is appropreate here. for now, admin has to delete manually. - # rpc.cast(ctxt, - # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), - # {"method": "self.driver.destroy", - # "args": {'instance':instance_ref}) + # any empty images has to be deleted. + # In current version argument dest != None means this method is + # called for error recovering + #if dest: + # rpc.cast(ctxt, + # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + # {"method": "self.driver.destroy", + # "args": {'instance':instance_ref}) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index b0517e32a..2ddaa5971 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1573,7 +1573,7 @@ class LibvirtConnection(driver.ComputeDriver): self.get_info(instance_ref.name)['state'] except exception.NotFound: timer.stop() - post_method(ctxt, instance_ref, dest, block_migration) + post_method(ctxt, instance_ref, dest) timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) -- cgit From c184fa5d03f3f8d7faaff7b583404874de409aa6 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 21 Jun 2011 20:51:07 +0900 Subject: fixed reviewer's comment. 1. adding dest-instance-dir deleting operation to nova.compute.manager, 2. fix invalid raise statement --- nova/compute/manager.py | 36 +++++++++++++++++++++++------------- nova/exception.py | 4 ++++ nova/scheduler/driver.py | 3 ++- nova/tests/test_compute.py | 17 +++++++++++++---- nova/tests/test_libvirt.py | 6 +++--- nova/virt/libvirt/connection.py | 14 ++++++++++++-- 6 files changed, 57 insertions(+), 23 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f32dfe6ab..eac543251 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1045,7 +1045,7 @@ class ComputeManager(manager.SchedulerDependentManager): return self.driver.update_available_resource(context, self.host) def pre_live_migration(self, context, instance_id, time=None, - block_migration=False, **kwargs): + block_migration=False, disk=None): """Preparations for live migration at dest host. 
        :param context: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
@@ -1106,7 +1106,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         if block_migration:
             self.driver.pre_block_migration(context,
                                             instance_ref,
-                                            kwargs.get('disk'))
+                                            disk)
 
     def live_migration(self, context, instance_id,
                        dest, block_migration=False):
@@ -1130,17 +1130,18 @@ class ComputeManager(manager.SchedulerDependentManager):
                      {"method": "check_for_export",
                       "args": {'instance_id': instance_id}})
 
-            args = {}
-            args['instance_id'] = instance_id
             if block_migration:
-                args['block_migration'] = block_migration
-                args['disk'] = \
-                    self.driver.get_instance_disk_info(context, instance_ref)
+                disk = self.driver.get_instance_disk_info(context,
+                                                          instance_ref)
+            else:
+                disk = None
 
             rpc.call(context,
                      self.db.queue_get_for(context, FLAGS.compute_topic, dest),
                      {"method": "pre_live_migration",
-                      "args": args})
+                      "args": {'instance_id': instance_id,
+                               'block_migration': block_migration,
+                               'disk': disk}})
 
         except Exception:
             i_name = instance_ref.name
@@ -1253,11 +1254,20 @@ class ComputeManager(manager.SchedulerDependentManager):
         # any empty images has to be deleted.
         # In current version argument dest != None means this method is
         # called for error recovering
-        #if dest:
-        #    rpc.cast(ctxt,
-        #         self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
-        #         {"method": "self.driver.destroy",
-        #         "args": {'instance':instance_ref})
+        if dest:
+            rpc.cast(ctxt,
+                     self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+                     {"method": "cleanup",
+                      "args": {'instance_id': instance_ref['id']}})
+
+    def cleanup(self, ctxt, instance_id):
+        """Clean up the image directory created by pre_live_migration.
+
+        :param ctxt: security context
+        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        """
+        instance_ref = self.db.instance_get(ctxt, instance_id)
+        self.driver.cleanup(instance_ref)
 
     def periodic_tasks(self, context=None):
         """Tasks to be run at a periodic interval."""
         error_list = super(ComputeManager, self).periodic_tasks(context)
diff --git a/nova/exception.py b/nova/exception.py
index 689a797fd..4ebf0e169 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -588,6 +588,10 @@ class InstanceExists(Duplicate):
     message = _("Instance %(name)s already exists.")
 
 
+class InvalidSharedStorage(NovaException):
+    message = _("%(path)s is on shared storage: %(reason)s")
+
+
 class MigrationError(NovaException):
     message = _("Migration error") + ": %(reason)s"
 
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 889baa567..55762cf85 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -200,7 +200,8 @@ class Scheduler(object):
         try:
             self.mounted_on_same_shared_storage(context, instance_ref, dest)
             if block_migration:
-                raise
+                reason = "Block migration cannot be used with shared storage."
+                raise exception.InvalidSharedStorage(reason=reason, path=dest)
         except rpc.RemoteError:
             if not block_migration:
                 raise
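Review note (not part of the patch): the control flow above is intentionally inverted — mounted_on_same_shared_storage() succeeding is an error for block migration (the instance path must NOT be shared), while its RemoteError is an error for plain live migration. A standalone truth-table sketch of the intended behavior, with a hypothetical helper name:

    # Illustrative decision table for the shared-storage check (not patch code;
    # 'shared' stands for whether mounted_on_same_shared_storage() succeeded).
    def storage_check_error(shared, block_migration):
        """Return None if migration may proceed, else an error message."""
        if block_migration and shared:
            return "Block migration cannot be used with shared storage."
        if not block_migration and not shared:
            return "Live migration requires shared instance storage."
        return None

    for shared in (True, False):
        for block in (True, False):
            print shared, block, storage_check_error(shared, block)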
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 840961771..e9f5eb0e8 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -556,7 +556,10 @@ class ComputeTestCase(test.TestCase):
         dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                              AndReturn(topic)
         rpc.call(c, topic, {"method": "pre_live_migration",
-                            "args": {'instance_id': i_ref['id']}})
+                            "args": {'instance_id': i_ref['id'],
+                                     'block_migration': False,
+                                     'disk': None}})
+
         self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
         self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                                            self.compute.post_live_migration,
@@ -582,7 +585,9 @@ class ComputeTestCase(test.TestCase):
         dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                              AndReturn(topic)
         rpc.call(c, topic, {"method": "pre_live_migration",
-                            "args": {'instance_id': i_ref['id']}}).\
+                            "args": {'instance_id': i_ref['id'],
+                                     'block_migration': False,
+                                     'disk': None}}).\
             AndRaise(rpc.RemoteError('', '', ''))
         dbmock.instance_update(c, i_ref['id'],
                                {'state_description': 'running',
                                 'state': power_state.RUNNING,
@@ -609,7 +614,9 @@ class ComputeTestCase(test.TestCase):
                              AndReturn(topic)
         self.mox.StubOutWithMock(rpc, 'call')
         rpc.call(c, topic, {"method": "pre_live_migration",
-                            "args": {'instance_id': i_ref['id']}}).\
+                            "args": {'instance_id': i_ref['id'],
+                                     'block_migration': False,
+                                     'disk': None}}).\
             AndRaise(rpc.RemoteError('', '', ''))
         dbmock.instance_update(c, i_ref['id'],
                                {'state_description': 'running',
                                 'state': power_state.RUNNING,
@@ -634,7 +641,9 @@ class ComputeTestCase(test.TestCase):
         dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                              AndReturn(topic)
         rpc.call(c, topic, {"method": "pre_live_migration",
-                            "args": {'instance_id': i_ref['id']}})
+                            "args": {'instance_id': i_ref['id'],
+                                     'block_migration': False,
+                                     'disk': None}})
         self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
         self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                                            self.compute.post_live_migration,
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 79a2b84b7..2e57dae08 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -734,7 +734,7 @@ class LibvirtConnTestCase(test.TestCase):
         # large disk space.
         self.mox.StubOutWithMock(utils, "execute")
         utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw',
-                      '%s/%s/disk' % (tmpdir, instance_ref.name), 10)
+                      '%s/%s/disk' % (tmpdir, instance_ref.name), '10G')
 
         self.mox.ReplayAll()
         conn = connection.LibvirtConnection(False)
@@ -759,10 +759,10 @@ class LibvirtConnTestCase(test.TestCase):
         instance_ref = db.instance_create(self.context, self.test_instance)
         dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                     "<devices>"
-                    "<disk type='file'><driver name='qemu'/>"
+                    "<disk type='file'><driver name='qemu' type='raw'/>"
                     "<source file='/test/disk'/>"
                     "<target dev='vda' bus='virtio'/></disk>"
-                    "<disk type='file'><driver name='qemu'/>"
+                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                     "<source file='/test/disk.local'/>"
                     "<target dev='vdb' bus='virtio'/></disk>"
                     "</devices></domain>")
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 2ddaa5971..8d23df45f 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -120,7 +120,7 @@ flags.DEFINE_string('live_migration_flag',
                     'Define live migration behavior.')
 flags.DEFINE_string('block_migration_flag',
                     "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
-                    "VIR_MIGRATE_NON_SHARED_DISK",
+                    "VIR_MIGRATE_NON_SHARED_INC",
                     'Define block migration behavior.')
 flags.DEFINE_integer('live_migration_bandwidth', 0,
                      'Define live migration behavior')
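Review note (not part of the patch): the switch from VIR_MIGRATE_NON_SHARED_DISK to VIR_MIGRATE_NON_SHARED_INC appears deliberate — _INC asks libvirt for an incremental copy on top of pre-existing destination images, which matches pre_block_migration() creating empty images of the right format and size beforehand, while _DISK would copy the full disks itself. The driver turns the comma-separated flag string into a bitmask roughly as follows (standalone sketch; the numeric values are stand-ins for the constants exposed by the libvirt binding):

    # Standalone sketch of the flag parsing done in _live_migration().
    VIR_MIGRATE_PEER2PEER = 2
    VIR_MIGRATE_UNDEFINE_SOURCE = 16
    VIR_MIGRATE_NON_SHARED_INC = 128

    block_migration_flag = ("VIR_MIGRATE_UNDEFINE_SOURCE, "
                            "VIR_MIGRATE_PEER2PEER, "
                            "VIR_MIGRATE_NON_SHARED_INC")
    flagvals = [globals()[x.strip()] for x in block_migration_flag.split(',')]
    logical_sum = reduce(lambda x, y: x | y, flagvals)
    print logical_sum  # 146, i.e. 16 | 2 | 128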
@@ -295,7 +295,10 @@ class LibvirtConnection(driver.ComputeDriver):
             # NOTE(justinsb): We remove the domain definition. We probably
             # would do better to keep it if cleanup=False (e.g. volumes?)
             # (e.g. #2 - not losing machines on failure)
-            virt_dom.undefine()
+            # NOTE(masumotok): Migrated instances do not have domain
+            # definitions.
+            if instance.name in self._conn.listDefinedDomains():
+                virt_dom.undefine()
         except libvirt.libvirtError as e:
             errcode = e.get_error_code()
             LOG.warning(_("Error from libvirt during undefine of "
@@ -335,6 +338,13 @@ class LibvirtConnection(driver.ComputeDriver):
         if os.path.exists(target):
             shutil.rmtree(target)
 
+    def cleanup(self, instance):
+        """Clean up the image directory created by pre_live_migration.
+
+        :param instance: nova.db.sqlalchemy.models.Instance
+        """
+        self._cleanup(instance)
+
     @exception.wrap_exception
     def attach_volume(self, instance_name, device_path, mountpoint):
         virt_dom = self._lookup_by_name(instance_name)
-- 
cgit

From e01848ed64c4523bb9e375da07e962b5ea1ea6ee Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Tue, 21 Jun 2011 21:10:08 +0900
Subject: erase unnecessary TODO statement

---
 nova/compute/manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index eac543251..16dbda47d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1249,7 +1249,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             if dest:
                 volume_api.remove_from_compute(ctxt, volume_id, dest)
 
-        # TODO: Block migration needs empty image at destination host
+        # Block migration needs empty image at destination host
         # before migration starts, so if any failure occurs,
         # any empty images has to be deleted.
-- 
cgit

From 4001ee488420589e345dc42001e6cab9c68a5e12 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Mon, 11 Jul 2011 16:31:31 +0900
Subject: fix comments

---
 nova/compute/manager.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7d89dde80..1fbbe0065 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1341,10 +1341,9 @@ class ComputeManager(manager.SchedulerDependentManager):
                                            instance_id, block_migration=False):
         """Post operations for live migration.
-        :param ctxt: security context
+        :param context: security context
         :param instance_id: nova.db.sqlalchemy.models.Instance.Id
         :param block_migration: if true, post operation of block migration
-        :param xml: libvirt.xml
         """
         instance_ref = self.db.instance_get(context, instance_id)
-- 
cgit

From 79283cbb13d91e3c25e42af765f9da627813a6d8 Mon Sep 17 00:00:00 2001
From: Kei masumoto
Date: Fri, 29 Jul 2011 20:03:23 +0900
Subject: merged trunk and fixed post_live_migration_at_destination to get nw_info

---
 nova/compute/manager.py         | 4 +++-
 nova/virt/libvirt/connection.py | 7 +++++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index e80394fbd..8aee456fc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1389,7 +1389,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # No instance booting at source host, but instance dir
         # must be deleted for preparing next block migration
         if block_migration:
-            self.driver.destroy(instance_ref)
+            self.driver.destroy(instance_ref, network_info)
 
         LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
                  % locals())
@@ -1409,8 +1409,10 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_ref = self.db.instance_get(context, instance_id)
         LOG.info(_('Post operation of migration started for %s.')
                  % instance_ref.name)
+        network_info = self._get_instance_nw_info(context, instance_ref)
         self.driver.post_live_migration_at_destination(context,
                                                        instance_ref,
+                                                       network_info,
                                                        block_migration)
 
     def rollback_live_migration(self, context, instance_ref,
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 0eab0b109..31f7acb4d 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1620,7 +1620,9 @@ class LibvirtConnection(driver.ComputeDriver):
                               user,
                               project)
 
-    def post_live_migration_at_destination(self, ctxt, instance_ref,
+    def post_live_migration_at_destination(self, ctxt,
+                                           instance_ref,
+                                           network_info,
                                            block_migration):
         """Post operation of live migration at destination host.
 
@@ -1628,6 +1630,7 @@ class LibvirtConnection(driver.ComputeDriver):
         :params instance_ref:
             nova.db.sqlalchemy.models.Instance object
             instance object that is migrated.
+        :params network_info: instance network information
         :params block_migration: if true, post operation of block migration.
         """
         # Define migrated instance, otherwise, suspend/destroy does not work.
@@ -1639,7 +1642,7 @@ class LibvirtConnection(driver.ComputeDriver):
         # In case of block migration, destination does not have
         # libvirt.xml
         if not os.path.isfile(xml_path):
-            xml = self.to_xml(instance_ref)
+            xml = self.to_xml(instance_ref, network_info=network_info)
             f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
             f.write(xml)
             f.close()
-- 
cgit

From f881bee5b1283d5bec2396b45cea9a062cb2a4b2 Mon Sep 17 00:00:00 2001
From: Ed Leafe
Date: Thu, 11 Aug 2011 14:00:56 -0500
Subject: Refactored the scheduler classes without changing functionality.
 Removed all 'zone-aware' naming references, as these were only useful during
 the zone development process. Also fixed some PEP8 problems in trunk code.
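Review note (not part of the patch): for out-of-tree schedulers built on the old class, this refactor is an import/rename change only. A minimal subclass sketch — the filter_hosts/weigh_hosts names are assumed expansion points based on the module docstring below and are not shown in this excerpt:

    # Illustrative only: adapting a custom scheduler to the rename.
    from nova.scheduler import abstract_scheduler


    class MyScheduler(abstract_scheduler.AbstractScheduler):
        def filter_hosts(self, topic, request_spec, hosts=None):
            # Keep every candidate host.
            return hosts

        def weigh_hosts(self, topic, request_spec, hosts):
            # Give all hosts equal weight.
            return [dict(weight=1, hostname=host) for host in hosts]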
--- nova/scheduler/abstract_scheduler.py              | 403 ++++++++++++++++++++++
 nova/scheduler/host_filter.py                     |  51 +--
 nova/scheduler/least_cost.py                      |   7 +-
 nova/scheduler/zone_aware_scheduler.py            | 383 --------------------
 nova/tests/scheduler/test_abstract_scheduler.py   | 364 +++++++++++++++++++
 nova/tests/scheduler/test_least_cost_scheduler.py |   4 +-
 nova/tests/scheduler/test_zone_aware_scheduler.py | 364 -------------------
 7 files changed, 782 insertions(+), 794 deletions(-)
 create mode 100644 nova/scheduler/abstract_scheduler.py
 delete mode 100644 nova/scheduler/zone_aware_scheduler.py
 create mode 100644 nova/tests/scheduler/test_abstract_scheduler.py
 delete mode 100644 nova/tests/scheduler/test_zone_aware_scheduler.py

diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py
new file mode 100644
index 000000000..eb924732a
--- /dev/null
+++ b/nova/scheduler/abstract_scheduler.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The AbstractScheduler is a base Scheduler class for creating instances
+across zones. There are two expansion points to this class for:
+1. Assigning Weights to hosts for requested instances
+2. Filtering Hosts based on required instance capabilities
+"""
+
+import operator
+import json
+
+import M2Crypto
+
+from novaclient import v1_1 as novaclient
+from novaclient import exceptions as novaclient_exceptions
+
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+
+from nova.compute import api as compute_api
+from nova.scheduler import api
+from nova.scheduler import driver
+from nova.scheduler import host_filter
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
+
+
+class InvalidBlob(exception.NovaException):
+    message = _("Ill-formed or incorrectly routed 'blob' data sent "
+                "to instance create request.")
+
+
+class AbstractScheduler(driver.Scheduler):
+    """Base class for creating Schedulers that can work across any nova
+    deployment, from simple designs to multiply-nested zones.
+    """
+
+    def _call_zone_method(self, context, method, specs, zones):
+        """Call novaclient zone method. Broken out for testing."""
+        return api.call_zone_method(context, method, specs=specs, zones=zones)
+
+    def _provision_resource_locally(self, context, build_plan_item,
+                                    request_spec, kwargs):
+        """Create the requested resource in this Zone."""
+        host = build_plan_item['hostname']
+        base_options = request_spec['instance_properties']
+        image = request_spec['image']
+
+        # TODO(sandy): I guess someone needs to add block_device_mapping
+        # support at some point? Also, OS API has no concept of security
+        # groups.
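+        # Create the DB entry first so an instance id exists to hand to
+        # the run_instance cast below.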
+ instance = compute_api.API().create_db_entry_for_new_instance(context, + image, base_options, None, []) + + instance_id = instance['id'] + kwargs['instance_id'] = instance_id + + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Provisioning locally via compute node %(host)s") + % locals()) + + def _decrypt_blob(self, blob): + """Returns the decrypted blob or None if invalid. Broken out + for testing.""" + decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) + try: + json_entry = decryptor(blob) + return json.dumps(json_entry) + except M2Crypto.EVP.EVPError: + pass + return None + + def _ask_child_zone_to_create_instance(self, context, zone_info, + request_spec, kwargs): + """Once we have determined that the request should go to one + of our children, we need to fabricate a new POST /servers/ + call with the same parameters that were passed into us. + + Note that we have to reverse engineer from our args to get back the + image, flavor, ipgroup, etc. since the original call could have + come in from EC2 (which doesn't use these things).""" + + instance_type = request_spec['instance_type'] + instance_properties = request_spec['instance_properties'] + + name = instance_properties['display_name'] + image_ref = instance_properties['image_ref'] + meta = instance_properties['metadata'] + flavor_id = instance_type['flavorid'] + reservation_id = instance_properties['reservation_id'] + + files = kwargs['injected_files'] + ipgroup = None # Not supported in OS API ... yet + + child_zone = zone_info['child_zone'] + child_blob = zone_info['child_blob'] + zone = db.zone_get(context, child_zone) + url = zone.api_url + LOG.debug(_("Forwarding instance create call to child zone %(url)s" + ". ReservationID=%(reservation_id)s") + % locals()) + nova = None + try: + nova = novaclient.Client(zone.username, zone.password, None, url) + nova.authenticate() + except novaclient_exceptions.BadRequest, e: + raise exception.NotAuthorized(_("Bad credentials attempting " + "to talk to zone at %(url)s.") % locals()) + + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, + child_blob, reservation_id=reservation_id) + + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): + """Create the requested resource locally or in a child zone + based on what is stored in the zone blob info. + + Attempt to decrypt the blob to see if this request is: + 1. valid, and + 2. intended for this zone or a child zone. + + Note: If we have "blob" that means the request was passed + into us from a parent zone. If we have "child_blob" that + means we gathered the info from one of our children. + It's possible that, when we decrypt the 'blob' field, it + contains "child_blob" data. In which case we forward the + request.""" + + host_info = None + if "blob" in build_plan_item: + # Request was passed in from above. Is it for us? + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: + # Our immediate child zone provided this info ... + host_info = build_plan_item + + if not host_info: + raise InvalidBlob() + + # Valid data ... is it for us? 
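+        # A decrypted blob that itself carries 'child_zone'/'child_blob'
+        # is routed one level further down the zone tree; otherwise we
+        # provision on the local host it names.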
+ if 'child_zone' in host_info and 'child_blob' in host_info: + self._ask_child_zone_to_create_instance(context, host_info, + request_spec, kwargs) + else: + self._provision_resource_locally(context, host_info, request_spec, + kwargs) + + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) + return + + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. + """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) + + def schedule_run_instance(self, context, instance_id, request_spec, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone). + """ + + # TODO(sandy): We'll have to look for richer specs at some point. + + blob = request_spec.get('blob') + if blob: + self._provision_resource(context, request_spec, instance_id, + request_spec, kwargs) + return None + + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + + # Create build plan and provision ... + build_plan = self.select(context, request_spec) + if not build_plan: + raise driver.NoValidHost(_('No hosts were available')) + + for num in xrange(num_instances): + if not build_plan: + break + + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) + + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + + def select(self, context, request_spec, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children. + """ + return self._schedule(context, "compute", request_spec, + *args, **kwargs) + + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + def schedule(self, context, topic, request_spec, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. 
+ """ + raise driver.NoValidHost(_('No hosts were available')) + + def _schedule(self, context, topic, request_spec, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + if topic != "compute": + raise NotImplementedError(_("Scheduler only understands" + " Compute nodes (for now)")) + + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] + + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break + + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) + + # Next, tack on the best weights from the child zones ... + json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) + child_results = self._call_zone_method(context, "select", + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. + host_dict = {"weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. + """ + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def hold_filter_hosts(self, topic, request_spec, hosts=None): + """Filter the full host list (from the ZoneManager)""" + # NOTE(dabo): The logic used by the current _schedule() method + # is incorrect. Since this task is just to refactor the classes, + # I'm not fixing the logic now - that will be the next task. + # So for now this method is just renamed; afterwards this will + # become the filter_hosts() method, and the one below will + # be removed. + filter_name = request_spec.get('filter', None) + # Make sure that the requested filter is legitimate. + selected_filter = host_filter.choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. 
+ instance_type = request_spec['instance_type'] + name, query = selected_filter.instance_type_to_filter(instance_type) + return selected_filter.filter_hosts(self.zone_manager, query) + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '_filter' function more appropriate. + """ + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no _filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True + + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): + """Derived classes may override this to provide more sophisticated + scheduling objectives + """ + # NOTE(sirp): The default logic is the same as the NoopCostFunction + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index b7bbbbcb8..45a8f40d8 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,7 +14,12 @@ # under the License. """ -Host Filter is a mechanism for requesting instance resources. +The Host Filter classes are a way to ensure that only hosts that are +appropriate are considered when creating a new instance. Hosts that are +either incompatible or insufficient to accept a newly-requested instance +are removed by Host Filter classes from consideration. Those that pass +the filter are then passed on for weighting or other process for ordering. + Three filters are included: AllHosts, Flavor & JSON. AllHosts just returns the full, unfiltered list of hosts. Flavor is a hard coded matching mechanism based on flavor criteria and JSON is an ad-hoc @@ -28,12 +33,6 @@ noted a need for a more expressive way of specifying instances. Since we don't want to get into building full DSL this is a simple form as an example of how this could be done. In reality, most consumers will use the more rigid filters such as FlavorFilter. - -Note: These are "required" capability filters. These capabilities -used must be present or the host will be excluded. The hosts -returned are then weighed by the Weighted Scheduler. Weights -can take the more esoteric factors into consideration (such as -server affinity and customer separation). 
""" import json @@ -41,9 +40,7 @@ import json from nova import exception from nova import flags from nova import log as logging -from nova.scheduler import zone_aware_scheduler from nova import utils -from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') @@ -125,9 +122,8 @@ class InstanceTypeFilter(HostFilter): spec_disk = instance_type['local_gb'] extra_specs = instance_type['extra_specs'] - if host_ram_mb >= spec_ram and \ - disk_bytes >= spec_disk and \ - self._satisfies_extra_specs(capabilities, instance_type): + if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and + self._satisfies_extra_specs(capabilities, instance_type)): selected_hosts.append((host, capabilities)) return selected_hosts @@ -309,7 +305,6 @@ def choose_host_filter(filter_name=None): function checks the filter name against a predefined set of acceptable filters. """ - if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in FILTERS: @@ -317,33 +312,3 @@ def choose_host_filter(filter_name=None): if host_match == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) - - -class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter to filter - hosts for weighing. The particular filter used may be passed in - as an argument or the default will be used. - - request_spec = {'filter': , - 'instance_type': } - """ - - def filter_hosts(self, topic, request_spec, hosts=None): - """Filter the full host list (from the ZoneManager)""" - - filter_name = request_spec.get('filter', None) - host_filter = choose_host_filter(filter_name) - - # TODO(sandy): We're only using InstanceType-based specs - # currently. Later we'll need to snoop for more detailed - # host filter requests. - instance_type = request_spec['instance_type'] - name, query = host_filter.instance_type_to_filter(instance_type) - return host_filter.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format. - """ - return [dict(weight=1, hostname=hostname, capabilities=caps) - for hostname, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 329107efe..a58b11289 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,11 +22,14 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. """ +# TODO(dabo): This class will be removed in the next merge prop; it remains now +# because much of the code will be refactored into different classes. 
+ import collections from nova import flags from nova import log as logging -from nova.scheduler import zone_aware_scheduler +from nova.scheduler import abstract_scheduler from nova import utils from nova import exception @@ -61,7 +64,7 @@ def compute_fill_first_cost_fn(host): return free_mem -class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): +class LeastCostScheduler(abstract_scheduler.AbstractScheduler): def __init__(self, *args, **kwargs): self.cost_fns_cache = {} super(LeastCostScheduler, self).__init__(*args, **kwargs) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py deleted file mode 100644 index d1924c9f9..000000000 --- a/nova/scheduler/zone_aware_scheduler.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright (c) 2011 Openstack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The Zone Aware Scheduler is a base class Scheduler for creating instances -across zones. There are two expansion points to this class for: -1. Assigning Weights to hosts for requested instances -2. Filtering Hosts based on required instance capabilities -""" - -import operator -import json - -import M2Crypto - -from novaclient import v1_1 as novaclient -from novaclient import exceptions as novaclient_exceptions - -from nova import crypto -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import rpc - -from nova.compute import api as compute_api -from nova.scheduler import api -from nova.scheduler import driver - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') - - -class InvalidBlob(exception.NovaException): - message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") - - -class ZoneAwareScheduler(driver.Scheduler): - """Base class for creating Zone Aware Schedulers.""" - - def _call_zone_method(self, context, method, specs, zones): - """Call novaclient zone method. Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs, zones=zones) - - def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): - """Create the requested resource in this Zone.""" - host = build_plan_item['hostname'] - base_options = request_spec['instance_properties'] - image = request_spec['image'] - - # TODO(sandy): I guess someone needs to add block_device_mapping - # support at some point? Also, OS API has no concept of security - # groups. - instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) - - instance_id = instance['id'] - kwargs['instance_id'] = instance_id - - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) - LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) - - def _decrypt_blob(self, blob): - """Returns the decrypted blob or None if invalid. 
Broken out - for testing.""" - decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) - try: - json_entry = decryptor(blob) - return json.dumps(json_entry) - except M2Crypto.EVP.EVPError: - pass - return None - - def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): - """Once we have determined that the request should go to one - of our children, we need to fabricate a new POST /servers/ - call with the same parameters that were passed into us. - - Note that we have to reverse engineer from our args to get back the - image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - - instance_type = request_spec['instance_type'] - instance_properties = request_spec['instance_properties'] - - name = instance_properties['display_name'] - image_ref = instance_properties['image_ref'] - meta = instance_properties['metadata'] - flavor_id = instance_type['flavorid'] - reservation_id = instance_properties['reservation_id'] - - files = kwargs['injected_files'] - ipgroup = None # Not supported in OS API ... yet - - child_zone = zone_info['child_zone'] - child_blob = zone_info['child_blob'] - zone = db.zone_get(context, child_zone) - url = zone.api_url - LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) - nova = None - try: - nova = novaclient.Client(zone.username, zone.password, None, url) - nova.authenticate() - except novaclient_exceptions.BadRequest, e: - raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - - nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) - - def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): - """Create the requested resource locally or in a child zone - based on what is stored in the zone blob info. - - Attempt to decrypt the blob to see if this request is: - 1. valid, and - 2. intended for this zone or a child zone. - - Note: If we have "blob" that means the request was passed - into us from a parent zone. If we have "child_blob" that - means we gathered the info from one of our children. - It's possible that, when we decrypt the 'blob' field, it - contains "child_blob" data. In which case we forward the - request.""" - - host_info = None - if "blob" in build_plan_item: - # Request was passed in from above. Is it for us? - host_info = self._decrypt_blob(build_plan_item['blob']) - elif "child_blob" in build_plan_item: - # Our immediate child zone provided this info ... - host_info = build_plan_item - - if not host_info: - raise InvalidBlob() - - # Valid data ... is it for us? 
- if 'child_zone' in host_info and 'child_blob' in host_info: - self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) - else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) - - def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): - """Create the requested resource in this Zone or a child zone.""" - if "hostname" in build_plan_item: - self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) - return - - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) - - def _adjust_child_weights(self, child_results, zones): - """Apply the Scale and Offset values from the Zone definition - to adjust the weights returned from the child zones. Alters - child_results in place. - """ - for zone_id, result in child_results: - if not result: - continue - - assert isinstance(zone_id, int) - - for zone_rec in zones: - if zone_rec['id'] != zone_id: - continue - - for item in result: - try: - offset = zone_rec['weight_offset'] - scale = zone_rec['weight_scale'] - raw_weight = item['weight'] - cooked_weight = offset + scale * raw_weight - item['weight'] = cooked_weight - item['raw_weight'] = raw_weight - except KeyError: - LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) - - def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): - """This method is called from nova.compute.api to provision - an instance. However we need to look at the parameters being - passed in to see if this is a request to: - 1. Create a Build Plan and then provision, or - 2. Use the Build Plan information in the request parameters - to simply create the instance (either in this zone or - a child zone). - """ - - # TODO(sandy): We'll have to look for richer specs at some point. - - blob = request_spec.get('blob') - if blob: - self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) - return None - - num_instances = request_spec.get('num_instances', 1) - LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % - locals()) - - # Create build plan and provision ... - build_plan = self.select(context, request_spec) - if not build_plan: - raise driver.NoValidHost(_('No hosts were available')) - - for num in xrange(num_instances): - if not build_plan: - break - - build_plan_item = build_plan.pop(0) - self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) - - # Returning None short-circuits the routing to Compute (since - # we've already done it here) - return None - - def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information - corresponding to the best hosts to service the request. Any - child zone information has been encrypted so as not to reveal - anything about the children. - """ - return self._schedule(context, "compute", request_spec, - *args, **kwargs) - - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. - def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one - best-suited host for this request. 
- """ - raise driver.NoValidHost(_('No hosts were available')) - - def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, - ordered by their fitness. - """ - - if topic != "compute": - raise NotImplementedError(_("Zone Aware Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... - json_spec = json.dumps(request_spec) - all_zones = db.zone_get_all(context) - child_results = self._call_zone_method(context, "select", - specs=json_spec, zones=all_zones) - self._adjust_child_weights(child_results, all_zones) - for child_zone, result in child_results: - for weighting in result: - # Remember the child_zone so we can get back to - # it later if needed. This implicitly builds a zone - # path structure. - host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def filter_hosts(self, topic, request_spec, host_list=None): - """Return a list of hosts which are acceptable for scheduling. - Return value should be a list of (hostname, capability_dict)s. - Derived classes may override this, but may find the - '_filter' function more appropriate. 
- """ - - def _default_filter(self, hostname, capabilities, request_spec): - """Default filter function if there's no _filter""" - # NOTE(sirp): The default logic is the equivalent to - # AllHostsFilter - return True - - filter_func = getattr(self, '%s_filter' % topic, _default_filter) - - if host_list is None: - first_run = True - host_list = self.zone_manager.service_states.iteritems() - else: - first_run = False - - filtered_hosts = [] - for host, services in host_list: - if first_run: - if topic not in services: - continue - services = services[topic] - if filter_func(host, services, request_spec): - filtered_hosts.append((host, services)) - return filtered_hosts - - def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes may override this to provide more sophisticated - scheduling objectives - """ - # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=hostname, capabilities=capabilities) - for hostname, capabilities in hosts] - - def compute_consume(self, capabilities, instance_type): - """Consume compute resources for selected host""" - - requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 - capabilities['host_memory_free'] -= requested_mem - - def consume_resources(self, topic, capabilities, instance_type): - """Consume resources for a specific host. 'host' is a tuple - of the hostname and the services""" - - consume_func = getattr(self, '%s_consume' % topic, None) - if not consume_func: - return - consume_func(capabilities, instance_type) diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py new file mode 100644 index 000000000..f4f5cc233 --- /dev/null +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -0,0 +1,364 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Abstract Scheduler. +""" + +import json + +import nova.db + +from nova import exception +from nova import rpc +from nova import test +from nova.compute import api as compute_api +from nova.scheduler import driver +from nova.scheduler import abstract_scheduler +from nova.scheduler import zone_manager + + +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + +class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler): + # No need to stub anything at the moment + pass + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'host_memory_free': 1073741824}, + }, + 'host2': { + 'compute': {'host_memory_free': 2147483648}, + }, + 'host3': { + 'compute': {'host_memory_free': 3221225472}, + }, + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs, zones): + return [] + + +# Hmm, I should probably be using mox for this. +was_called = False + + +def fake_provision_resource(context, item, instance_id, request_spec, kwargs): + global was_called + was_called = True + + +def fake_ask_child_zone_to_create_instance(context, zone_info, + request_spec, kwargs): + global was_called + was_called = True + + +def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): + global was_called + was_called = True + + +def fake_provision_resource_from_blob(context, item, instance_id, + request_spec, kwargs): + global was_called + was_called = True + + +def fake_decrypt_blob_returns_local_info(blob): + return {'hostname': 'foooooo'} # values aren't important. + + +def fake_decrypt_blob_returns_child_info(blob): + return {'child_zone': True, + 'child_blob': True} # values aren't important. Keys are. + + +def fake_call_zone_method(context, method, specs, zones): + return [ + (1, [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + (2, [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + (3, [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +def fake_zone_get_all(context): + return [ + dict(id=1, api_url='zone1', + username='admin', password='password', + weight_offset=0.0, weight_scale=1.0), + dict(id=2, api_url='zone2', + username='admin', password='password', + weight_offset=1000.0, weight_scale=1.0), + dict(id=3, api_url='zone3', + username='admin', password='password', + weight_offset=0.0, weight_scale=1000.0), + ] + + +class AbstractSchedulerTestCase(test.TestCase): + """Test case for Abstract Scheduler.""" + + def test_abstract_scheduler(self): + """ + Create a nested set of FakeZones, try to build multiple instances + and ensure that a select call returns the appropriate build plan. 
+ """ + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) + + # 4 from local zones, 12 from remotes + self.assertEqual(16, len(build_plan)) + + hostnames = [plan_item['hostname'] + for plan_item in build_plan if 'hostname' in plan_item] + # 4 local hosts + self.assertEqual(4, len(hostnames)) + + def test_adjust_child_weights(self): + """Make sure the weights returned by child zones are + properly adjusted based on the scale/offset in the zone + db entries. + """ + sched = FakeAbstractScheduler() + child_results = fake_call_zone_method(None, None, None, None) + zones = fake_zone_get_all(None) + sched._adjust_child_weights(child_results, zones) + scaled = [130000, 131000, 132000, 3000] + for zone, results in child_results: + for item in results: + w = item['weight'] + if zone == 'zone1': # No change + self.assertTrue(w < 1000.0) + if zone == 'zone2': # Offset +1000 + self.assertTrue(w >= 1000.0 and w < 2000) + if zone == 'zone3': # Scale x1000 + self.assertEqual(scaled.pop(0), w) + + def test_empty_abstract_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) + + def test_schedule_do_not_schedule_with_hint(self): + """ + Check the local/child zone routing in the run_instance() call. + If the zone_blob hint was passed in, don't re-schedule. 
+ """ + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_provision_resource', fake_provision_resource) + request_spec = { + 'instance_properties': {}, + 'instance_type': {}, + 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', + 'blob': "Non-None blob data", + } + + result = sched.schedule_run_instance(None, 1, request_spec) + self.assertEquals(None, result) + self.assertTrue(was_called) + + def test_provision_resource_local(self): + """Provision a resource locally or remotely.""" + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_provision_resource_locally', + fake_provision_resource_locally) + + request_spec = {'hostname': "foo"} + sched._provision_resource(None, request_spec, 1, request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_remote(self): + """Provision a resource locally or remotely.""" + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_provision_resource_from_blob', + fake_provision_resource_from_blob) + + request_spec = {} + sched._provision_resource(None, request_spec, 1, request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_from_blob_empty(self): + """Provision a resource locally or remotely given no hints.""" + global was_called + sched = FakeAbstractScheduler() + request_spec = {} + self.assertRaises(abstract_scheduler.InvalidBlob, + sched._provision_resource_from_blob, + None, {}, 1, {}, {}) + + def test_provision_resource_from_blob_with_local_blob(self): + """ + Provision a resource locally or remotely when blob hint passed in. + """ + global was_called + sched = FakeAbstractScheduler() + was_called = False + + def fake_create_db_entry_for_new_instance(self, context, + image, base_options, security_group, + block_device_mapping, num=1): + global was_called + was_called = True + # return fake instances + return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'} + + def fake_rpc_cast(*args, **kwargs): + pass + + self.stubs.Set(sched, '_decrypt_blob', + fake_decrypt_blob_returns_local_info) + self.stubs.Set(compute_api.API, + 'create_db_entry_for_new_instance', + fake_create_db_entry_for_new_instance) + self.stubs.Set(rpc, 'cast', fake_rpc_cast) + + build_plan_item = {'blob': "Non-None blob data"} + request_spec = {'image': {}, 'instance_properties': {}} + + sched._provision_resource_from_blob(None, build_plan_item, 1, + request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_from_blob_with_child_blob(self): + """ + Provision a resource locally or remotely when child blob hint + passed in. + """ + global was_called + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_decrypt_blob', + fake_decrypt_blob_returns_child_info) + was_called = False + self.stubs.Set(sched, '_ask_child_zone_to_create_instance', + fake_ask_child_zone_to_create_instance) + + request_spec = {'blob': "Non-None blob data"} + + sched._provision_resource_from_blob(None, request_spec, 1, + request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_from_blob_with_immediate_child_blob(self): + """ + Provision a resource locally or remotely when blob hint passed in + from an immediate child. 
+ """ + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_ask_child_zone_to_create_instance', + fake_ask_child_zone_to_create_instance) + + request_spec = {'child_blob': True, 'child_zone': True} + + sched._provision_resource_from_blob(None, request_spec, 1, + request_spec, {}) + self.assertTrue(was_called) + + def test_decrypt_blob(self): + """Test that the decrypt method works.""" + + fixture = FakeAbstractScheduler() + test_data = {"foo": "bar"} + + class StubDecryptor(object): + def decryptor(self, key): + return lambda blob: blob + + self.stubs.Set(abstract_scheduler, 'crypto', + StubDecryptor()) + + self.assertEqual(fixture._decrypt_blob(test_data), + json.dumps(test_data)) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index fbe6b2f77..de7581d0a 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -18,7 +18,7 @@ Tests For Least Cost Scheduler from nova import test from nova.scheduler import least_cost -from nova.tests.scheduler import test_zone_aware_scheduler +from nova.tests.scheduler import test_abstract_scheduler MB = 1024 * 1024 @@ -70,7 +70,7 @@ class LeastCostSchedulerTestCase(test.TestCase): zone_manager = FakeZoneManager() - states = test_zone_aware_scheduler.fake_zone_manager_service_states( + states = test_abstract_scheduler.fake_zone_manager_service_states( num_hosts=10) zone_manager.service_states = states diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py deleted file mode 100644 index 788efca52..000000000 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Zone Aware Scheduler. -""" - -import json - -import nova.db - -from nova import exception -from nova import rpc -from nova import test -from nova.compute import api as compute_api -from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler -from nova.scheduler import zone_manager - - -def _host_caps(multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. 
- return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - -def fake_zone_manager_service_states(num_hosts): - states = {} - for x in xrange(num_hosts): - states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} - return states - - -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - # No need to stub anything at the moment - pass - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'host_memory_free': 1073741824}, - }, - 'host2': { - 'compute': {'host_memory_free': 2147483648}, - }, - 'host3': { - 'compute': {'host_memory_free': 3221225472}, - }, - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs, zones): - return [] - - -# Hmm, I should probably be using mox for this. -was_called = False - - -def fake_provision_resource(context, item, instance_id, request_spec, kwargs): - global was_called - was_called = True - - -def fake_ask_child_zone_to_create_instance(context, zone_info, - request_spec, kwargs): - global was_called - was_called = True - - -def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): - global was_called - was_called = True - - -def fake_provision_resource_from_blob(context, item, instance_id, - request_spec, kwargs): - global was_called - was_called = True - - -def fake_decrypt_blob_returns_local_info(blob): - return {'hostname': 'foooooo'} # values aren't important. - - -def fake_decrypt_blob_returns_child_info(blob): - return {'child_zone': True, - 'child_blob': True} # values aren't important. Keys are. - - -def fake_call_zone_method(context, method, specs, zones): - return [ - (1, [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - (2, [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - (3, [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -def fake_zone_get_all(context): - return [ - dict(id=1, api_url='zone1', - username='admin', password='password', - weight_offset=0.0, weight_scale=1.0), - dict(id=2, api_url='zone2', - username='admin', password='password', - weight_offset=1000.0, weight_scale=1.0), - dict(id=3, api_url='zone3', - username='admin', password='password', - weight_offset=0.0, weight_scale=1000.0), - ] - - -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" - - def test_zone_aware_scheduler(self): - """ - Create a nested set of FakeZones, try to build multiple instances - and ensure that a select call returns the appropriate build plan. 
- """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - build_plan = sched.select(fake_context, - {'instance_type': {'memory_mb': 512}, - 'num_instances': 4}) - - # 4 from local zones, 12 from remotes - self.assertEqual(16, len(build_plan)) - - hostnames = [plan_item['hostname'] - for plan_item in build_plan if 'hostname' in plan_item] - # 4 local hosts - self.assertEqual(4, len(hostnames)) - - def test_adjust_child_weights(self): - """Make sure the weights returned by child zones are - properly adjusted based on the scale/offset in the zone - db entries. - """ - sched = FakeZoneAwareScheduler() - child_results = fake_call_zone_method(None, None, None, None) - zones = fake_zone_get_all(None) - sched._adjust_child_weights(child_results, zones) - scaled = [130000, 131000, 132000, 3000] - for zone, results in child_results: - for item in results: - w = item['weight'] - if zone == 'zone1': # No change - self.assertTrue(w < 1000.0) - if zone == 'zone2': # Offset +1000 - self.assertTrue(w >= 1000.0 and w < 2000) - if zone == 'zone3': # Scale x1000 - self.assertEqual(scaled.pop(0), w) - - def test_empty_zone_aware_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. - """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, 1, - dict(host_filter=None, instance_type={})) - - def test_schedule_do_not_schedule_with_hint(self): - """ - Check the local/child zone routing in the run_instance() call. - If the zone_blob hint was passed in, don't re-schedule. 
- """ - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource', fake_provision_resource) - request_spec = { - 'instance_properties': {}, - 'instance_type': {}, - 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', - 'blob': "Non-None blob data", - } - - result = sched.schedule_run_instance(None, 1, request_spec) - self.assertEquals(None, result) - self.assertTrue(was_called) - - def test_provision_resource_local(self): - """Provision a resource locally or remotely.""" - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource_locally', - fake_provision_resource_locally) - - request_spec = {'hostname': "foo"} - sched._provision_resource(None, request_spec, 1, request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_remote(self): - """Provision a resource locally or remotely.""" - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource_from_blob', - fake_provision_resource_from_blob) - - request_spec = {} - sched._provision_resource(None, request_spec, 1, request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_empty(self): - """Provision a resource locally or remotely given no hints.""" - global was_called - sched = FakeZoneAwareScheduler() - request_spec = {} - self.assertRaises(zone_aware_scheduler.InvalidBlob, - sched._provision_resource_from_blob, - None, {}, 1, {}, {}) - - def test_provision_resource_from_blob_with_local_blob(self): - """ - Provision a resource locally or remotely when blob hint passed in. - """ - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - - def fake_create_db_entry_for_new_instance(self, context, - image, base_options, security_group, - block_device_mapping, num=1): - global was_called - was_called = True - # return fake instances - return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'} - - def fake_rpc_cast(*args, **kwargs): - pass - - self.stubs.Set(sched, '_decrypt_blob', - fake_decrypt_blob_returns_local_info) - self.stubs.Set(compute_api.API, - 'create_db_entry_for_new_instance', - fake_create_db_entry_for_new_instance) - self.stubs.Set(rpc, 'cast', fake_rpc_cast) - - build_plan_item = {'blob': "Non-None blob data"} - request_spec = {'image': {}, 'instance_properties': {}} - - sched._provision_resource_from_blob(None, build_plan_item, 1, - request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_with_child_blob(self): - """ - Provision a resource locally or remotely when child blob hint - passed in. - """ - global was_called - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_decrypt_blob', - fake_decrypt_blob_returns_child_info) - was_called = False - self.stubs.Set(sched, '_ask_child_zone_to_create_instance', - fake_ask_child_zone_to_create_instance) - - request_spec = {'blob': "Non-None blob data"} - - sched._provision_resource_from_blob(None, request_spec, 1, - request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_with_immediate_child_blob(self): - """ - Provision a resource locally or remotely when blob hint passed in - from an immediate child. 
- """ - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_ask_child_zone_to_create_instance', - fake_ask_child_zone_to_create_instance) - - request_spec = {'child_blob': True, 'child_zone': True} - - sched._provision_resource_from_blob(None, request_spec, 1, - request_spec, {}) - self.assertTrue(was_called) - - def test_decrypt_blob(self): - """Test that the decrypt method works.""" - - fixture = FakeZoneAwareScheduler() - test_data = {"foo": "bar"} - - class StubDecryptor(object): - def decryptor(self, key): - return lambda blob: blob - - self.stubs.Set(zone_aware_scheduler, 'crypto', - StubDecryptor()) - - self.assertEqual(fixture._decrypt_blob(test_data), - json.dumps(test_data)) -- cgit From b29bc97d5a69abe71dea5b9ff9dcfc65fcd59cc9 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 11 Aug 2011 17:10:25 -0500 Subject: Check compressed image size and PEP8 cleanup. --- nova/compute/manager.py | 40 +++++++++++++++++++++++ nova/exception.py | 4 +++ nova/tests/api/openstack/contrib/test_keypairs.py | 17 ++++++++-- 3 files changed, 59 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d38213083..1c3485342 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -321,10 +321,50 @@ class ComputeManager(manager.SchedulerDependentManager): def _run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" + def _check_image_size(): + """Ensure image is smaller than the maximum size allowed by the + instance_type. + + The image stored in Glance is potentially compressed, so we use two + checks to ensure that the size isn't exceeded: + + 1) This one - checks compressed size, this a quick check to + eliminate any images which are obviously too large + + 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This + is a slower check since it requires uncompressing the entire + image, but is accurate because it reflects the image's + actual size. 
+ """ + image_href = instance['image_ref'] + image_service, image_id = nova.image.get_image_service(image_href) + image_meta = image_service.show(context, image_id) + size_bytes = image_meta['size'] + + instance_type_id = instance['instance_type_id'] + instance_type = self.db.instance_type_get(context, + instance_type_id) + allowed_size_gb = instance_type['local_gb'] + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 + + LOG.debug(_("image_id=%(image_id)d, image_size_bytes=" + "%(size_bytes)d, allowed_size_bytes=" + "%(allowed_size_bytes)d") % locals()) + + if size_bytes > allowed_size_bytes: + LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded" + " instance_type allowed size " + "%(allowed_size_bytes)d") + % locals()) + raise exception.ImageTooLarge() + context = context.elevated() instance = self.db.instance_get(context, instance_id) if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) + + _check_image_size() + LOG.audit(_("instance %s: starting..."), instance_id, context=context) updates = {} diff --git a/nova/exception.py b/nova/exception.py index 0d60cb0bf..77ebf3c88 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -717,3 +717,7 @@ class CannotResizeToSameSize(NovaException): class CannotResizeToSmallerSize(NovaException): message = _("Resizing to a smaller size is not supported.") + + +class ImageTooLarge(NovaException): + message = _("Image is larger than instance type allows") diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py index c9dc34d65..cbc815b1a 100644 --- a/nova/tests/api/openstack/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/contrib/test_keypairs.py @@ -28,6 +28,7 @@ def fake_keypair(name): 'fingerprint': 'FAKE_FINGERPRINT', 'name': name} + def db_key_pair_get_all_by_user(self, user_id): return [fake_keypair('FAKE')] @@ -78,7 +79,20 @@ class KeypairsTest(test.TestCase): def test_keypair_import(self): body = {'keypair': {'name': 'create_test', - 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}} + 'public_key': 'ssh-rsa ' + 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDBY' + 'IznAx9D7118Q1VKGpXy2HDiKyUTM8XcUu' + 'hQpo0srqb9rboUp4a9NmCwpWpeElDLuva' + '707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oL' + 'JwqvaSaWUbyT1vtryRqy6J3TecN0WINY7' + '1f4uymiMZP0wby4bKBcYnac8KiCIlvkEl' + '0ETjkOGUq8OyWRmn7ljj5SESEUdBP0Jnu' + 'TFKddWTU/wD6wydeJaUhBTqOlHn0kX1Gy' + 'qoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJ' + 'LCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLo' + 'nRrds+cev8p6krSSrxWOwBbHkXa6OciiJ' + 'DvkRzJXzf'}} + req = webob.Request.blank('/v1.1/os-keypairs') req.method = 'POST' req.body = json.dumps(body) @@ -96,4 +110,3 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) - -- cgit From 24796debe819641b1cba58ba966b0d6d5a253fd8 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 12 Aug 2011 12:14:40 -0500 Subject: Fixed unit tests. 
--- nova/compute/manager.py | 15 ++++++++++++++- nova/tests/scheduler/test_scheduler.py | 4 +++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1c3485342..f0184161e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -336,10 +336,22 @@ class ComputeManager(manager.SchedulerDependentManager): image, but is accurate because it reflects the image's actual size. """ + # NOTE(jk0): image_ref is defined in the DB model, image_href is + # used by the image service. This should be refactored to be + # consistent. image_href = instance['image_ref'] image_service, image_id = nova.image.get_image_service(image_href) image_meta = image_service.show(context, image_id) - size_bytes = image_meta['size'] + + try: + size_bytes = image_meta['size'] + except KeyError: + # Size is not a required field in the image service (yet), so + # we are unable to rely on it being there even though it's in + # glance. + + # TODO(jk0): Should size be required in the image service? + return instance_type_id = instance['instance_type_id'] instance_type = self.db.instance_type_get(context, @@ -360,6 +372,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance = self.db.instance_get(context, instance_id) + if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 7a26fd1bb..ebaf89624 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -256,7 +256,9 @@ class SimpleDriverTestCase(test.TestCase): def _create_instance(self, **kwargs): """Create a test instance""" inst = {} - inst['image_id'] = 1 + # NOTE(jk0): If an integer is passed as the image_ref, the image + # service will use the default image service (in this case, the fake). + inst['image_ref'] = '1' inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id -- cgit From 8c9eedb4b4dd9653cca302ae4bbd23d895761aee Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 12 Aug 2011 14:18:25 -0700 Subject: reworked test_extensions code to avoid constant merge conflicts with newly added ext --- nova/tests/api/openstack/test_extensions.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index ea8fe68a7..e9f44af6a 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -84,6 +84,17 @@ class ExtensionControllerTest(test.TestCase): super(ExtensionControllerTest, self).setUp() ext_path = os.path.join(os.path.dirname(__file__), "extensions") self.flags(osapi_extensions_path=ext_path) + self.ext_list = [ + "FlavorExtraSpecs", + "Floating_ips", + "Fox In Socks", + "Hosts", + "Keypairs", + "Multinic", + "SecurityGroups", + "Volumes", + ] + self.ext_list.sort() def test_list_extensions_json(self): app = openstack.APIRouterV11() @@ -96,9 +107,7 @@ class ExtensionControllerTest(test.TestCase): data = json.loads(response.body) names = [x['name'] for x in data['extensions']] names.sort() - self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", - "Fox In Socks", "Hosts", "Keypairs", "Multinic", "SecurityGroups", - "Volumes"]) + self.assertEqual(names, self.ext_list) # Make sure that at least Fox in Sox is correct. 
(fox_ext,) = [ @@ -145,7 +154,7 @@ class ExtensionControllerTest(test.TestCase): # Make sure we have all the extensions. exts = root.findall('{0}extension'.format(NS)) - self.assertEqual(len(exts), 8) + self.assertEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] -- cgit From b60fa0d09d02066863736a3e98f07094c4db05a6 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 12 Aug 2011 16:18:48 -0500 Subject: Check uncompressed VHD size. --- nova/tests/xenapi/stubs.py | 4 +-- nova/virt/xenapi/fake.py | 1 + nova/virt/xenapi/vm_utils.py | 74 +++++++++++++++++++++++++++++++++++++++----- nova/virt/xenapi/vmops.py | 6 ++-- 4 files changed, 72 insertions(+), 13 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 0d0f84e32..a6a1febd6 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -28,10 +28,10 @@ from nova import utils def stubout_instance_snapshot(stubs): @classmethod - def fake_fetch_image(cls, context, session, instance_id, image, user, + def fake_fetch_image(cls, context, session, instance, image, user, project, type): from nova.virt.xenapi.fake import create_vdi - name_label = "instance-%s" % instance_id + name_label = "instance-%s" % instance.id #TODO: create fake SR record sr_ref = "fakesr" vdi_ref = create_vdi(name_label=name_label, read_only=False, diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index d5ac39473..eecd60d58 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'physical_utilisation': '123', 'VBDs': {}}) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6c44d53d4..e86f72347 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -31,6 +31,7 @@ import uuid from xml.dom import minidom import glance.client +from nova import db from nova import exception from nova import flags import nova.image @@ -368,7 +369,7 @@ class VMHelper(HelperBase): session.wait_for_task(task, instance.id) @classmethod - def fetch_image(cls, context, session, instance_id, image, user_id, + def fetch_image(cls, context, session, instance, image, user_id, project_id, image_type): """Fetch image from glance based on image type. 
@@ -377,18 +378,19 @@ class VMHelper(HelperBase): """ if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd(context, - session, instance_id, image, image_type) + session, instance, image, image_type) else: return cls._fetch_image_glance_disk(context, - session, instance_id, image, image_type) + session, instance, image, image_type) @classmethod - def _fetch_image_glance_vhd(cls, context, session, instance_id, image, + def _fetch_image_glance_vhd(cls, context, session, instance, image, image_type): """Tell glance to download an image and put the VHDs into the SR Returns: A list of dictionaries that describe VDIs """ + instance_id = instance.id LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) sr_ref = safe_find_sr(session) @@ -422,17 +424,57 @@ class VMHelper(HelperBase): cls.scan_sr(session, instance_id, sr_ref) - # Pull out the UUID of the first VDI - vdi_uuid = vdis[0]['vdi_uuid'] + # Pull out the UUID of the first VDI (which is the os VDI) + os_vdi_uuid = vdis[0]['vdi_uuid'] + # Set the name-label to ease debugging - vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid) primary_name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) + cls._check_vdi_size(context, session, instance, os_vdi_uuid) return vdis @classmethod - def _fetch_image_glance_disk(cls, context, session, instance_id, image, + def _get_vdi_chain_size(cls, context, session, vdi_uuid): + """Compute the total size of a VDI chain, starting with the specified + VDI UUID. + + This will walk the VDI chain to the root, add the size of each VDI into + the total. + """ + size_bytes = 0 + for vdi_rec in walk_vdi_chain(session, vdi_uuid): + vdi_size_bytes = int(vdi_rec['physical_utilisation']) + LOG.debug(_('vdi_uuid=%(vdi_uuid)s vdi_size_bytes=' + '%(vdi_size_bytes)d' % locals())) + size_bytes += vdi_size_bytes + return size_bytes + + @classmethod + def _check_vdi_size(cls, context, session, instance, vdi_uuid): + size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid) + + # FIXME(sirp): this was copied directly from compute.manager.py, let's + # refactor this to a common area + instance_type_id = instance['instance_type_id'] + instance_type = db.instance_type_get(context, + instance_type_id) + allowed_size_gb = instance_type['local_gb'] + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 + + LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes=" + "%(allowed_size_bytes)d") % locals()) + + if size_bytes > allowed_size_bytes: + LOG.info(_("Image size %(size_bytes)d exceeded" + " instance_type allowed size " + "%(allowed_size_bytes)d") + % locals()) + raise exception.ImageTooLarge() + + @classmethod + def _fetch_image_glance_disk(cls, context, session, instance, image, image_type): """Fetch the image from Glance @@ -444,6 +486,7 @@ class VMHelper(HelperBase): Returns: A single filename if image_type is KERNEL_RAMDISK A list of dictionaries that describe VDIs, otherwise """ + instance_id = instance.id # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and # DISK restores @@ -750,6 +793,21 @@ def get_vhd_parent_uuid(session, vdi_ref): return None +def walk_vdi_chain(session, vdi_uuid): + """Yield vdi_recs for each element in a VDI chain""" + # TODO: perhaps make get_vhd_parent use this + while True: + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + vdi_rec = 
session.get_xenapi().VDI.get_record(vdi_ref) + yield vdi_rec + + parent_uuid = vdi_rec['sm_config'].get('vhd-parent') + if parent_uuid: + vdi_uuid = parent_uuid + else: + break + + def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b1522729a..7586ade7b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -137,7 +137,7 @@ class VMOps(object): def _create_disks(self, context, instance): disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(context, self._session, - instance.id, instance.image_ref, + instance, instance.image_ref, instance.user_id, instance.project_id, disk_image_type) return vdis @@ -182,11 +182,11 @@ class VMOps(object): try: if instance.kernel_id: kernel = VMHelper.fetch_image(context, self._session, - instance.id, instance.kernel_id, instance.user_id, + instance, instance.kernel_id, instance.user_id, instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(context, self._session, - instance.id, instance.ramdisk_id, instance.user_id, + instance, instance.ramdisk_id, instance.user_id, instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', -- cgit From b238bcd9de989e7dabe6698b3de77a104d96a941 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 12 Aug 2011 16:25:16 -0500 Subject: Updated logging. --- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e86f72347..1f5f9b416 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -445,8 +445,9 @@ class VMHelper(HelperBase): """ size_bytes = 0 for vdi_rec in walk_vdi_chain(session, vdi_uuid): + cur_vdi_uuid = vdi_rec['uuid'] vdi_size_bytes = int(vdi_rec['physical_utilisation']) - LOG.debug(_('vdi_uuid=%(vdi_uuid)s vdi_size_bytes=' + LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=' '%(vdi_size_bytes)d' % locals())) size_bytes += vdi_size_bytes return size_bytes -- cgit From a1e60edee71d2cb24739f2f44ba13fbf28e72c95 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:03:11 -0700 Subject: get rid of network_info hack and pass it everywhere --- nova/virt/libvirt/connection.py | 22 ++++++-------- nova/virt/libvirt/firewall.py | 64 +++++++++++++++------------------------ nova/virt/libvirt/netutils.py | 67 ----------------------------------------- 3 files changed, 33 insertions(+), 120 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 6d043577a..bc317d660 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -471,10 +471,10 @@ class LibvirtConnection(driver.ComputeDriver): # in the guest OS. But, in case of KVM, shutdown() does not work... 
self.destroy(instance, network_info, cleanup=False) self.plug_vifs(instance, network_info) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" @@ -531,7 +531,7 @@ class LibvirtConnection(driver.ComputeDriver): """ self.destroy(instance, network_info, cleanup=False) - xml = self.to_xml(instance, rescue=True) + xml = self.to_xml(instance, network_info, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} @@ -574,9 +574,9 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, context, instance, - network_info=None, block_device_info=None): - xml = self.to_xml(instance, False, network_info=network_info, + def spawn(self, context, instance, network_info, + block_device_info=None): + xml = self.to_xml(instance, network_info, False, block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) @@ -584,7 +584,7 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_boot(): """Called at an interval until the VM is running.""" @@ -992,10 +992,6 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=None): block_device_mapping = driver.block_device_info_get_mapping( block_device_info) - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = netutils.get_network_info(instance) nics = [] for (network, mapping) in network_info: @@ -1082,7 +1078,7 @@ class LibvirtConnection(driver.ComputeDriver): xml_info['disk'] = xml_info['basepath'] + "/disk" return xml_info - def to_xml(self, instance, rescue=False, network_info=None, + def to_xml(self, instance, network_info, rescue=False, block_device_info=None): # TODO(termie): cache? LOG.debug(_('instance %s: starting toXML method'), instance['name']) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 9ce57b6c9..fa29b99c3 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -40,17 +40,17 @@ except ImportError: class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Prepare filters for the instance. At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """Apply instance filter. 
Once this method returns, the instance should be firewalled @@ -60,9 +60,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -85,7 +83,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Create rules to block spoofing and allow dhcp. This gets called when spawning an instance, before @@ -150,7 +148,7 @@ class NWFilterFirewall(FirewallDriver): self.static_filters_configured = False self.handle_security_groups = False - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass @@ -189,13 +187,10 @@ class NWFilterFirewall(FirewallDriver): ''' - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" logging.info('called setup_basic_filtering in nwfilter') - if not network_info: - network_info = netutils.get_network_info(instance) - if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. @@ -300,10 +295,8 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Clear out the nwfilter rules.""" - if not network_info: - network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -326,16 +319,13 @@ class NWFilterFirewall(FirewallDriver): LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Creates an NWFilter for the given instance. In the process, it makes sure the filters for the provider blocks, security groups, and base filter are all in place. 
""" - if not network_info: - network_info = netutils.get_network_info(instance) - self.refresh_provider_fw_rules() ctxt = context.get_admin_context() @@ -500,9 +490,8 @@ class NWFilterFirewall(FirewallDriver): return 'nova-instance-%s' % (instance['name']) return 'nova-instance-%s-%s' % (instance['name'], nic_id) - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - network_info = netutils.get_network_info(instance) for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) @@ -521,6 +510,7 @@ class IptablesFirewallDriver(FirewallDriver): from nova.network import linux_net self.iptables = linux_net.iptables_manager self.instances = {} + self.network_infos = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) self.basicly_filtered = False @@ -529,22 +519,22 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up provider rules and basic NWFilter.""" - if not network_info: - network_info = netutils.get_network_info(instance) self.nwfilter.setup_basic_filtering(instance, network_info) if not self.basicly_filtered: LOG.debug(_('iptables firewall: Setup Basic Filtering')) self.refresh_provider_fw_rules() self.basicly_filtered = True - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): if self.instances.pop(instance['id'], None): + # NOTE(vish): use the passed info instead of the stored info + self.network_infos.pop(instance['id']) self.remove_filters_for_instance(instance) self.iptables.apply() self.nwfilter.unfilter_instance(instance, network_info) @@ -552,11 +542,10 @@ class IptablesFirewallDriver(FirewallDriver): LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) - def prepare_instance_filter(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def prepare_instance_filter(self, instance, network_info): self.instances[instance['id']] = instance - self.add_filters_for_instance(instance, network_info) + self.network_infos[instance['id']] = network_info + self.add_filters_for_instance(instance) self.iptables.apply() def _create_filter(self, ips, chain_name): @@ -583,7 +572,8 @@ class IptablesFirewallDriver(FirewallDriver): for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule) - def add_filters_for_instance(self, instance, network_info=None): + def add_filters_for_instance(self, instance): + network_info = self.network_infos[instance['id']] chain_name = self._instance_chain_name(instance) if FLAGS.use_ipv6: self.iptables.ipv6['filter'].add_chain(chain_name) @@ -601,9 +591,7 @@ class IptablesFirewallDriver(FirewallDriver): if FLAGS.use_ipv6: self.iptables.ipv6['filter'].remove_chain(chain_name) - def instance_rules(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def instance_rules(self, instance, network_info): ctxt = context.get_admin_context() 
ipv4_rules = [] @@ -726,14 +714,10 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.apply() @utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, - security_group, - network_info=None): + def do_refresh_security_group_rules(self, security_group): for instance in self.instances.values(): self.remove_filters_for_instance(instance) - if not network_info: - network_info = netutils.get_network_info(instance) - self.add_filters_for_instance(instance, network_info) + self.add_filters_for_instance(instance) def refresh_provider_fw_rules(self): """See class:FirewallDriver: docs.""" diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index a8e88fc07..6f303072d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -23,12 +23,7 @@ import netaddr -from nova import context -from nova import db -from nova import exception from nova import flags -from nova import ipv6 -from nova import utils FLAGS = flags.FLAGS @@ -47,65 +42,3 @@ def get_net_and_prefixlen(cidr): def get_ip_version(cidr): net = netaddr.IPNetwork(cidr) return int(net.version) - - -def get_network_info(instance): - # TODO(tr3buchet): this function needs to go away! network info - # MUST be passed down from compute - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - try: - fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) - except exception.FixedIpNotFoundForInstance: - fixed_ips = [] - - vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get(admin_context, - instance['instance_type_id']) - network_info = [] - - for vif in vifs: - network = vif['network'] - - # determine which of the instance's IPs belong to this network - network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if - fixed_ip['network_id'] == network['id']] - - def ip_dict(ip): - return { - 'ip': ip, - 'netmask': network['netmask'], - 'enabled': '1'} - - def ip6_dict(): - prefix = network['cidr_v6'] - mac = vif['address'] - project_id = instance['project_id'] - return { - 'ip': ipv6.to_global(prefix, mac, project_id), - 'netmask': network['netmask_v6'], - 'enabled': '1'} - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'dhcp_server': network['gateway'], - 'mac': vif['address'], - 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if network['dns1']: - mapping['dns'].append(network['dns1']) - if network['dns2']: - mapping['dns'].append(network['dns2']) - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict()] - mapping['gateway6'] = network['gateway_v6'] - - network_info.append((network, mapping)) - return network_info -- cgit From 87e97a868b4b7361937bac8f637ec014276aaf5c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:03:53 -0700 Subject: use dhcp server instead of gateway for filter exception --- nova/virt/libvirt/firewall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index fa29b99c3..a7a67dacb 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -609,7 +609,7 @@ class IptablesFirewallDriver(FirewallDriver): ipv4_rules += ['-j $provider'] ipv6_rules += ['-j $provider'] - dhcp_servers = [info['gateway'] for (_n, info) in network_info] + dhcp_servers = [info['dhcp_server'] 
for (_n, info) in network_info] for dhcp_server in dhcp_servers: ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' -- cgit From 87ff404bf2bffe690292f7d3922c1ca2529f852b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:17:20 -0700 Subject: rename project_net to same_net --- nova/virt/libvirt/connection.py | 6 +++--- nova/virt/libvirt/firewall.py | 8 ++++---- nova/virt/libvirt/vif.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index bc317d660..f905ce92b 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -32,7 +32,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). :injected_network_template: Template file for injected network -:allow_project_net_traffic: Whether to allow in project network traffic +:allow_same_net_traffic: Whether to allow in project network traffic """ @@ -96,9 +96,9 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -flags.DEFINE_bool('allow_project_net_traffic', +flags.DEFINE_bool('allow_same_net_traffic', True, - 'Whether to allow in project network traffic') + 'Whether to allow network traffic from same network') flags.DEFINE_bool('use_cow_images', True, 'Whether to use cow images') diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index a7a67dacb..11e3906b8 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -232,7 +232,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self.nova_base_ipv6_filter) self._define_filter(self.nova_dhcp_filter) self._define_filter(self.nova_ra_filter) - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: self._define_filter(self.nova_project_filter) if FLAGS.use_ipv6: self._define_filter(self.nova_project_filter_v6) @@ -378,7 +378,7 @@ class NWFilterFirewall(FirewallDriver): instance_filter_children = [base_filter, 'nova-provider-rules', instance_secgroup_filter_name] - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: instance_filter_children.append('nova-project') if FLAGS.use_ipv6: instance_filter_children.append('nova-project-v6') @@ -616,7 +616,7 @@ class IptablesFirewallDriver(FirewallDriver): '-j ACCEPT' % (dhcp_server,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrs = [network['cidr'] for (network, _m) in network_info] for cidr in cidrs: ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) @@ -633,7 +633,7 @@ class IptablesFirewallDriver(FirewallDriver): '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrv6s = [network['cidr_v6'] for (network, _m) in network_info] diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index e243d4fa0..4cb9abda4 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -44,7 +44,7 @@ class LibvirtBridgeDriver(VIFDriver): gateway6 = mapping.get('gateway6') mac_id = mapping['mac'].replace(':', '') - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: template = "\n" net, mask = netutils.get_net_and_mask(network['cidr']) values = [("PROJNET", net), ("PROJMASK", mask)] -- cgit From f7d1270c94d884e661a79d74fb2b2f88f6eb619f Mon Sep 17 00:00:00 2001 From: Vishvananda 
Ishaya Date: Fri, 12 Aug 2011 22:05:34 -0700 Subject: fix all of the tests --- nova/tests/test_libvirt.py | 84 ++++++++++++++++++++++------------------- nova/virt/libvirt/connection.py | 8 ++-- 2 files changed, 50 insertions(+), 42 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 2180cf4f0..df291ee68 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -49,18 +49,19 @@ def _create_network_info(count=1, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 fake = 'fake' - fake_ip = '0.0.0.0/0' - fake_ip_2 = '0.0.0.1/0' - fake_ip_3 = '0.0.0.1/0' + fake_ip = '10.11.12.13' + fake_ip_2 = '0.0.0.1' + fake_ip_3 = '0.0.0.1' fake_vlan = 100 fake_bridge_interface = 'eth0' network = {'bridge': fake, 'cidr': fake_ip, 'cidr_v6': fake_ip, + 'gateway_v6': fake, 'vlan': fake_vlan, 'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, - 'dhcp_server': fake, + 'dhcp_server': '10.0.0.1', 'gateway': fake, 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} @@ -273,15 +274,14 @@ class LibvirtConnTestCase(test.TestCase): conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) - result = conn._prepare_xml_info(instance_ref, False) - self.assertFalse(result['nics']) - - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info()) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(), + False) self.assertTrue(len(result['nics']) == 1) - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info(2)) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(2), + False) self.assertTrue(len(result['nics']) == 2) def test_xml_and_uri_no_ramdisk_no_kernel(self): @@ -408,16 +408,16 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info(2) conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) - xml = conn.to_xml(instance_ref, False, network_info) + xml = conn.to_xml(instance_ref, network_info, False) tree = xml_to_tree(xml) interfaces = tree.findall("./devices/interface") self.assertEquals(len(interfaces), 2) parameters = interfaces[0].findall('./filterref/parameter') self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertEquals(parameters[0].get('value'), '0.0.0.0/0') + self.assertEquals(parameters[0].get('value'), '10.11.12.13') self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertEquals(parameters[1].get('value'), 'fake') + self.assertEquals(parameters[1].get('value'), '10.0.0.1') def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, @@ -431,7 +431,8 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') - xml = conn.to_xml(instance_ref) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info) tree = xml_to_tree(xml) check = [ @@ -528,17 +529,20 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref, rescue) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, - '%s failed check %d' % (xml, i)) + '%s != %s failed check %d' % + (check(tree), expected_result, i)) for i, (check, expected_result) in 
enumerate(common_checks): self.assertEqual(check(tree), expected_result, - '%s failed common check %d' % (xml, i)) + '%s != %s failed common check %d' % + (check(tree), expected_result, i)) # This test is supposed to make sure we don't # override a specifically set uri @@ -942,8 +946,9 @@ class IptablesFirewallTestCase(test.TestCase): from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info() + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) in_rules = filter(lambda l: not l.startswith('#'), self.in_filter_rules) @@ -1008,7 +1013,7 @@ class IptablesFirewallTestCase(test.TestCase): ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, network_info) - self.fw.add_filters_for_instance(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) ipv4 = self.fw.iptables.ipv4['filter'].rules ipv6 = self.fw.iptables.ipv6['filter'].rules ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len @@ -1023,7 +1028,7 @@ class IptablesFirewallTestCase(test.TestCase): self.mox.StubOutWithMock(self.fw, 'add_filters_for_instance', use_mock_anything=True) - self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg()) + self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg()) self.fw.instances[instance_ref['id']] = instance_ref self.mox.ReplayAll() self.fw.do_refresh_security_group_rules("fake") @@ -1043,11 +1048,12 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance_ref) - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info + self.fw.setup_basic_filtering(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance_ref) + self.fw.unfilter_instance(instance_ref, network_info) # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) @@ -1057,14 +1063,14 @@ class IptablesFirewallTestCase(test.TestCase): def test_provider_firewall_rules(self): # setup basic instance data instance_ref = self._create_instance_ref() - nw_info = _create_network_info(1) _setup_networking(instance_ref['id'], self.test_ip) # FRAGILE: peeks at how the firewall names chains chain_name = 'inst-%s' % instance_ref['id'] # create a firewall via setup_basic_filtering like libvirt_conn.spawn # should have a chain with 0 rules - self.fw.setup_basic_filtering(instance_ref, network_info=nw_info) + network_info = _create_network_info(1) + self.fw.setup_basic_filtering(instance_ref, network_info) self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == 'provider'] @@ -1094,8 +1100,8 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEqual(2, len(rules)) # create the instance filter and make sure it has a jump rule - self.fw.prepare_instance_filter(instance_ref, network_info=nw_info) - self.fw.apply_instance_filter(instance_ref) + self.fw.prepare_instance_filter(instance_ref, 
network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
 inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
 if rule.chain == chain_name]
 jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
@@ -1247,7 +1253,7 @@ class NWFilterTestCase(test.TestCase):
 def _ensure_all_called():
 instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
- '561212121212')
+ 'fake')
 secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
 for required in [secgroup_filter, 'allow-dhcp-server',
 'no-arp-spoofing', 'no-ip-spoofing',
@@ -1263,9 +1269,10 @@
 self.security_group.id)
 instance = db.instance_get(self.context, inst_id)
- self.fw.setup_basic_filtering(instance)
- self.fw.prepare_instance_filter(instance)
- self.fw.apply_instance_filter(instance)
+ network_info = _create_network_info()
+ self.fw.setup_basic_filtering(instance, network_info)
+ self.fw.prepare_instance_filter(instance, network_info)
+ self.fw.apply_instance_filter(instance, network_info)
 _ensure_all_called()
 self.teardown_security_group()
 db.instance_destroy(context.get_admin_context(), instance_ref['id'])
@@ -1296,11 +1303,12 @@
 instance = db.instance_get(self.context, inst_id)
 _setup_networking(instance_ref['id'], self.test_ip)
- self.fw.setup_basic_filtering(instance)
- self.fw.prepare_instance_filter(instance)
- self.fw.apply_instance_filter(instance)
+ network_info = _create_network_info()
+ self.fw.setup_basic_filtering(instance, network_info)
+ self.fw.prepare_instance_filter(instance, network_info)
+ self.fw.apply_instance_filter(instance, network_info)
 original_filter_count = len(fakefilter.filters)
- self.fw.unfilter_instance(instance)
+ self.fw.unfilter_instance(instance, network_info)
 # should undefine 2 filters: instance and instance-secgroup
 self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index f905ce92b..5945a725d 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -463,8 +463,8 @@ class LibvirtConnection(driver.ComputeDriver):
 """
 virt_dom = self._conn.lookupByName(instance['name'])
 # NOTE(itoumsn): Use XML derived from the running instance
- # instead of using to_xml(instance). This is almost the ultimate
- # stupid workaround.
+ # instead of using to_xml(instance, network_info). This is almost
+ # the ultimate stupid workaround.
 xml = virt_dom.XMLDesc(0)
 # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
 # better because we cannot ensure flushing dirty buffers
@@ -988,7 +988,7 @@ class LibvirtConnection(driver.ComputeDriver):
 else:
 raise exception.InvalidDevicePath(path=device_path)
- def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+ def _prepare_xml_info(self, instance, network_info, rescue,
 block_device_info=None):
 block_device_mapping = driver.block_device_info_get_mapping(
 block_device_info)
@@ -1082,7 +1082,7 @@
 block_device_info=None):
 # TODO(termie): cache? 
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- xml_info = self._prepare_xml_info(instance, rescue, network_info,
+ xml_info = self._prepare_xml_info(instance, network_info, rescue,
 block_device_info)
 xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
 LOG.debug(_('instance %s: finished toXML method'), instance['name'])
-- cgit From c533e6ed3d2df8725dbcb48e7e546eb853b7ad41 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 22:36:10 -0700 Subject: make sure security groups come back on restart of nova-compute
---
 nova/compute/manager.py | 6 ++++--
 nova/tests/test_compute.py | 4 ++--
 nova/tests/test_libvirt.py | 2 ++
 nova/virt/driver.py | 2 +-
 nova/virt/fake.py | 4 ++--
 nova/virt/libvirt/connection.py | 9 +++++----
 nova/virt/libvirt/firewall.py | 14 ++++++--------
 nova/virt/xenapi_conn.py | 2 +-
 8 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d38213083..5b98e9ec1 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -170,7 +170,9 @@ class ComputeManager(manager.SchedulerDependentManager):
 elif drv_state == power_state.RUNNING:
 # Hyper-V and VMWareAPI drivers will raise an exception
 try:
- self.driver.ensure_filtering_rules_for_instance(instance)
+ net_info = self._get_instance_nw_info(context, instance)
+ self.driver.ensure_filtering_rules_for_instance(instance,
+ net_info)
 except NotImplementedError:
 LOG.warning(_('Hypervisor driver does not '
 'support firewall rules'))
@@ -1308,7 +1310,7 @@ class ComputeManager(manager.SchedulerDependentManager):
 # This nwfilter is necessary on the destination host.
 # In addition, this method is creating filtering rule
 # onto destination host.
- self.driver.ensure_filtering_rules_for_instance(instance_ref)
+ self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info)
 def live_migration(self, context, instance_id, dest):
 """Executing live migration. 
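(The init_host hunk above is the heart of this commit. Reduced to its control flow it
amounts to the sketch below, which is hypothetical and illustrative only: driver,
get_nw_info, instances and RUNNING stand in for the real compute driver, network
query, instance list and power state.)

    def recover_firewall_rules(context, driver, get_nw_info, instances, RUNNING):
        """Re-create per-instance filtering rules at service start-up so
        security groups survive a nova-compute restart."""
        for instance in instances:
            if instance['power_state'] != RUNNING:
                continue
            # Network info is now fetched here and passed down explicitly,
            # instead of being looked up inside the driver.
            net_info = get_nw_info(context, instance)
            try:
                driver.ensure_filtering_rules_for_instance(instance, net_info)
            except NotImplementedError:
                # Hyper-V and VMwareAPI drivers do not implement firewall
                # rules; skip them rather than fail start-up.
                pass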
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..9d6e5aee5 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -632,7 +632,7 @@ class ComputeTestCase(test.TestCase): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.volume_manager = volmock @@ -657,7 +657,7 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.driver = drivermock diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index df291ee68..7f4a3b09a 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -644,6 +644,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) + network_info = _create_network_info() # Start test self.mox.ReplayAll() @@ -653,6 +654,7 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) conn.ensure_filtering_rules_for_instance(instance_ref, + network_info, time=fake_timer) except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index df4a66ac2..20af2666d 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -252,7 +252,7 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token pass - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """Setting up filtering rules and waiting for its completion. 
To migrate an instance, filtering rules to hypervisors
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 880702af1..2ffa33d40 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -487,7 +487,7 @@ class FakeConnection(driver.ComputeDriver):
 """This method is supported only by libvirt."""
 raise NotImplementedError('This method is supported only by libvirt.')
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
 """This method is supported only by libvirt."""
 raise NotImplementedError('This method is supported only by libvirt.')
@@ -496,7 +496,7 @@
 """This method is supported only by libvirt."""
 return
- def unfilter_instance(self, instance_ref, network_info=None):
+ def unfilter_instance(self, instance_ref, network_info):
 """This method is supported only by libvirt."""
 raise NotImplementedError('This method is supported only by libvirt.')
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 5945a725d..71516011a 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1502,7 +1502,7 @@ class LibvirtConnection(driver.ComputeDriver):
 return
- def ensure_filtering_rules_for_instance(self, instance_ref,
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
 time=None):
 """Setting up filtering rules and waiting for its completion.
@@ -1532,14 +1532,15 @@
 # If any instances never launch at destination host,
 # basic-filtering must be set here.
- self.firewall_driver.setup_basic_filtering(instance_ref)
+ self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
 # setting up nova-instance-instance-xx mainly. 
- self.firewall_driver.prepare_instance_filter(instance_ref) + self.firewall_driver.prepare_instance_filter(instance_ref, network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) while timeout_count: - if self.firewall_driver.instance_filter_exists(instance_ref): + if self.firewall_driver.instance_filter_exists(instance_ref, + network_info): break timeout_count.pop() if len(timeout_count) == 0: diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 11e3906b8..55fc58458 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -92,7 +92,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" raise NotImplementedError() @@ -391,9 +391,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self._filter_container(filter_name, filter_children)) - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -702,15 +700,15 @@ class IptablesFirewallDriver(FirewallDriver): return ipv4_rules, ipv6_rules - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - return self.nwfilter.instance_filter_exists(instance) + return self.nwfilter.instance_filter_exists(instance, network_info) def refresh_security_group_members(self, security_group): pass - def refresh_security_group_rules(self, security_group, network_info=None): - self.do_refresh_security_group_rules(security_group, network_info) + def refresh_security_group_rules(self, security_group): + self.do_refresh_security_group_rules(security_group) self.iptables.apply() @utils.synchronized('iptables', external=True) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 76b6c57fc..0ec957cf3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -309,7 +309,7 @@ class XenAPIConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """This method is supported only libvirt.""" return -- cgit From b46320a4175adc4012e60d4eae793a42f3a8186b Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 15 Aug 2011 02:55:22 -0700 Subject: make list response for floating ip match other apis --- nova/api/openstack/contrib/floating_ips.py | 4 ++-- nova/tests/api/openstack/contrib/test_floating_ips.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index c07bfdf09..f6824a601 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -43,8 +43,8 @@ def _translate_floating_ip_view(floating_ip): def _translate_floating_ips_view(floating_ips): - return {'floating_ips': [_translate_floating_ip_view(floating_ip) - for floating_ip in floating_ips]} + return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] + for ip in floating_ips]} class FloatingIPController(object): diff --git 
a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..7996ebbb9 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -116,14 +116,14 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'floating_ip': {'instance_id': 11, - 'ip': '10.10.10.10', - 'fixed_ip': '10.0.0.1', - 'id': 1}}, - {'floating_ip': {'instance_id': None, - 'ip': '10.10.10.11', - 'fixed_ip': None, - 'id': 2}}]} + response = {'floating_ips': [{'instance_id': 11, + 'ip': '10.10.10.10', + 'fixed_ip': '10.0.0.1', + 'id': 1}, + {'instance_id': None, + 'ip': '10.10.10.11', + 'fixed_ip': None, + 'id': 2}]} self.assertEqual(res_dict, response) def test_floating_ip_show(self): -- cgit From 1a2bc77871af060069ba0de80637198be78f8169 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 15 Aug 2011 09:47:01 -0500 Subject: Fixed merge conflict. --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index bf3bf4610..b6f471c0f 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -414,7 +414,7 @@ class VMHelper(HelperBase): return vdi_ref @classmethod - def fetch_image(cls, context, session, instance_id, image, user_id, + def fetch_image(cls, context, session, instance, image, user_id, project_id, image_type): """Fetch image from glance based on image type. -- cgit From c53d0567e4526b1e4a2ee5665ac81170a1771d17 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 11:10:44 -0700 Subject: Fix the tests when libvirt actually exists --- nova/tests/test_libvirt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 7f4a3b09a..f3f7e8057 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -627,7 +627,7 @@ class LibvirtConnTestCase(test.TestCase): return # Preparing mocks - def fake_none(self): + def fake_none(self, *args): return def fake_raise(self): @@ -1050,7 +1050,7 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - network_info = _create_network_info + network_info = _create_network_info() self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) -- cgit From 055c7692fc91d1d7d709c975fd223cc67f18ef8f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 11:31:44 -0700 Subject: remove openwrt image --- smoketests/openwrt-x86-ext2.image | Bin 4612608 -> 0 bytes smoketests/openwrt-x86-vmlinuz | Bin 1169948 -> 0 bytes smoketests/random.image | Bin 0 -> 65536 bytes smoketests/random.kernel | Bin 0 -> 16384 bytes smoketests/test_sysadmin.py | 4 ++-- 5 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 smoketests/openwrt-x86-ext2.image delete mode 100644 smoketests/openwrt-x86-vmlinuz create mode 100644 smoketests/random.image create mode 100644 smoketests/random.kernel diff --git a/smoketests/openwrt-x86-ext2.image b/smoketests/openwrt-x86-ext2.image deleted file mode 100644 index cd2dfa426..000000000 Binary files a/smoketests/openwrt-x86-ext2.image and /dev/null differ diff 
--git a/smoketests/openwrt-x86-vmlinuz b/smoketests/openwrt-x86-vmlinuz deleted file mode 100644 index 59cc9bb1f..000000000 Binary files a/smoketests/openwrt-x86-vmlinuz and /dev/null differ diff --git a/smoketests/random.image b/smoketests/random.image new file mode 100644 index 000000000..f2c0c30bb Binary files /dev/null and b/smoketests/random.image differ diff --git a/smoketests/random.kernel b/smoketests/random.kernel new file mode 100644 index 000000000..01a6284dd Binary files /dev/null and b/smoketests/random.kernel differ diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py index 454f6f1d5..29cda1a9b 100644 --- a/smoketests/test_sysadmin.py +++ b/smoketests/test_sysadmin.py @@ -35,9 +35,9 @@ from smoketests import flags from smoketests import base FLAGS = flags.FLAGS -flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz', +flags.DEFINE_string('bundle_kernel', 'random.kernel', 'Local kernel file to use for bundling tests') -flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image', +flags.DEFINE_string('bundle_image', 'random.image', 'Local image file to use for bundling tests') TEST_PREFIX = 'test%s' % int(random.random() * 1000000) -- cgit From bbd577de616915025e524e330f1991f3f155388c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 15 Aug 2011 13:35:53 -0500 Subject: Corrected names in TODO/FIXME. --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b6f471c0f..4a1f07bb1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -501,7 +501,7 @@ class VMHelper(HelperBase): def _check_vdi_size(cls, context, session, instance, vdi_uuid): size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid) - # FIXME(sirp): this was copied directly from compute.manager.py, let's + # FIXME(jk0): this was copied directly from compute.manager.py, let's # refactor this to a common area instance_type_id = instance['instance_type_id'] instance_type = db.instance_type_get(context, @@ -853,7 +853,7 @@ def get_vhd_parent_uuid(session, vdi_ref): def walk_vdi_chain(session, vdi_uuid): """Yield vdi_recs for each element in a VDI chain""" - # TODO: perhaps make get_vhd_parent use this + # TODO(jk0): perhaps make get_vhd_parent use this while True: vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) -- cgit From bc7892f698fbfc21f8d242f52e012d9165e46de7 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Mon, 15 Aug 2011 11:55:53 -0700 Subject: Adding standard inclusion of a body param which most http clients will send along with a POST request. 
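The motivation, sketched as a hypothetical minimal example (the controller below is
illustrative, not the real router wiring): the OpenStack WSGI layer passes any request
body through to the mapped action as a keyword argument, so an action that does not
accept it can fail with a TypeError as soon as a client POSTs a body. Defaulting the
parameter to None (which the follow-up commit does) also keeps clients that send no
body working.

    class FloatingIPController(object):
        def disassociate(self, req, id, body=None):
            # Accept (and ignore) whatever JSON body the client sent with
            # the POST; None covers clients that sent nothing.
            return {'disassociated': id}

    # A client POSTing '{}' to /os-floating-ips/1/disassociate results in a
    # call shaped like:
    FloatingIPController().disassociate(req=None, id='1', body={})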
--- nova/api/openstack/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/contrib/test_floating_ips.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index c07bfdf09..121a1d4a0 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,7 +131,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id): + def disassociate(self, req, id, body): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..dd58a1b22 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -177,8 +177,10 @@ class FloatingIpTest(test.TestCase): self.assertEqual(actual, expected) def test_floating_ip_disassociate(self): + body = dict() req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') req.method = 'POST' + req.body = json.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) -- cgit From fdb8c92739026e96ac52fc165d70c8f8c7594177 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Mon, 15 Aug 2011 12:04:51 -0700 Subject: making body default to none --- nova/api/openstack/contrib/floating_ips.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 121a1d4a0..768b9deb1 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,7 +131,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id, body): + def disassociate(self, req, id, body=None): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) -- cgit