From 09bd503a9842857480bd4703d27335e83dd30571 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sat, 11 Jun 2011 19:48:48 +0900 Subject: block migration feature added --- bin/nova-manage | 36 +++++++++- nova/compute/manager.py | 55 ++++++++++++--- nova/db/api.py | 21 ------ nova/db/sqlalchemy/api.py | 39 ----------- nova/db/sqlalchemy/models.py | 16 ++--- nova/exception.py | 5 ++ nova/scheduler/driver.py | 120 +++++++++++++++++++++++++------- nova/scheduler/manager.py | 54 ++++++++------- nova/tests/scheduler/test_scheduler.py | 52 ++++++++++---- nova/tests/test_compute.py | 6 +- nova/tests/test_libvirt.py | 90 ++++++++++++++++++++++++ nova/virt/libvirt/connection.py | 122 +++++++++++++++++++++++++++------ 12 files changed, 449 insertions(+), 167 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b0cd343f5..220f6e77a 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -649,11 +649,12 @@ class VmCommands(object): instance['availability_zone'], instance['launch_index']) - def live_migration(self, ec2_id, dest): + def _migration(self, ec2_id, dest, block_migration=False): """Migrates a running instance to a new machine. :param ec2_id: instance id which comes from euca-describe-instance. :param dest: destination host name. + :param block_migration: if True, do block_migration. """ @@ -676,11 +677,30 @@ class VmCommands(object): {"method": "live_migration", "args": {"instance_id": instance_id, "dest": dest, - "topic": FLAGS.compute_topic}}) + "topic": FLAGS.compute_topic, + "block_migration": block_migration}}) print _('Migration of %s initiated.' 'Check its progress using euca-describe-instances.') % ec2_id + def live_migration(self, ec2_id, dest): + """Migrates a running instance to a new machine. + + :param ec2_id: instance id which comes from euca-describe-instance. + :param dest: destination host name. + + """ + self._migration(ec2_id, dest) + + def block_migration(self, ec2_id, dest): + """Migrates a running instance to a new machine with storage data. + + :param ec2_id: instance id which comes from euca-describe-instance. + :param dest: destination host name. + + """ + self._migration(ec2_id, dest, True) + class ServiceCommands(object): """Enable and disable running services""" @@ -749,9 +769,19 @@ class ServiceCommands(object): mem_u = result['resource']['memory_mb_used'] hdd_u = result['resource']['local_gb_used'] + cpu_sum = 0 + mem_sum = 0 + hdd_sum = 0 print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)' print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd) - print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u) + print '%s(used_now)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u) + for p_id, val in result['usage'].items(): + cpu_sum += val['vcpus'] + mem_sum += val['memory_mb'] + hdd_sum += val['local_gb'] + print '%s(used_max)\t\t\t%s\t%s\t%s' % (host, cpu_sum, + mem_sum, hdd_sum) + for p_id, val in result['usage'].items(): print '%s\t\t%s\t\t%s\t%s\t%s' % (host, p_id, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 245958de7..f7604a253 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -921,11 +921,13 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.update_available_resource(context, self.host) - def pre_live_migration(self, context, instance_id, time=None): + def pre_live_migration(self, context, instance_id, time=None, + block_migration=False, **kwargs): """Preparations for live migration at dest host. 
:param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param block_migration: if true, prepare for block migration """ if not time: @@ -977,17 +979,24 @@ class ComputeManager(manager.SchedulerDependentManager): # onto destination host. self.driver.ensure_filtering_rules_for_instance(instance_ref) - def live_migration(self, context, instance_id, dest): + # Preparation for block migration + if block_migration: + self.driver.pre_block_migration(context, + instance_ref, + kwargs.get('disk')) + + def live_migration(self, context, instance_id, + dest, block_migration=False): """Executing live migration. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param dest: destination host + :param block_migration: if true, do block migration """ # Get instance for error handling. instance_ref = self.db.instance_get(context, instance_id) - i_name = instance_ref.name try: # Checking volume node is working correctly when any volumes @@ -998,13 +1007,20 @@ class ComputeManager(manager.SchedulerDependentManager): {"method": "check_for_export", "args": {'instance_id': instance_id}}) - # Asking dest host to preparing live migration. + args = {} + args['instance_id'] = instance_id + if block_migration: + args['block_migration'] = block_migration + args['disk'] = \ + self.driver.get_instance_disk_info(context, instance_ref) + rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": "pre_live_migration", - "args": {'instance_id': instance_id}}) + "args": args}) except Exception: + i_name = instance_ref.name msg = _("Pre live migration for %(i_name)s failed at %(dest)s") LOG.error(msg % locals()) self.recover_live_migration(context, instance_ref) @@ -1015,9 +1031,11 @@ class ComputeManager(manager.SchedulerDependentManager): # nothing must be recovered in this version. self.driver.live_migration(context, instance_ref, dest, self.post_live_migration, - self.recover_live_migration) + self.recover_live_migration, + block_migration) - def post_live_migration(self, ctxt, instance_ref, dest): + def post_live_migration(self, ctxt, instance_ref, + dest, block_migration=False): """Post operations for live migration. This method is called from live_migration @@ -1068,6 +1086,10 @@ class ComputeManager(manager.SchedulerDependentManager): # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) + # No instance booting at source host, but instance dir + # must be deleted for preparing next block migration + if block_migration: + self.driver.destroy(instance_ref) LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.') % locals()) @@ -1075,14 +1097,20 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): + def recover_live_migration(self, ctxt, instance_ref, host=None, + dest=None, delete=True): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param host: DB column value is updated by this hostname. If none, the host instance currently running is selected. - + :param dest: + This method is called from live migration src host. + This param specifies destination host. 
+ :param delete: + If true, ask destination host to remove instance dir, + since empty disk image was created for block migration """ if not host: host = instance_ref['host'] @@ -1101,6 +1129,15 @@ class ComputeManager(manager.SchedulerDependentManager): if dest: volume_api.remove_from_compute(ctxt, volume_id, dest) + # TODO: Block migration needs empty image at destination host + # before migration starts, so if any failure occurs, + # any empty images has to be deleted. but not sure adding below + # method is appropreate here. for now, admin has to delete manually. + # rpc.call(ctxt, + # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + # {"method": "self.driver.destroy", + # "args": {'instance':instance_ref}) + def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" error_list = super(ComputeManager, self).periodic_tasks(context) diff --git a/nova/db/api.py b/nova/db/api.py index 4e0aa60a2..b4cc110b1 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -483,27 +483,6 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project.""" - return IMPL.instance_get_vcpu_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project.""" - return IMPL.instance_get_memory_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project.""" - return IMPL.instance_get_disk_sum_by_host_and_project(context, - hostname, - proj_id) - - def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 103668b94..00c61bfe3 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1040,45 +1040,6 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) -@require_context -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.vcpus)) - if not result: - return 0 - return result - - -@require_context -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.memory_mb)) - if not result: - return 0 - return result - - -@require_context -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.local_gb)) - if not result: - return 0 - return result - - @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 55efe6886..1e057516b 100644 --- a/nova/db/sqlalchemy/models.py +++ 
b/nova/db/sqlalchemy/models.py @@ -125,14 +125,14 @@ class ComputeNode(BASE, NovaBase): 'ComputeNode.service_id == Service.id,' 'ComputeNode.deleted == False)') - vcpus = Column(Integer, nullable=True) - memory_mb = Column(Integer, nullable=True) - local_gb = Column(Integer, nullable=True) - vcpus_used = Column(Integer, nullable=True) - memory_mb_used = Column(Integer, nullable=True) - local_gb_used = Column(Integer, nullable=True) - hypervisor_type = Column(Text, nullable=True) - hypervisor_version = Column(Integer, nullable=True) + vcpus = Column(Integer) + memory_mb = Column(Integer) + local_gb = Column(Integer) + vcpus_used = Column(Integer) + memory_mb_used = Column(Integer) + local_gb_used = Column(Integer) + hypervisor_type = Column(Text) + hypervisor_version = Column(Integer) # Note(masumotok): Expected Strings example: # diff --git a/nova/exception.py b/nova/exception.py index 69b3e0359..80e8293b6 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -211,6 +211,11 @@ class DestinationHypervisorTooOld(Invalid): "has been provided.") +class DestinatioinDiskExists(Invalid): + message = _("The supplied disk path (%(path)s) already exists, " + "it is expected not to exist.") + + class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 0b257c5d8..889baa567 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -77,7 +77,8 @@ class Scheduler(object): """Must override at least this method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) - def schedule_live_migration(self, context, instance_id, dest): + def schedule_live_migration(self, context, instance_id, dest, + block_migration=False): """Live migration scheduling method. :param context: @@ -88,7 +89,6 @@ class Scheduler(object): Then scheduler send request that host. """ - # Whether instance exists and is running. instance_ref = db.instance_get(context, instance_id) @@ -96,10 +96,12 @@ class Scheduler(object): self._live_migration_src_check(context, instance_ref) # Checking destination host. - self._live_migration_dest_check(context, instance_ref, dest) + self._live_migration_dest_check(context, instance_ref, + dest, block_migration) # Common checking. - self._live_migration_common_check(context, instance_ref, dest) + self._live_migration_common_check(context, instance_ref, + dest, block_migration) # Changing instance_state. db.instance_set_state(context, @@ -147,7 +149,8 @@ class Scheduler(object): if not self.service_is_up(services[0]): raise exception.ComputeServiceUnavailable(host=src) - def _live_migration_dest_check(self, context, instance_ref, dest): + def _live_migration_dest_check(self, context, instance_ref, dest, + block_migration): """Live migration check routine (for destination host). :param context: security context @@ -175,9 +178,11 @@ class Scheduler(object): # Checking dst host still has enough capacities. self.assert_compute_node_has_enough_resources(context, instance_ref, - dest) + dest, + block_migration) - def _live_migration_common_check(self, context, instance_ref, dest): + def _live_migration_common_check(self, context, instance_ref, dest, + block_migration): """Live migration common check routine. 
        Below checkings are followed by
@@ -186,11 +191,19 @@ class Scheduler(object):
         :param context: security context
         :param instance_ref: nova.db.sqlalchemy.models.Instance object
         :param dest: destination host
+        :param block_migration: if True, check for block_migration.
 
         """
 
         # Checking shared storage connectivity
-        self.mounted_on_same_shared_storage(context, instance_ref, dest)
+        # if block migration, instances_path should not be on shared storage.
+        try:
+            self.mounted_on_same_shared_storage(context, instance_ref, dest)
+            if block_migration:
+                raise
+        except rpc.RemoteError:
+            if not block_migration:
+                raise
 
         # Checking dest exists.
         dservice_refs = db.service_get_all_compute_by_host(context, dest)
@@ -229,14 +242,24 @@ class Scheduler(object):
                         "original host %(src)s.") % locals())
                 raise
 
-    def assert_compute_node_has_enough_resources(self, context,
-                                                 instance_ref, dest):
+    def assert_compute_node_has_enough_resources(self, context, instance_ref,
+                                                 dest, block_migration):
         """Checks if destination host has enough resource for live migration.
 
-        Currently, only memory checking has been done.
-        If storage migration(block migration, meaning live-migration
-        without any shared storage) will be available, local storage
-        checking is also necessary.
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+        :param block_migration: if True, disk capacity is also checked
+
+        """
+        self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
+        if not block_migration:
+            return
+        self.assert_compute_node_has_enough_disk(context, instance_ref, dest)
+
+    def assert_compute_node_has_enough_memory(self, context,
+                                              instance_ref, dest):
+        """Checks if destination host has enough memory for live migration.
 
         :param context: security context
         :param instance_ref: nova.db.sqlalchemy.models.Instance object
         :param dest: destination host
 
         """
 
-        # Getting instance information
-        ec2_id = instance_ref['hostname']
+        # Getting total available memory of host
+        avail = self._get_compute_info(context, dest, 'memory_mb')
 
-        # Getting host information
-        service_refs = db.service_get_all_compute_by_host(context, dest)
-        compute_node_ref = service_refs[0]['compute_node'][0]
+        # Getting total used memory of host
+        # It should be sum of memories that are assigned as max value,
+        # because overcommitting is risky.
+        used = 0
+        instance_refs = db.instance_get_all_by_host(context, dest)
+        used_list = [i['memory_mb'] for i in instance_refs]
+        if used_list:
+            used = reduce(lambda x, y: x + y, used_list)
 
-        mem_total = int(compute_node_ref['memory_mb'])
-        mem_used = int(compute_node_ref['memory_mb_used'])
-        mem_avail = mem_total - mem_used
         mem_inst = instance_ref['memory_mb']
-        if mem_avail <= mem_inst:
-            reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
-                       "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+        avail = avail - used
+        if avail <= mem_inst:
+            ec2_id = instance_ref['hostname']
+            reason = _("Unable to migrate %(ec2_id)s to %(dest)s: Lack of "
+                       "memory(host:%(avail)s <= instance:%(mem_inst)s)")
             raise exception.MigrationError(reason=reason % locals())
 
+    def assert_compute_node_has_enough_disk(self, context,
+                                            instance_ref, dest):
+        """Checks if destination host has enough disk for block migration.
+
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+
+        """
+
+        # Getting total available disk of host
+        avail = self._get_compute_info(context, dest, 'local_gb')
+
+        # Getting total used disk of host
+        # It should be sum of disks that are assigned as max value,
+        # because overcommitting is risky.
+        used = 0
+        instance_refs = db.instance_get_all_by_host(context, dest)
+        used_list = [i['local_gb'] for i in instance_refs]
+        if used_list:
+            used = reduce(lambda x, y: x + y, used_list)
+
+        disk_inst = instance_ref['local_gb']
+        avail = avail - used
+        if avail <= disk_inst:
+            ec2_id = instance_ref['hostname']
+            reason = _("Unable to migrate %(ec2_id)s to %(dest)s: Lack of "
+                       "disk(host:%(avail)s <= instance:%(disk_inst)s)")
+            raise exception.MigrationError(reason=reason % locals())
+
+    def _get_compute_info(self, context, host, key):
+        """Get compute node's information specified by key.
+
+        :param context: security context
+        :param host: hostname (must be compute node)
+        :param key: column name of compute_nodes
+        :return: value specified by key
+
+        """
+        compute_node_ref = db.service_get_all_compute_by_host(context, host)
+        compute_node_ref = compute_node_ref[0]['compute_node'][0]
+        return compute_node_ref[key]
+
     def mounted_on_same_shared_storage(self, context, instance_ref, dest):
         """Check if the src and dest host mount same shared storage.
 
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index bd40e73c0..1f663ca4b 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -97,7 +97,7 @@ class SchedulerManager(manager.Manager):
     # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
     #                    Based on bexar design summit discussion,
     #                    just put this here for bexar release.
-    def show_host_resources(self, context, host, *args):
+    def show_host_resources(self, context, host):
         """Shows the physical/usage resource given by hosts.
 
         :param context: security context
         :param host: hostname.
         :returns: example format is below.
            {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
-            D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
+            D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048,
+                'vcpus_used': 12, 'memory_mb_used': 10240,
+                'local_gb_used': 64}
 
         """
+        # Getting compute node info and related instances info
         compute_ref = db.service_get_all_compute_by_host(context, host)
         compute_ref = compute_ref[0]
-
-        # Getting physical resource information
-        compute_node_ref = compute_ref['compute_node'][0]
-        resource = {'vcpus': compute_node_ref['vcpus'],
-                    'memory_mb': compute_node_ref['memory_mb'],
-                    'local_gb': compute_node_ref['local_gb'],
-                    'vcpus_used': compute_node_ref['vcpus_used'],
-                    'memory_mb_used': compute_node_ref['memory_mb_used'],
-                    'local_gb_used': compute_node_ref['local_gb_used']}
-
-        # Getting usage resource information
-        usage = {}
         instance_refs = db.instance_get_all_by_host(context,
                                                     compute_ref['host'])
+
+        # Getting total available/used resource
+        compute_ref = compute_ref['compute_node'][0]
+        resource = {'vcpus': compute_ref['vcpus'],
+                    'memory_mb': compute_ref['memory_mb'],
+                    'local_gb': compute_ref['local_gb'],
+                    'vcpus_used': compute_ref['vcpus_used'],
+                    'memory_mb_used': compute_ref['memory_mb_used'],
+                    'local_gb_used': compute_ref['local_gb_used']}
+        usage = dict()
         if not instance_refs:
             return {'resource': resource, 'usage': usage}
 
+        # Getting usage resource per project
         project_ids = [i['project_id'] for i in instance_refs]
         project_ids = list(set(project_ids))
         for project_id in project_ids:
-            vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
-                                                                 host,
-                                                                 project_id)
-            mem = db.instance_get_memory_sum_by_host_and_project(context,
-                                                                 host,
-                                                                 project_id)
-            hdd = db.instance_get_disk_sum_by_host_and_project(context,
-                                                               host,
-                                                               project_id)
-            usage[project_id] = {'vcpus': int(vcpus),
-                                 'memory_mb': int(mem),
-                                 'local_gb': int(hdd)}
+            vcpus = [i['vcpus'] for i in instance_refs \
+                if i['project_id'] == project_id]
+
+            mem = [i['memory_mb'] for i in instance_refs \
+                if i['project_id'] == project_id]
+
+            disk = [i['local_gb'] for i in instance_refs \
+                if i['project_id'] == project_id]
+
+            usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus),
+                                 'memory_mb': reduce(lambda x, y: x + y, mem),
+                                 'local_gb': reduce(lambda x, y: x + y, disk)}
 
         return {'resource': resource, 'usage': usage}
 
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 50b6b52c6..384f6fb00 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -652,10 +652,13 @@ class SimpleDriverTestCase(test.TestCase):
         self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
         self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
         driver_i._live_migration_src_check(nocare, nocare)
-        driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
-        driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+        driver_i._live_migration_dest_check(nocare, nocare,
+                                            i_ref['host'], False)
+        driver_i._live_migration_common_check(nocare, nocare,
+                                              i_ref['host'], False)
         self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
-        kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+        kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
+                  'block_migration': False}
         rpc.cast(self.context,
                  db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
                  {"method": 'live_migration', "args": kwargs})
@@ -663,7 +666,8 @@ class SimpleDriverTestCase(test.TestCase):
         self.mox.ReplayAll()
         self.scheduler.live_migration(self.context,
FLAGS.compute_topic, instance_id=instance_id, - dest=i_ref['host']) + dest=i_ref['host'], + block_migration=False) i_ref = db.instance_get(self.context, instance_id) self.assertTrue(i_ref['state_description'] == 'migrating') @@ -744,7 +748,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.ComputeServiceUnavailable, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -757,7 +761,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.UnableToMigrateToSelf, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -765,15 +769,33 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migration_dest_check_service_lack_memory(self): """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + memory_mb=12) i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) + s_ref = self._create_compute_service(host='somewhere') + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere', False) + + db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) + db.service_destroy(self.context, s_ref['id']) + + def test_block_migration_dest_check_service_lack_disk(self): + """Confirms exception raises when dest doesn't have enough disk.""" + instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + local_gb=70) + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere') self.assertRaises(exception.MigrationError, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') + self.context, i_ref, 'somewhere', True) db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) db.service_destroy(self.context, s_ref['id']) def test_live_migration_dest_check_service_works_correctly(self): @@ -785,7 +807,8 @@ class SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_dest_check(self.context, i_ref, - 'somewhere') + 'somewhere', + False) self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -820,7 +843,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.SourceHostUnavailable, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -844,7 +867,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -870,7 +893,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() 
self.assertRaises(exception.DestinationHypervisorTooOld, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -902,7 +925,8 @@ class SimpleDriverTestCase(test.TestCase): try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, - dest) + dest, + False) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4ac2dbc4..1f48a6dce 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -545,7 +545,8 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.recover_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -622,7 +623,8 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.recover_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index b6b36745a..000b383f2 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -20,6 +20,7 @@ import os import re import shutil import sys +import tempfile from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -674,6 +675,95 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + def test_pre_block_migration_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Replace instances_path since this testcase creates tmpfile + tmpdir = tempfile.mkdtemp() + store = FLAGS.instances_path + FLAGS.instances_path = tmpdir + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyjson = '[{"path": "%s/disk", "local_gb": 10, "type": "raw"}]' + + # Preparing mocks + # qemu-img should be mockd since test environment might not have + # large disk space. 
+ self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw', + '%s/%s/disk' % (tmpdir, instance_ref.name), 10) + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + conn.pre_block_migration(self.context, instance_ref, + dummyjson % tmpdir) + + self.assertTrue(os.path.exists('%s/%s/libvirt.xml' % + (tmpdir, instance_ref.name))) + + shutil.rmtree(tmpdir) + db.instance_destroy(self.context, instance_ref['id']) + # Restore FLAGS.instances_path + FLAGS.instances_path = store + + def test_get_instance_disk_info_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyxml = ("instance-0000000a" + "" + "" + "" + "" + "" + "" + "" + "") + + ret = ("image: /test/disk\nfile format: raw\n" + "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n") + + # Preparing mocks + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "XMLDesc") + vdmock.XMLDesc(0).AndReturn(dummyxml) + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return vdmock + self.create_fake_libvirt_mock(lookupByName=fake_lookup) + + self.mox.StubOutWithMock(os.path, "getsize") + # based on above testdata, one is raw image, so getsize is mocked. + os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024) + # another is qcow image, so qemu-img should be mocked. + self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\ + AndReturn((ret, '')) + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + info = conn.get_instance_disk_info(self.context, instance_ref) + info = utils.loads(info) + + self.assertTrue(info[0]['type'] == 'raw' and + info[1]['type'] == 'qcow2' and + info[0]['path'] == '/test/disk' and + info[1]['path'] == '/test/disk.local' and + info[0]['local_gb'] == 10 and + info[1]['local_gb'] == 20) + + db.instance_destroy(self.context, instance_ref['id']) + def test_spawn_with_network_info(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index c491418ae..86e388ff7 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri', flags.DEFINE_string('live_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", 'Define live migration behavior.') +flags.DEFINE_string('block_migration_flag', + "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, " + "VIR_MIGRATE_NON_SHARED_DISK", + 'Define block migration behavior.') flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') flags.DEFINE_string('qemu_img', 'qemu-img', @@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver): If cow is True, it will make a CoW image instead of a copy. """ + if not os.path.exists(target): base_dir = os.path.join(FLAGS.instances_path, '_base') if not os.path.exists(base_dir): @@ -1458,7 +1463,7 @@ class LibvirtConnection(driver.ComputeDriver): time.sleep(1) def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): + post_method, recover_method, block_migration=False): """Spawning live_migration operation for distributing high-load. 
         :params ctxt: security context
         :params instance_ref:
             nova.db.sqlalchemy.models.Instance object
             instance object that is migrated.
         :params dest: destination host
         :params post_method:
             post operation method.
             expected nova.compute.manager.post_live_migration.
         :params recover_method:
             recovery method when any exception occurs.
             expected nova.compute.manager.recover_live_migration.
+        :params block_migration: if true, do block migration.
 
         """
 
         greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
-                          post_method, recover_method)
+                          post_method, recover_method, block_migration)
 
-    def _live_migration(self, ctxt, instance_ref, dest,
-                        post_method, recover_method):
+    def _live_migration(self, ctxt, instance_ref, dest, post_method,
+                        recover_method, block_migration=False):
         """Do live migration.
 
         :params ctxt: security context
@@ -1498,24 +1505,18 @@ class LibvirtConnection(driver.ComputeDriver):
 
         # Do live migration.
         try:
-            flaglist = FLAGS.live_migration_flag.split(',')
+            if block_migration:
+                flaglist = FLAGS.block_migration_flag.split(',')
+            else:
+                flaglist = FLAGS.live_migration_flag.split(',')
             flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
             logical_sum = reduce(lambda x, y: x | y, flagvals)
 
-            if self.read_only:
-                tmpconn = self._connect(self.libvirt_uri, False)
-                dom = tmpconn.lookupByName(instance_ref.name)
-                dom.migrateToURI(FLAGS.live_migration_uri % dest,
-                                 logical_sum,
-                                 None,
-                                 FLAGS.live_migration_bandwidth)
-                tmpconn.close()
-            else:
-                dom = self._conn.lookupByName(instance_ref.name)
-                dom.migrateToURI(FLAGS.live_migration_uri % dest,
-                                 logical_sum,
-                                 None,
-                                 FLAGS.live_migration_bandwidth)
+            dom = self._conn.lookupByName(instance_ref.name)
+            dom.migrateToURI(FLAGS.live_migration_uri % dest,
+                             logical_sum,
+                             None,
+                             FLAGS.live_migration_bandwidth)
 
         except Exception:
             recover_method(ctxt, instance_ref, dest=dest)
@@ -1530,11 +1531,92 @@ class LibvirtConnection(driver.ComputeDriver):
                 self.get_info(instance_ref.name)['state']
             except exception.NotFound:
                 timer.stop()
-                post_method(ctxt, instance_ref, dest)
+                post_method(ctxt, instance_ref, dest, block_migration)
 
         timer.f = wait_for_live_migration
         timer.start(interval=0.5, now=True)
 
+    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
+        """Preparation for block migration.
+
+        :params ctxt: security context
+        :params instance_ref:
+            nova.db.sqlalchemy.models.Instance object
+            instance object that is migrated.
+        :params disk_info_json:
+            json strings specified in get_instance_disk_info
+
+        """
+        disk_info = utils.loads(disk_info_json)
+
+        # make instance directory
+        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
+        if os.path.exists(instance_dir):
+            raise exception.DestinatioinDiskExists(path=instance_dir)
+        os.mkdir(instance_dir)
+
+        for disk in disk_info:
+            base = os.path.basename(disk['path'])
+            # Get image type and create empty disk image.
+            instance_disk = os.path.join(instance_dir, base)
+            utils.execute('sudo', 'qemu-img', 'create', '-f', disk['type'],
+                          instance_disk, str(disk['local_gb'])+'G')
+
+        # block migration does not migrate libvirt.xml,
+        # to avoid any confusion of admins, create it now.
+        xml = self.to_xml(instance_ref)
+        f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
+        f.write(xml)
+        f.close()
+
+    def get_instance_disk_info(self, ctxt, instance_ref):
+        """Get disk information of the instance for block migration.
+ + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :return: + json strings with below format. + "[{'path':'disk', 'type':'raw', 'local_gb':10},...]" + + """ + disk_info = [] + + virt_dom = self._lookup_by_name(instance_ref.name) + xml = virt_dom.XMLDesc(0) + doc = libxml2.parseDoc(xml) + disk_nodes = doc.xpathEval('//devices/disk') + path_nodes = doc.xpathEval('//devices/disk/source') + driver_nodes = doc.xpathEval('//devices/disk/driver') + + for cnt, path_node in enumerate(path_nodes): + disk_type = disk_nodes[cnt].get_properties().getContent() + path = path_node.get_properties().getContent() + + if disk_type != 'file': + LOG.debug(_('skipping %(path)s since it looks like volume') % + locals()) + continue + + # xml is generated by kvm, so format is slightly different + # from libvirt.xml that nova generated. + #disk_type = driver_nodes[cnt].get_properties().getContent() + disk_type = \ + driver_nodes[cnt].get_properties().get_next().getContent() + if disk_type == 'raw': + size = int(os.path.getsize(path)) / 1024 / 1024 / 1024 + else: + out, err = utils.execute('sudo', 'qemu-img', 'info', path) + size = [i.split('(')[1].split()[0] for i in out.split('\n') + if i.strip().find('virtual size') >= 0] + size = int(size[0]) / 1024 / 1024 / 1024 + + disk_info.append({'type': disk_type, 'path': path, + 'local_gb': size}) + + return utils.dumps(disk_info) + def unfilter_instance(self, instance_ref): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance_ref) -- cgit From c16e1af623ef8baa1098510a8a04adb06f2da81b Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 15 Jun 2011 01:35:54 +0900 Subject: added kernel/ramdisk migrate support --- nova/virt/libvirt/connection.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 86e388ff7..73ca6e0d9 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1555,12 +1555,28 @@ class LibvirtConnection(driver.ComputeDriver): raise exception.DestinatioinDiskExists(path=instance_dir) os.mkdir(instance_dir) - for disk in disk_info: - base = os.path.basename(disk['path']) + for info in disk_info: + base = os.path.basename(info['path']) # Get image type and create empty disk image. instance_disk = os.path.join(instance_dir, base) - utils.execute('sudo', 'qemu-img', 'create', '-f', disk['type'], - instance_disk, str(disk['local_gb'])+'G') + utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'], + instance_disk, str(info['local_gb']) + 'G') + + # if image has kernel and ramdisk, just download + # following normal way. + if instance_ref['kernel_id']: + user = manager.AuthManager().get_user(instance_ref['user_id']) + project = manager.AuthManager().get_project( + instance_ref['project_id']) + self._fetch_image(os.path.join(instance_dir, 'kernel'), + instance_ref['kernel_id'], + user, + project) + if instance_ref['ramdisk_id']: + self._fetch_image(os.path.join(instance_dir, 'ramdisk'), + instance_ref['ramdisk_id'], + user, + project) # block migration does not migrate libvirt.xml, # to avoid any confusion of admins, create it now. 
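Note on the disk_info contract used by the two patches above: get_instance_disk_info serializes, on the source host, a JSON list describing each local disk, and pre_block_migration replays it on the destination to pre-create empty images before libvirt copies blocks into them. The following standalone sketch (not part of the patch) round-trips that contract; the paths, sizes, and temporary directory are illustrative only, and it assumes a qemu-img binary on PATH:

    # Standalone sketch (not part of this patch): exercises the disk_info
    # JSON contract between get_instance_disk_info and pre_block_migration.
    # All paths and sizes below are illustrative.
    import json
    import os
    import subprocess
    import tempfile

    disk_info_json = json.dumps([
        {'path': '/var/lib/nova/instances/instance-1/disk',
         'type': 'raw', 'local_gb': 10},
        {'path': '/var/lib/nova/instances/instance-1/disk.local',
         'type': 'qcow2', 'local_gb': 20}])

    instance_dir = tempfile.mkdtemp()
    for info in json.loads(disk_info_json):
        # The destination pre-creates an empty image of matching name,
        # format and size; the migration then fills it block by block.
        target = os.path.join(instance_dir, os.path.basename(info['path']))
        subprocess.check_call(['qemu-img', 'create', '-f', info['type'],
                               target, '%dG' % info['local_gb']])

Pre-creating the images on the destination is what lets the VIR_MIGRATE_NON_SHARED_* flags used by block_migration_flag copy disk contents during the migration instead of requiring shared instance storage.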
-- cgit From b03b3145a18f8f4717fdc55ab50dc714516d2c54 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 08:32:34 +0900 Subject: fix comments at nova.virt.libvirt.connection --- nova/virt/libvirt/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 313b19194..8fe8148d5 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1617,9 +1617,10 @@ class LibvirtConnection(driver.ComputeDriver): locals()) continue - # xml is generated by kvm, so format is slightly different - # from libvirt.xml that nova generated. - #disk_type = driver_nodes[cnt].get_properties().getContent() + # In case of libvirt.xml, disk type can be obtained + # by the below statement. + # -> disk_type = driver_nodes[cnt].get_properties().getContent() + # but this xml is generated by kvm, format is slightly different. disk_type = \ driver_nodes[cnt].get_properties().get_next().getContent() if disk_type == 'raw': -- cgit From 9b52343f792d83647978c7edbfe700258e3ddae2 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 08:51:25 +0900 Subject: fix pep8 check --- nova/compute/manager.py | 2 +- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f7604a253..4410ff27e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1133,7 +1133,7 @@ class ComputeManager(manager.SchedulerDependentManager): # before migration starts, so if any failure occurs, # any empty images has to be deleted. but not sure adding below # method is appropreate here. for now, admin has to delete manually. - # rpc.call(ctxt, + # rpc.cast(ctxt, # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), # {"method": "self.driver.destroy", # "args": {'instance':instance_ref}) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 8fe8148d5..d0c52c763 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1617,7 +1617,7 @@ class LibvirtConnection(driver.ComputeDriver): locals()) continue - # In case of libvirt.xml, disk type can be obtained + # In case of libvirt.xml, disk type can be obtained # by the below statement. # -> disk_type = driver_nodes[cnt].get_properties().getContent() # but this xml is generated by kvm, format is slightly different. -- cgit From a6d527646184889863de5ab1082695a29f70988a Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 09:43:34 +0900 Subject: nova.virt.libvirt.connection._live_migration is changed --- nova/compute/manager.py | 20 +++++++++----------- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 402d5829e..f32dfe6ab 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1220,8 +1220,7 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, - dest=None, delete=True): + def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context @@ -1231,9 +1230,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param dest: This method is called from live migration src host. 
This param specifies destination host. - :param delete: - If true, ask destination host to remove instance dir, - since empty disk image was created for block migration """ if not host: host = instance_ref['host'] @@ -1254,12 +1250,14 @@ class ComputeManager(manager.SchedulerDependentManager): # TODO: Block migration needs empty image at destination host # before migration starts, so if any failure occurs, - # any empty images has to be deleted. but not sure adding below - # method is appropreate here. for now, admin has to delete manually. - # rpc.cast(ctxt, - # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), - # {"method": "self.driver.destroy", - # "args": {'instance':instance_ref}) + # any empty images has to be deleted. + # In current version argument dest != None means this method is + # called for error recovering + #if dest: + # rpc.cast(ctxt, + # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + # {"method": "self.driver.destroy", + # "args": {'instance':instance_ref}) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index b0517e32a..2ddaa5971 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1573,7 +1573,7 @@ class LibvirtConnection(driver.ComputeDriver): self.get_info(instance_ref.name)['state'] except exception.NotFound: timer.stop() - post_method(ctxt, instance_ref, dest, block_migration) + post_method(ctxt, instance_ref, dest) timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) -- cgit From c184fa5d03f3f8d7faaff7b583404874de409aa6 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 21 Jun 2011 20:51:07 +0900 Subject: fixed reviewer's comment. 1. adding dest-instance-dir deleting operation to nova.compute.manager, 2. fix invalid raise statement --- nova/compute/manager.py | 36 +++++++++++++++++++++++------------- nova/exception.py | 4 ++++ nova/scheduler/driver.py | 3 ++- nova/tests/test_compute.py | 17 +++++++++++++---- nova/tests/test_libvirt.py | 6 +++--- nova/virt/libvirt/connection.py | 14 ++++++++++++-- 6 files changed, 57 insertions(+), 23 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f32dfe6ab..eac543251 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1045,7 +1045,7 @@ class ComputeManager(manager.SchedulerDependentManager): return self.driver.update_available_resource(context, self.host) def pre_live_migration(self, context, instance_id, time=None, - block_migration=False, **kwargs): + block_migration=False, disk=None): """Preparations for live migration at dest host. 
         :param context: security context
 
@@ -1106,7 +1106,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         if block_migration:
             self.driver.pre_block_migration(context,
                                             instance_ref,
-                                            kwargs.get('disk'))
+                                            disk)
 
     def live_migration(self, context, instance_id,
                        dest, block_migration=False):
@@ -1130,17 +1130,18 @@ class ComputeManager(manager.SchedulerDependentManager):
                          {"method": "check_for_export",
                           "args": {'instance_id': instance_id}})
 
-            args = {}
-            args['instance_id'] = instance_id
             if block_migration:
-                args['block_migration'] = block_migration
-                args['disk'] = \
-                    self.driver.get_instance_disk_info(context, instance_ref)
+                disk = self.driver.get_instance_disk_info(context,
+                                                          instance_ref)
+            else:
+                disk = None
 
             rpc.call(context,
                      self.db.queue_get_for(context, FLAGS.compute_topic, dest),
                      {"method": "pre_live_migration",
-                      "args": args})
+                      "args": {'instance_id': instance_id,
+                               'block_migration': block_migration,
+                               'disk': disk}})
 
         except Exception:
             i_name = instance_ref.name
@@ -1253,11 +1254,20 @@ class ComputeManager(manager.SchedulerDependentManager):
        # any empty images has to be deleted.
        # In current version argument dest != None means this method is
        # called for error recovering
-        #if dest:
-        #    rpc.cast(ctxt,
-        #             self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
-        #             {"method": "self.driver.destroy",
-        #             "args": {'instance':instance_ref})
+        if dest:
+            rpc.cast(ctxt,
+                     self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+                     {"method": "cleanup",
+                      "args": {'instance_id': instance_ref['id']}})
+
+    def cleanup(self, ctxt, instance_id):
+        """Cleaning up image directory that is created by pre_live_migration.
+
+        :param ctxt: security context
+        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        """
+        instance_ref = self.db.instance_get(ctxt, instance_id)
+        self.driver.cleanup(instance_ref)
 
     def periodic_tasks(self, context=None):
         """Tasks to be run at a periodic interval."""
         error_list = super(ComputeManager, self).periodic_tasks(context)
diff --git a/nova/exception.py b/nova/exception.py
index 689a797fd..4ebf0e169 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -588,6 +588,10 @@ class InstanceExists(Duplicate):
     message = _("Instance %(name)s already exists.")
 
 
+class InvalidSharedStorage(NovaException):
+    message = _("%(path)s is on shared storage: %(reason)s")
+
+
 class MigrationError(NovaException):
     message = _("Migration error") + ": %(reason)s"
 
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 889baa567..55762cf85 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -200,7 +200,8 @@ class Scheduler(object):
         try:
             self.mounted_on_same_shared_storage(context, instance_ref, dest)
             if block_migration:
-                raise
+                reason = "Block migration cannot be used with shared storage."
+ raise exception.InvalidSharedStorage(reason=reason, path=dest) except rpc.RemoteError: if not block_migration: raise diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 840961771..e9f5eb0e8 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -556,7 +556,10 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) + self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, @@ -582,7 +585,9 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, @@ -609,7 +614,9 @@ class ComputeTestCase(test.TestCase): AndReturn(topic) self.mox.StubOutWithMock(rpc, 'call') rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, @@ -634,7 +641,9 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 79a2b84b7..2e57dae08 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -734,7 +734,7 @@ class LibvirtConnTestCase(test.TestCase): # large disk space. self.mox.StubOutWithMock(utils, "execute") utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw', - '%s/%s/disk' % (tmpdir, instance_ref.name), 10) + '%s/%s/disk' % (tmpdir, instance_ref.name), '10G') self.mox.ReplayAll() conn = connection.LibvirtConnection(False) @@ -759,10 +759,10 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = db.instance_create(self.context, self.test_instance) dummyxml = ("instance-0000000a" "" - "" + "" "" "" - "" + "" "" "" "") diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2ddaa5971..8d23df45f 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -120,7 +120,7 @@ flags.DEFINE_string('live_migration_flag', 'Define live migration behavior.') flags.DEFINE_string('block_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, " - "VIR_MIGRATE_NON_SHARED_DISK", + "VIR_MIGRATE_NON_SHARED_INC", 'Define block migration behavior.') flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') @@ -295,7 +295,10 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(justinsb): We remove the domain definition. 
We probably # would do better to keep it if cleanup=False (e.g. volumes?) # (e.g. #2 - not losing machines on failure) - virt_dom.undefine() + # NOTE(masumotok): Migrated instances does not have domain + # definitions. + if instance.name in self._conn.listDefinedDomains(): + virt_dom.undefine() except libvirt.libvirtError as e: errcode = e.get_error_code() LOG.warning(_("Error from libvirt during undefine of " @@ -335,6 +338,13 @@ class LibvirtConnection(driver.ComputeDriver): if os.path.exists(target): shutil.rmtree(target) + def cleanup(self, instance): + """ Cleaning up image directory that is created pre_live_migration. + + :param instance: nova.db.sqlalchemy.models.Instance + """ + self._cleanup(instance) + @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._lookup_by_name(instance_name) -- cgit From e01848ed64c4523bb9e375da07e962b5ea1ea6ee Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 21 Jun 2011 21:10:08 +0900 Subject: erase unnecessary TODO: statement --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index eac543251..16dbda47d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1249,7 +1249,7 @@ class ComputeManager(manager.SchedulerDependentManager): if dest: volume_api.remove_from_compute(ctxt, volume_id, dest) - # TODO: Block migration needs empty image at destination host + # Block migration needs empty image at destination host # before migration starts, so if any failure occurs, # any empty images has to be deleted. # In current version argument dest != None means this method is -- cgit From 4001ee488420589e345dc42001e6cab9c68a5e12 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 11 Jul 2011 16:31:31 +0900 Subject: fix comments --- nova/compute/manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7d89dde80..1fbbe0065 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1341,10 +1341,9 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, block_migration=False): """Post operations for live migration . 
-        :param ctxt: security context
+        :param context: security context
         :param instance_id: nova.db.sqlalchemy.models.Instance.Id
         :param block_migration: block_migration
-        :param xml: libvirt.xml
 
         """
         instance_ref = self.db.instance_get(context, instance_id)
-- 
cgit 

From 08d60702f9995f9e756a16c733f6a26b9d0f5019 Mon Sep 17 00:00:00 2001
From: Dan Wendlandt
Date: Fri, 22 Jul 2011 16:56:00 -0700
Subject: merge

---
 nova/network/manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/network/manager.py b/nova/network/manager.py
index 005fa73e7..4706da6ea 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -879,7 +879,6 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
 
     def _setup_network(self, context, network_ref):
         """Sets up network on this host."""
-        network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
         if not network_ref['vpn_public_address']:
             net = {}
             address = FLAGS.vpn_ip
@@ -887,6 +886,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
             network_ref = db.network_update(context, network_ref['id'], net)
         else:
             address = network_ref['vpn_public_address']
+        network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
         self.driver.ensure_vlan_bridge(network_ref['vlan'],
                                        network_ref['bridge'],
                                        network_ref['bridge_interface'],
-- 
cgit 

From 79283cbb13d91e3c25e42af765f9da627813a6d8 Mon Sep 17 00:00:00 2001
From: Kei masumoto
Date: Fri, 29 Jul 2011 20:03:23 +0900
Subject: merged trunk and fixed post_live_migration_at_destination to get nw_info

---
 nova/compute/manager.py         | 4 +++-
 nova/virt/libvirt/connection.py | 7 +++++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index e80394fbd..8aee456fc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1389,7 +1389,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # No instance booting at source host, but instance dir
         # must be deleted for preparing next block migration
         if block_migration:
-            self.driver.destroy(instance_ref)
+            self.driver.destroy(instance_ref, network_info)
 
         LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
                  % locals())
@@ -1409,8 +1409,10 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_ref = self.db.instance_get(context, instance_id)
         LOG.info(_('Post operation of migraton started for %s .')
                  % instance_ref.name)
+        network_info = self._get_instance_nw_info(context, instance_ref)
         self.driver.post_live_migration_at_destination(context,
                                                        instance_ref,
+                                                       network_info,
                                                        block_migration)
 
     def rollback_live_migration(self, context, instance_ref,
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 0eab0b109..31f7acb4d 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1620,7 +1620,9 @@ class LibvirtConnection(driver.ComputeDriver):
                                user,
                                project)
 
-    def post_live_migration_at_destination(self, ctxt, instance_ref,
+    def post_live_migration_at_destination(self, ctxt,
+                                           instance_ref,
+                                           network_info,
                                            block_migration):
         """Post operation of live migration at destination host.
 
@@ -1628,6 +1630,7 @@ class LibvirtConnection(driver.ComputeDriver):
         :params instance_ref:
             nova.db.sqlalchemy.models.Instance object
             instance object that is migrated.
+        :params network_info: instance network information
         :params : block_migration: if true, post operation of block_migraiton.
         """
         # Define migrated instance, otherwise, suspend/destroy does not work.
@@ -1639,7 +1642,7 @@ class LibvirtConnection(driver.ComputeDriver): # In case of block migration, destination does not have # libvirt.xml if not os.path.isfile(xml_path): - xml = self.to_xml(instance_ref) + xml = self.to_xml(instance_ref, network_info=network_info) f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+') f.write(xml) f.close() -- cgit From 873aad92944f8840e772d65eda4b3320d65a9ce7 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 1 Aug 2011 18:11:15 -0700 Subject: initial commit of vif-plugging for network-service interfaces --- bin/nova-dhcpbridge | 24 +++--- nova/network/linux_net.py | 205 ++++++++++++++++++++++++++++++++-------------- nova/network/manager.py | 28 ++----- nova/utils.py | 9 ++ nova/virt/libvirt/vif.py | 2 +- 5 files changed, 174 insertions(+), 94 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 325642d52..1727ebf9b 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -48,12 +48,11 @@ flags.DECLARE('auth_driver', 'nova.auth.manager') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') -flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface') LOG = logging.getLogger('nova.dhcpbridge') -def add_lease(mac, ip_address, _hostname, _interface): +def add_lease(mac, ip_address, _hostname): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: LOG.debug(_("leasing ip")) @@ -67,13 +66,13 @@ def add_lease(mac, ip_address, _hostname, _interface): "args": {"address": ip_address}}) -def old_lease(mac, ip_address, hostname, interface): +def old_lease(mac, ip_address, hostname): """Update just as add lease.""" LOG.debug(_("Adopted old lease or got a change of mac/hostname")) - add_lease(mac, ip_address, hostname, interface) + add_lease(mac, ip_address, hostname) -def del_lease(mac, ip_address, _hostname, _interface): +def del_lease(mac, ip_address, _hostname): """Called when a lease expires.""" if FLAGS.fake_rabbit: LOG.debug(_("releasing ip")) @@ -87,10 +86,10 @@ def del_lease(mac, ip_address, _hostname, _interface): "args": {"address": ip_address}}) -def init_leases(interface): - """Get the list of hosts for an interface.""" +def init_leases(network_id): + """Get the list of hosts for a network.""" ctxt = context.get_admin_context() - network_ref = db.network_get_by_bridge(ctxt, interface) + network_ref = db.network_get(ctxt, network_id) return linux_net.get_dhcp_leases(ctxt, network_ref) @@ -101,7 +100,8 @@ def main(): argv = FLAGS(sys.argv) logging.setup() # check ENV first so we don't break any older deploys - interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface) + network_id = int(os.environ.get('NETWORK_ID')) + if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags @@ -117,11 +117,11 @@ def main(): ip = argv[3] hostname = argv[4] msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and" - " hostname %(hostname)s on interface %(interface)s") % locals() + " hostname %(hostname)s on network %(network_id)s") % locals() LOG.debug(msg) - globals()[action + '_lease'](mac, ip, hostname, interface) + globals()[action + '_lease'](mac, ip, hostname) else: - print init_leases(interface) + print init_leases(network_id) if __name__ == "__main__": main() diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8ace07884..ee0ef0b85 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -63,6 +63,11 
@@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', 'dmz range that should be accepted') flags.DEFINE_string('dnsmasq_config_file', "", 'Override the default dnsmasq settings with this file') +flags.DEFINE_string('linuxnet_interface_driver', + 'nova.network.linux_net.LinuxBridgeInterfaceDriver', + 'Driver used to create ethernet devices.') +flags.DEFINE_string('linuxnet_ovs_integration_bridge', + 'br-int', 'Name of Open vSwitch bridge used with linuxnet') binary_name = os.path.basename(inspect.stack()[-1][1]) @@ -413,7 +418,7 @@ def ensure_metadata_ip(): 'scope', 'link', 'dev', 'lo', check_exit_code=False) -def ensure_vlan_forward(public_ip, port, private_ip): +def ensure_vpn_forward(public_ip, port, private_ip): """Sets up forwarding rules for vlan.""" iptables_manager.ipv4['filter'].add_rule('FORWARD', '-d %s -p udp ' @@ -491,32 +496,11 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute('sudo', 'brctl', 'setfd', bridge, 0) # _execute('sudo brctl setageing %s 10' % bridge) _execute('sudo', 'brctl', 'stp', bridge, 'off') - _execute('sudo', 'ip', 'link', 'set', bridge, 'up') - if net_attrs: - # NOTE(vish): The ip for dnsmasq has to be the first address on the - # bridge for it to respond to reqests properly - suffix = net_attrs['cidr'].rpartition('/')[2] - out, err = _execute('sudo', 'ip', 'addr', 'add', - '%s/%s' % - (net_attrs['dhcp_server'], suffix), - 'brd', - net_attrs['broadcast'], - 'dev', - bridge, - check_exit_code=False) - if err and err != 'RTNETLINK answers: File exists\n': - raise exception.Error('Failed to add ip: %s' % err) - if(FLAGS.use_ipv6): - _execute('sudo', 'ip', '-f', 'inet6', 'addr', - 'change', net_attrs['cidr_v6'], - 'dev', bridge) - # NOTE(vish): If the public interface is the same as the - # bridge, then the bridge has to be in promiscuous - # to forward packets properly. 
- if(FLAGS.public_interface == bridge): - _execute('sudo', 'ip', 'link', 'set', - 'dev', bridge, 'promisc', 'on') + if interface: + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, + check_exit_code=False) + # NOTE(vish): This will break if there is already an ip on the # interface, so we move any ips to the bridge gateway = None @@ -526,9 +510,9 @@ def ensure_bridge(bridge, interface, net_attrs=None): if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: gateway = fields[1] _execute('sudo', 'route', 'del', 'default', 'gw', gateway, - 'dev', interface, check_exit_code=False) + 'dev', interface, check_exit_code=False) out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, - 'scope', 'global') + 'scope', 'global') for line in out.split('\n'): fields = line.split() if fields and fields[0] == 'inet': @@ -537,8 +521,6 @@ def ensure_bridge(bridge, interface, net_attrs=None): _execute(*_ip_bridge_cmd('add', params, bridge)) if gateway: _execute('sudo', 'route', 'add', 'default', 'gw', gateway) - out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, - check_exit_code=False) if (err and err != "device %s is already a member of a bridge; can't " "enslave it to bridge %s.\n" % (interface, bridge)): @@ -552,6 +534,36 @@ def ensure_bridge(bridge, interface, net_attrs=None): bridge) +def initialize_gateway_device(dev, network_ref): + if not network_ref: + return + + # NOTE(vish): The ip for dnsmasq has to be the first address on the + # bridge for it to respond to requests properly + suffix = network_ref['cidr'].rpartition('/')[2] + out, err = _execute('sudo', 'ip', 'addr', 'add', + '%s/%s' % + (network_ref['dhcp_server'], suffix), + 'brd', + network_ref['broadcast'], + 'dev', + dev, + check_exit_code=False) + if err and err != 'RTNETLINK answers: File exists\n': + raise exception.Error('Failed to add ip: %s' % err) + if(FLAGS.use_ipv6): + _execute('sudo', 'ip', '-f', 'inet6', 'addr', + 'change', network_ref['cidr_v6'], + 'dev', dev) + # NOTE(vish): If the public interface is the same as the + # bridge, then the bridge has to be in promiscuous + # to forward packets properly. + if(FLAGS.public_interface == dev): + _execute('sudo', 'ip', 'link', 'set', + 'dev', dev, 'promisc', 'on') + _execute('sudo', 'ip', 'link', 'set', dev, 'up') + + def get_dhcp_leases(context, network_ref): """Return a network's hosts config in dnsmasq leasefile format.""" hosts = [] @@ -580,21 +592,21 @@ def get_dhcp_hosts(context, network_ref): # configuration options (like dhcp-range, vlan, ...) # aren't reloaded. @utils.synchronized('dnsmasq_start') -def update_dhcp(context, network_ref): +def update_dhcp(context, dev, network_ref): """(Re)starts a dnsmasq server for a given network. If a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance.
""" - conffile = _dhcp_file(network_ref['bridge'], 'conf') + conffile = _dhcp_file(dev, 'conf') with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_ref)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - pid = _dnsmasq_pid_for(network_ref['bridge']) + pid = _dnsmasq_pid_for(dev) # if dnsmasq is already running, then tell it to reload if pid: @@ -609,16 +621,16 @@ def update_dhcp(context, network_ref): else: LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) - # FLAGFILE and DNSMASQ_INTERFACE in env + # FLAGFILE and NETWORK_ID in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, - 'DNSMASQ_INTERFACE': network_ref['bridge']} - command = _dnsmasq_cmd(network_ref) + 'NETWORK_ID': str(network_ref['id'])} + command = _dnsmasq_cmd(dev, network_ref) _execute(*command, addl_env=env) @utils.synchronized('radvd_start') -def update_ra(context, network_ref): - conffile = _ra_file(network_ref['bridge'], 'conf') +def update_ra(context, dev, network_ref): + conffile = _ra_file(dev, 'conf') with open(conffile, 'w') as f: conf_str = """ interface %s @@ -632,13 +644,13 @@ interface %s AdvAutonomous on; }; }; -""" % (network_ref['bridge'], network_ref['cidr_v6']) +""" % (dev, network_ref['cidr_v6']) f.write(conf_str) # Make sure radvd can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - pid = _ra_pid_for(network_ref['bridge']) + pid = _ra_pid_for(dev) # if radvd is already running, then tell it to reload if pid: @@ -651,7 +663,7 @@ interface %s LOG.debug(_('killing radvd threw %s'), exc) else: LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) - command = _ra_cmd(network_ref) + command = _ra_cmd(dev) _execute(*command) @@ -696,20 +708,20 @@ def _device_exists(device): return not err -def _dnsmasq_cmd(net): +def _dnsmasq_cmd(dev, net): """Builds dnsmasq command.""" cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', - '--interface=%s' % net['bridge'], + '--interface=%s' % dev, '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, - '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), + '--pid-file=%s' % _dhcp_file(dev, 'pid'), '--listen-address=%s' % net['dhcp_server'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])), - '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), + '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] if FLAGS.dns_server: @@ -717,18 +729,18 @@ def _dnsmasq_cmd(net): return cmd -def _ra_cmd(net): +def _ra_cmd(dev): """Builds radvd command.""" cmd = ['sudo', '-E', 'radvd', # '-u', 'nobody', - '-C', '%s' % _ra_file(net['bridge'], 'conf'), - '-p', '%s' % _ra_file(net['bridge'], 'pid')] + '-C', '%s' % _ra_file(dev, 'conf'), + '-p', '%s' % _ra_file(dev, 'pid')] return cmd -def _stop_dnsmasq(network): +def _stop_dnsmasq(dev): """Stops the dnsmasq instance for a given network.""" - pid = _dnsmasq_pid_for(network) + pid = _dnsmasq_pid_for(dev) if pid: try: @@ -737,49 +749,49 @@ def _stop_dnsmasq(network): LOG.debug(_('Killing dnsmasq threw %s'), exc) -def _dhcp_file(bridge, kind): - """Return path to a pid, leases or conf file for a bridge.""" +def _dhcp_file(dev, kind): + """Return path to a pid, leases or conf file for a bridge/device.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, - bridge, + dev, kind)) -def 
_ra_file(bridge, kind): - """Return path to a pid or conf file for a bridge.""" +def _ra_file(dev, kind): + """Return path to a pid or conf file for a bridge/device.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path, - bridge, + dev, kind)) -def _dnsmasq_pid_for(bridge): - """Returns the pid for prior dnsmasq instance for a bridge. +def _dnsmasq_pid_for(dev): + """Returns the pid for prior dnsmasq instance for a bridge/device. Returns None if no pid file exists. If machine has rebooted pid might be incorrect (caller should check). """ - pid_file = _dhcp_file(bridge, 'pid') + pid_file = _dhcp_file(dev, 'pid') if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) -def _ra_pid_for(bridge): - """Returns the pid for prior radvd instance for a bridge. +def _ra_pid_for(dev): - """Returns the pid for prior radvd instance for a bridge/device. Returns None if no pid file exists. If machine has rebooted pid might be incorrect (caller should check). """ - pid_file = _ra_file(bridge, 'pid') + pid_file = _ra_file(dev, 'pid') if os.path.exists(pid_file): with open(pid_file, 'r') as f: @@ -795,3 +807,72 @@ def _ip_bridge_cmd(action, params, device): iptables_manager = IptablesManager() + +# Similar to compute virt layers, the Linux network node +# code uses a flexible driver model to support different ways +# of creating ethernet interfaces and attaching them to the network. +# In the case of a network host, these interfaces +# act as gateway/dhcp/vpn/etc. endpoints, not VM interfaces. + + +def plug(network): + return interface_driver.plug(network) + + +def unplug(network): + return interface_driver.unplug(network) + + +class LinuxNetInterfaceDriver(object): + """Abstract class that defines the generic network host API + for all Linux interface drivers.""" + + def plug(self, network): + """Create Linux device, return device name""" + raise NotImplementedError() + + def unplug(self, network): + """Destroy Linux device, return device name""" + raise NotImplementedError() + + +# plugs interfaces using Linux Bridge +class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): + + def plug(self, network): + self.driver.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface'], + network) + return network['bridge'] + + def unplug(self, network): + return network['bridge'] + + +# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): + + def plug(self, network): + dev = "gw-" + str(network['id']) + if not _device_exists(dev): + bridge = FLAGS.linuxnet_ovs_integration_bridge + mac_addr = utils.generate_mac_address() + _execute('sudo', 'ovs-vsctl', + '--', '--may-exist', 'add-port', bridge, dev, + '--', 'set', 'Interface', dev, "type=internal", + '--', 'set', 'Interface', dev, + "external-ids:iface-id=nova-%s" % dev, + '--', 'set', 'Interface', dev, + "external-ids:iface-status=active", + '--', 'set', 'Interface', dev, + "external-ids:attached-mac=%s" % mac_addr) + _execute('sudo', 'ip', 'link', 'set', + dev, "address", mac_addr) + return dev + + def unplug(self, network): + dev = "gw-" + str(network['id']) + return dev + +interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver) diff --git a/nova/network/manager.py b/nova/network/manager.py index 4706da6ea..0fc55f441 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -504,7 +504,7 @@ class NetworkManager(manager.SchedulerDependentManager): 
def _allocate_mac_addresses(self, context, instance_id, networks): """Generates mac addresses and creates vif rows in db for them.""" for network in networks: - vif = {'address': self.generate_mac_address(), + vif = {'address': utils.generate_mac_address(), 'instance_id': instance_id, 'network_id': network['id']} # try FLAG times to create a vif record with a unique mac_address @@ -513,20 +513,12 @@ class NetworkManager(manager.SchedulerDependentManager): self.db.virtual_interface_create(context, vif) break except exception.VirtualInterfaceCreateException: - vif['address'] = self.generate_mac_address() + vif['address'] = utils.generate_mac_address() else: self.db.virtual_interface_delete_by_instance(context, instance_id) raise exception.VirtualInterfaceMacAddressException() - def generate_mac_address(self): - """Generate a mac address for a vif on an instance.""" - mac = [0x02, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): """Adds a fixed ip to an instance from specified network.""" networks = [self.db.network_get(context, network_id)] @@ -887,23 +879,21 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): else: address = network_ref['vpn_public_address'] network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - self.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge'], - network_ref['bridge_interface'], - network_ref) + dev = self.driver.plug(network_ref) + self.driver.initialize_gateway_device(dev, network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. if address == FLAGS.vpn_ip and hasattr(self.driver, - "ensure_vlan_forward"): - self.driver.ensure_vlan_forward(FLAGS.vpn_ip, + "ensure_vpn_forward"): + self.driver.ensure_vpn_forward(FLAGS.vpn_ip, network_ref['vpn_public_port'], network_ref['vpn_private_address']) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref) + self.driver.update_dhcp(context, dev, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_ref) - gateway = utils.get_my_linklocal(network_ref['bridge']) + self.driver.update_ra(context, dev, network_ref) + gateway = utils.get_my_linklocal(dev) self.db.network_update(context, network_ref['id'], {'gateway_v6': gateway}) diff --git a/nova/utils.py b/nova/utils.py index 8784a227d..d56fa614c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -320,6 +320,15 @@ def get_my_linklocal(interface): " :%(ex)s") % locals()) +def generate_mac_address(): + """Generate an Ethernet MAC address.""" + mac = [0x02, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index aa566cc72..432a585ad 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -99,8 +99,8 @@ class LibvirtOpenVswitchDriver(VIFDriver): def plug(self, instance, network, mapping): vif_id = str(instance['id']) + "-" + str(network['id']) dev = "tap-%s" % vif_id + iface_id = "nova-" + vif_id if not linux_net._device_exists(dev): - iface_id = "nova-" + vif_id utils.execute('sudo', 'ip', 'tuntap', 'add', dev, 'mode', 'tap') utils.execute('sudo', 'ip', 'link', 'set', dev, 'up') utils.execute('sudo', 'ovs-vsctl', 
'--', '--may-exist', 'add-port', -- cgit From 857f4453efaca98ced3e07d55ee6f0188713e60e Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 1 Aug 2011 18:30:59 -0700 Subject: fix LinuxBridgeInterfaceDriver --- nova/network/linux_net.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index ee0ef0b85..ce1f53ab9 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -840,10 +840,10 @@ class LinuxNetInterfaceDriver(object): class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def plug(self, network): - self.driver.ensure_vlan_bridge(network['vlan'], - network['bridge'], - network['bridge_interface'], - network) + ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface'], + network) return network['bridge'] def unplug(self, network): -- cgit From dfc9c9c2b5e92e599bdeae4c03d3761215a0deca Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 4 Aug 2011 12:36:11 -0700 Subject: modify _setup_network for flatDHCP as well --- nova/network/linux_net.py | 12 ++++++++---- nova/network/manager.py | 13 +++++++------ 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index ce1f53ab9..7012cceeb 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -805,9 +805,6 @@ def _ip_bridge_cmd(action, params, device): cmd.extend(['dev', device]) return cmd - -iptables_manager = IptablesManager() - # Similar to compute virt layers, the Linux network node # code uses a flexible driver model to support different ways # of creating ethernet interfaces and attaching them to the network. @@ -840,10 +837,16 @@ class LinuxNetInterfaceDriver(object): class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def plug(self, network): - ensure_vlan_bridge(network['vlan'], + if network.get('vlan', None) is not None: + ensure_vlan_bridge(network['vlan'], network['bridge'], network['bridge_interface'], network) + else: + ensure_bridge(network['bridge'], + network['bridge_interface'], + network) + return network['bridge'] def unplug(self, network): @@ -875,4 +878,5 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): dev = "gw-" + str(network['id']) return dev +iptables_manager = IptablesManager() interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver) diff --git a/nova/network/manager.py b/nova/network/manager.py index bedbe2c21..e7c84d478 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -785,14 +785,15 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): def _setup_network(self, context, network_ref): """Sets up network on this host.""" network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - self.driver.ensure_bridge(network_ref['bridge'], - network_ref['bridge_interface'], - network_ref) + + dev = self.driver.plug(network_ref) + self.driver.initialize_gateway_device(dev, network_ref) + if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref) + self.driver.update_dhcp(context, dev, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_ref) - gateway = utils.get_my_linklocal(network_ref['bridge']) + self.driver.update_ra(context, dev, network_ref) + gateway = utils.get_my_linklocal(dev) self.db.network_update(context, network_ref['id'], {'gateway_v6': gateway}) -- cgit From 18f09f165b5dca5f11253b143045b2ff7327532d Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 4 Aug 2011 16:20:38 -0700 
Subject: move ensure_vlan_bridge,ensure_bridge,ensure_vlan to the bridge/vlan specific vif-plugging driver --- nova/network/linux_net.py | 166 ++++++++++++++++++++++++---------------------- nova/virt/libvirt/vif.py | 6 +- 2 files changed, 90 insertions(+), 82 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7012cceeb..17b63a849 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -456,84 +456,6 @@ def floating_forward_rules(floating_ip, fixed_ip): '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] -def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - interface = ensure_vlan(vlan_num, bridge_interface) - ensure_bridge(bridge, interface, net_attrs) - return interface - - -@utils.synchronized('ensure_vlan', external=True) -def ensure_vlan(vlan_num, bridge_interface): - """Create a vlan unless it already exists.""" - interface = 'vlan%s' % vlan_num - if not _device_exists(interface): - LOG.debug(_('Starting VLAN inteface %s'), interface) - _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') - _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) - _execute('sudo', 'ip', 'link', 'set', interface, 'up') - return interface - - -@utils.synchronized('ensure_bridge', external=True) -def ensure_bridge(bridge, interface, net_attrs=None): - """Create a bridge unless it already exists. - - :param interface: the interface to create the bridge on. - :param net_attrs: dictionary with attributes used to create the bridge. - - If net_attrs is set, it will add the net_attrs['gateway'] to the bridge - using net_attrs['broadcast'] and net_attrs['cidr']. It will also add - the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set. - - The code will attempt to move any ips that already exist on the interface - onto the bridge and reset the default gateway if necessary. 
- - """ - if not _device_exists(bridge): - LOG.debug(_('Starting Bridge interface for %s'), interface) - _execute('sudo', 'brctl', 'addbr', bridge) - _execute('sudo', 'brctl', 'setfd', bridge, 0) - # _execute('sudo brctl setageing %s 10' % bridge) - _execute('sudo', 'brctl', 'stp', bridge, 'off') - - if interface: - out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, - check_exit_code=False) - - # NOTE(vish): This will break if there is already an ip on the - # interface, so we move any ips to the bridge - gateway = None - out, err = _execute('sudo', 'route', '-n') - for line in out.split('\n'): - fields = line.split() - if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: - gateway = fields[1] - _execute('sudo', 'route', 'del', 'default', 'gw', gateway, - 'dev', interface, check_exit_code=False) - out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, - 'scope', 'global') - for line in out.split('\n'): - fields = line.split() - if fields and fields[0] == 'inet': - params = fields[1:-1] - _execute(*_ip_bridge_cmd('del', params, fields[-1])) - _execute(*_ip_bridge_cmd('add', params, bridge)) - if gateway: - _execute('sudo', 'route', 'add', 'default', 'gw', gateway) - - if (err and err != "device %s is already a member of a bridge; can't " - "enslave it to bridge %s.\n" % (interface, bridge)): - raise exception.Error('Failed to add interface: %s' % err) - - iptables_manager.ipv4['filter'].add_rule('FORWARD', - '--in-interface %s -j ACCEPT' % \ - bridge) - iptables_manager.ipv4['filter'].add_rule('FORWARD', - '--out-interface %s -j ACCEPT' % \ - bridge) - - def initialize_gateway_device(dev, network_ref): if not network_ref: return @@ -838,12 +760,14 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def plug(self, network): if network.get('vlan', None) is not None: - ensure_vlan_bridge(network['vlan'], + LinuxBridgeInterfaceDriver.ensure_vlan_bridge( + network['vlan'], network['bridge'], network['bridge_interface'], network) else: - ensure_bridge(network['bridge'], + LinuxBridgeInterfaceDriver.ensure_bridge( + network['bridge'], network['bridge_interface'], network) @@ -852,6 +776,88 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): def unplug(self, network): return network['bridge'] + @classmethod + def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface, + net_attrs=None): + """Create a vlan and bridge unless they already exist.""" + interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num, + bridge_interface) + LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs) + return interface + + @classmethod + @utils.synchronized('ensure_vlan', external=True) + def ensure_vlan(_self, vlan_num, bridge_interface): + """Create a vlan unless it already exists.""" + interface = 'vlan%s' % vlan_num + if not _device_exists(interface): + LOG.debug(_('Starting VLAN inteface %s'), interface) + _execute('sudo', 'vconfig', 'set_name_type', + 'VLAN_PLUS_VID_NO_PAD') + _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) + _execute('sudo', 'ip', 'link', 'set', interface, 'up') + return interface + + @classmethod + @utils.synchronized('ensure_bridge', external=True) + def ensure_bridge(_self, bridge, interface, net_attrs=None): + """Create a bridge unless it already exists. + + :param interface: the interface to create the bridge on. + :param net_attrs: dictionary with attributes used to create bridge. 
+ + If net_attrs is set, it will add the net_attrs['gateway'] to the bridge + using net_attrs['broadcast'] and net_attrs['cidr']. It will also add + the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set. + + The code will attempt to move any ips that already exist on the + interface onto the bridge and reset the default gateway if necessary. + + """ + if not _device_exists(bridge): + LOG.debug(_('Starting Bridge interface for %s'), interface) + _execute('sudo', 'brctl', 'addbr', bridge) + _execute('sudo', 'brctl', 'setfd', bridge, 0) + # _execute('sudo brctl setageing %s 10' % bridge) + _execute('sudo', 'brctl', 'stp', bridge, 'off') + + if interface: + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, + check_exit_code=False) + + # NOTE(vish): This will break if there is already an ip on the + # interface, so we move any ips to the bridge + gateway = None + out, err = _execute('sudo', 'route', '-n') + for line in out.split('\n'): + fields = line.split() + if fields and fields[0] == '0.0.0.0' and \ + fields[-1] == interface: + gateway = fields[1] + _execute('sudo', 'route', 'del', 'default', 'gw', gateway, + 'dev', interface, check_exit_code=False) + out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, + 'scope', 'global') + for line in out.split('\n'): + fields = line.split() + if fields and fields[0] == 'inet': + params = fields[1:-1] + _execute(*_ip_bridge_cmd('del', params, fields[-1])) + _execute(*_ip_bridge_cmd('add', params, bridge)) + if gateway: + _execute('sudo', 'route', 'add', 'default', 'gw', gateway) + + if (err and err != "device %s is already a member of a bridge; " + "can't enslave it to bridge %s.\n" % (interface, bridge)): + raise exception.Error('Failed to add interface: %s' % err) + + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--in-interface %s -j ACCEPT' % \ + bridge) + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--out-interface %s -j ACCEPT' % \ + bridge) + # plugs interfaces using Open vSwitch class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index eef582fac..f42eeec98 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -79,12 +79,14 @@ class LibvirtBridgeDriver(VIFDriver): LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), {'vlan': network['vlan'], 'bridge': network['bridge']}) - linux_net.ensure_vlan_bridge(network['vlan'], + linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge( + network['vlan'], network['bridge'], network['bridge_interface']) else: LOG.debug(_("Ensuring bridge %s"), network['bridge']) - linux_net.ensure_bridge(network['bridge'], + linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( + network['bridge'], network['bridge_interface']) return self._get_configurations(network, mapping) -- cgit From c81febc28a602989636e77d1b3e9a75741e04352 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 9 Aug 2011 16:00:54 -0400 Subject: Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata. 
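The cloud.py hunk below is a one-line fix, but the reason is easy to miss: instance_ref['instance_type'] is a joined InstanceTypes row rather than a plain string, so returning it verbatim leaked the row object into the metadata response. A sketch of the difference (flavor name hypothetical):

    # Before: the whole InstanceTypes row rendered into the response.
    meta['instance-type'] = instance_ref['instance_type']
    # After: only the flavor name, e.g. 'm1.small', is returned, which
    # is what callers of latest/meta-data/instance-type expect.
    meta['instance-type'] = instance_ref['instance_type'].name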
--- nova/api/ec2/cloud.py | 2 +- smoketests/test_netadmin.py | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f64a92d12..9538a31e2 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -299,7 +299,7 @@ class CloudController(object): 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, - 'instance-type': instance_ref['instance_type'], + 'instance-type': instance_ref['instance_type'].name, 'local-hostname': hostname, 'local-ipv4': address, 'placement': {'availability-zone': availability_zone}, diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py index 8c8fa35b8..ef73e6f4c 100644 --- a/smoketests/test_netadmin.py +++ b/smoketests/test_netadmin.py @@ -107,14 +107,18 @@ class AddressTests(base.UserSmokeTestCase): class SecurityGroupTests(base.UserSmokeTestCase): - def __public_instance_is_accessible(self): - id_url = "latest/meta-data/instance-id" + def __get_metadata_item(self, category): + id_url = "latest/meta-data/%s" % category options = "-f -s --max-time 1" command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) status, output = commands.getstatusoutput(command) - instance_id = output.strip() + value = output.strip() if status > 0: return False + return value + + def __public_instance_is_accessible(self): + instance_id = self.__get_metadata_item('instance-id') if not instance_id: return False if instance_id != self.data['instance'].id: @@ -166,7 +170,14 @@ class SecurityGroupTests(base.UserSmokeTestCase): finally: result = self.conn.disassociate_address(self.data['public_ip']) - def test_005_can_revoke_security_group_ingress(self): + def test_005_validate_metadata(self): + + instance = self.data['instance'] + self.assertTrue(instance.instance_type, + self.__get_metadata_item("instance-type")) + #FIXME(dprince): validate more metadata here + + def test_006_can_revoke_security_group_ingress(self): self.assertTrue(self.conn.revoke_security_group(TEST_GROUP, ip_protocol='tcp', from_port=80, -- cgit From f9cf0d334330f034d0e0fb2ae8c88dda38e62832 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 11:54:35 -0700 Subject: Added search instance by metadata. 
get_all_by_filters should filter deleted --- Authors | 1 + nova/db/sqlalchemy/api.py | 20 ++++++++++++++- nova/tests/test_compute.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 1 deletion(-) diff --git a/Authors b/Authors index e639cbf76..ccd70baaf 100644 --- a/Authors +++ b/Authors @@ -103,6 +103,7 @@ Tushar Patil Vasiliy Shlykov Vishvananda Ishaya Vivek Y S +Vladimir Popovski William Wolf Yoshiaki Tamura Youcef Laribi diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8119cdfb8..65977a8bf 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1175,6 +1175,19 @@ def instance_get_all_by_filters(context, filters): return True return False + def _regexp_filter_by_metadata(instance, meta): + inst_metadata = [{node['key']: node['value']} \ + for node in instance['metadata']] + if isinstance(meta, list): + for node in meta: + if node not in inst_metadata: + return False + elif isinstance(meta, dict): + for k, v in meta.iteritems(): + if {k: v} not in inst_metadata: + return False + return True + def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) @@ -1232,7 +1245,9 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.all() + instances = query_prefix.\ + filter_by(deleted=can_read_deleted(context)).\ + all() if not instances: return [] @@ -1248,6 +1263,9 @@ def instance_get_all_by_filters(context, filters): filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) + elif filter_name == 'metadata': + filter_l = lambda instance: _regexp_filter_by_metadata(instance, + filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..18daa970e 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1323,6 +1323,69 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) + def test_get_all_by_metadata(self): + """Test searching instances by metadata""" + + c = context.get_admin_context() + instance_id0 = self._create_instance() + instance_id1 = self._create_instance({ + 'metadata': {'key1': 'value1'}}) + instance_id2 = self._create_instance({ + 'metadata': {'key2': 'value2'}}) + instance_id3 = self._create_instance({ + 'metadata': {'key3': 'value3'}}) + instance_id4 = self._create_instance({ + 'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + + # get all instances + instances = self.compute_api.get_all(c, + search_opts={'metadata': {}}) + self.assertEqual(len(instances), 5) + + # wrong key/value combination + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key1': 'value3'}}) + self.assertEqual(len(instances), 0) + + # non-existing keys + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key5': 'value1'}}) + self.assertEqual(len(instances), 0) + + # find existing instance + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key2': 'value2'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3'}}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + 
self.assertTrue(instance_id3 in instance_ids) + self.assertTrue(instance_id4 in instance_ids) + + # multiple criteria as a dict + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + # multiple criteria as a list + instances = self.compute_api.get_all(c, + search_opts={'metadata': [{'key4': 'value4'}, + {'key3': 'value3'}]}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + db.instance_destroy(c, instance_id0) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + db.instance_destroy(c, instance_id4) + @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', -- cgit From f881bee5b1283d5bec2396b45cea9a062cb2a4b2 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 11 Aug 2011 14:00:56 -0500 Subject: Refactored the scheduler classes without changing functionality. Removed all 'zone-aware' naming references, as these were only useful during the zone development process. Also fixed some PEP8 problems in trunk code. --- nova/scheduler/abstract_scheduler.py | 403 ++++++++++++++++++++++ nova/scheduler/host_filter.py | 51 +-- nova/scheduler/least_cost.py | 7 +- nova/scheduler/zone_aware_scheduler.py | 383 -------------------- nova/tests/scheduler/test_abstract_scheduler.py | 364 +++++++++++++++++++ nova/tests/scheduler/test_least_cost_scheduler.py | 4 +- nova/tests/scheduler/test_zone_aware_scheduler.py | 364 ------------------- 7 files changed, 782 insertions(+), 794 deletions(-) create mode 100644 nova/scheduler/abstract_scheduler.py delete mode 100644 nova/scheduler/zone_aware_scheduler.py create mode 100644 nova/tests/scheduler/test_abstract_scheduler.py delete mode 100644 nova/tests/scheduler/test_zone_aware_scheduler.py diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py new file mode 100644 index 000000000..eb924732a --- /dev/null +++ b/nova/scheduler/abstract_scheduler.py @@ -0,0 +1,403 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The AbstractScheduler is a base class Scheduler for creating instances +across zones. There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. 
Filtering Hosts based on required instance capabilities +""" + +import operator +import json + +import M2Crypto + +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions + +from nova import crypto +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import rpc + +from nova.compute import api as compute_api +from nova.scheduler import api +from nova.scheduler import driver + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.scheduler.abstract_scheduler') + + +class InvalidBlob(exception.NovaException): + message = _("Ill-formed or incorrectly routed 'blob' data sent " + "to instance create request.") + + +class AbstractScheduler(driver.Scheduler): + """Base class for creating Schedulers that can work across any nova + deployment, from simple designs to multiply-nested zones. + """ + + def _call_zone_method(self, context, method, specs, zones): + """Call novaclient zone method. Broken out for testing.""" + return api.call_zone_method(context, method, specs=specs, zones=zones) + + def _provision_resource_locally(self, context, build_plan_item, + request_spec, kwargs): + """Create the requested resource in this Zone.""" + host = build_plan_item['hostname'] + base_options = request_spec['instance_properties'] + image = request_spec['image'] + + # TODO(sandy): I guess someone needs to add block_device_mapping + # support at some point? Also, OS API has no concept of security + # groups. + instance = compute_api.API().create_db_entry_for_new_instance(context, + image, base_options, None, []) + + instance_id = instance['id'] + kwargs['instance_id'] = instance_id + + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Provisioning locally via compute node %(host)s") + % locals()) + + def _decrypt_blob(self, blob): + """Returns the decrypted blob or None if invalid. Broken out + for testing.""" + decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) + try: + json_entry = decryptor(blob) + return json.dumps(json_entry) + except M2Crypto.EVP.EVPError: + pass + return None + + def _ask_child_zone_to_create_instance(self, context, zone_info, + request_spec, kwargs): + """Once we have determined that the request should go to one + of our children, we need to fabricate a new POST /servers/ + call with the same parameters that were passed into us. + + Note that we have to reverse engineer from our args to get back the + image, flavor, ipgroup, etc. since the original call could have + come in from EC2 (which doesn't use these things).""" + + instance_type = request_spec['instance_type'] + instance_properties = request_spec['instance_properties'] + + name = instance_properties['display_name'] + image_ref = instance_properties['image_ref'] + meta = instance_properties['metadata'] + flavor_id = instance_type['flavorid'] + reservation_id = instance_properties['reservation_id'] + + files = kwargs['injected_files'] + ipgroup = None # Not supported in OS API ... yet + + child_zone = zone_info['child_zone'] + child_blob = zone_info['child_blob'] + zone = db.zone_get(context, child_zone) + url = zone.api_url + LOG.debug(_("Forwarding instance create call to child zone %(url)s" + ". 
ReservationID=%(reservation_id)s") + % locals()) + nova = None + try: + nova = novaclient.Client(zone.username, zone.password, None, url) + nova.authenticate() + except novaclient_exceptions.BadRequest, e: + raise exception.NotAuthorized(_("Bad credentials attempting " + "to talk to zone at %(url)s.") % locals()) + + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, + child_blob, reservation_id=reservation_id) + + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): + """Create the requested resource locally or in a child zone + based on what is stored in the zone blob info. + + Attempt to decrypt the blob to see if this request is: + 1. valid, and + 2. intended for this zone or a child zone. + + Note: If we have "blob" that means the request was passed + into us from a parent zone. If we have "child_blob" that + means we gathered the info from one of our children. + It's possible that, when we decrypt the 'blob' field, it + contains "child_blob" data. In which case we forward the + request.""" + + host_info = None + if "blob" in build_plan_item: + # Request was passed in from above. Is it for us? + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: + # Our immediate child zone provided this info ... + host_info = build_plan_item + + if not host_info: + raise InvalidBlob() + + # Valid data ... is it for us? + if 'child_zone' in host_info and 'child_blob' in host_info: + self._ask_child_zone_to_create_instance(context, host_info, + request_spec, kwargs) + else: + self._provision_resource_locally(context, host_info, request_spec, + kwargs) + + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) + return + + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. + """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) + + def schedule_run_instance(self, context, instance_id, request_spec, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone). + """ + + # TODO(sandy): We'll have to look for richer specs at some point. 
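# A request_spec that takes the short-circuit path just below carries an
# encrypted build-plan entry handed down from a parent zone. Its shape,
# sketched here with hypothetical values (only keys this module reads
# are shown):
#
#     request_spec = {
#         'blob': '<encrypted build plan entry>',
#         'num_instances': 1,
#         'instance_type': {'memory_mb': 2048, ...},
#         'instance_properties': {'display_name': ..., ...},
#     }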
+ + blob = request_spec.get('blob') + if blob: + self._provision_resource(context, request_spec, instance_id, + request_spec, kwargs) + return None + + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + + # Create build plan and provision ... + build_plan = self.select(context, request_spec) + if not build_plan: + raise driver.NoValidHost(_('No hosts were available')) + + for num in xrange(num_instances): + if not build_plan: + break + + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) + + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + + def select(self, context, request_spec, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children. + """ + return self._schedule(context, "compute", request_spec, + *args, **kwargs) + + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + def schedule(self, context, topic, request_spec, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + raise driver.NoValidHost(_('No hosts were available')) + + def _schedule(self, context, topic, request_spec, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + if topic != "compute": + raise NotImplementedError(_("Scheduler only understands" + " Compute nodes (for now)")) + + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] + + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break + + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) + + # Next, tack on the best weights from the child zones ... + json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) + child_results = self._call_zone_method(context, "select", + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. 
+ host_dict = {"weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. + """ + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def hold_filter_hosts(self, topic, request_spec, hosts=None): + """Filter the full host list (from the ZoneManager)""" + # NOTE(dabo): The logic used by the current _schedule() method + # is incorrect. Since this task is just to refactor the classes, + # I'm not fixing the logic now - that will be the next task. + # So for now this method is just renamed; afterwards this will + # become the filter_hosts() method, and the one below will + # be removed. + filter_name = request_spec.get('filter', None) + # Make sure that the requested filter is legitimate. + selected_filter = host_filter.choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = request_spec['instance_type'] + name, query = selected_filter.instance_type_to_filter(instance_type) + return selected_filter.filter_hosts(self.zone_manager, query) + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '_filter' function more appropriate. + """ + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no _filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True + + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): + """Derived classes may override this to provide more sophisticated + scheduling objectives + """ + # NOTE(sirp): The default logic is the same as the NoopCostFunction + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 
'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index b7bbbbcb8..45a8f40d8 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,7 +14,12 @@ # under the License. """ -Host Filter is a mechanism for requesting instance resources. +The Host Filter classes are a way to ensure that only hosts that are +appropriate are considered when creating a new instance. Hosts that are +either incompatible or insufficient to accept a newly-requested instance +are removed by Host Filter classes from consideration. Those that pass +the filter are then passed on for weighting or other process for ordering. + Three filters are included: AllHosts, Flavor & JSON. AllHosts just returns the full, unfiltered list of hosts. Flavor is a hard coded matching mechanism based on flavor criteria and JSON is an ad-hoc @@ -28,12 +33,6 @@ noted a need for a more expressive way of specifying instances. Since we don't want to get into building full DSL this is a simple form as an example of how this could be done. In reality, most consumers will use the more rigid filters such as FlavorFilter. - -Note: These are "required" capability filters. These capabilities -used must be present or the host will be excluded. The hosts -returned are then weighed by the Weighted Scheduler. Weights -can take the more esoteric factors into consideration (such as -server affinity and customer separation). """ import json @@ -41,9 +40,7 @@ import json from nova import exception from nova import flags from nova import log as logging -from nova.scheduler import zone_aware_scheduler from nova import utils -from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') @@ -125,9 +122,8 @@ class InstanceTypeFilter(HostFilter): spec_disk = instance_type['local_gb'] extra_specs = instance_type['extra_specs'] - if host_ram_mb >= spec_ram and \ - disk_bytes >= spec_disk and \ - self._satisfies_extra_specs(capabilities, instance_type): + if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and + self._satisfies_extra_specs(capabilities, instance_type)): selected_hosts.append((host, capabilities)) return selected_hosts @@ -309,7 +305,6 @@ def choose_host_filter(filter_name=None): function checks the filter name against a predefined set of acceptable filters. """ - if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in FILTERS: @@ -317,33 +312,3 @@ def choose_host_filter(filter_name=None): if host_match == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) - - -class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter to filter - hosts for weighing. The particular filter used may be passed in - as an argument or the default will be used. - - request_spec = {'filter': , - 'instance_type': } - """ - - def filter_hosts(self, topic, request_spec, hosts=None): - """Filter the full host list (from the ZoneManager)""" - - filter_name = request_spec.get('filter', None) - host_filter = choose_host_filter(filter_name) - - # TODO(sandy): We're only using InstanceType-based specs - # currently. Later we'll need to snoop for more detailed - # host filter requests. 
- instance_type = request_spec['instance_type'] - name, query = host_filter.instance_type_to_filter(instance_type) - return host_filter.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format. - """ - return [dict(weight=1, hostname=hostname, capabilities=caps) - for hostname, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 329107efe..a58b11289 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,11 +22,14 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. """ +# TODO(dabo): This class will be removed in the next merge prop; it remains now +# because much of the code will be refactored into different classes. + import collections from nova import flags from nova import log as logging -from nova.scheduler import zone_aware_scheduler +from nova.scheduler import abstract_scheduler from nova import utils from nova import exception @@ -61,7 +64,7 @@ def compute_fill_first_cost_fn(host): return free_mem -class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): +class LeastCostScheduler(abstract_scheduler.AbstractScheduler): def __init__(self, *args, **kwargs): self.cost_fns_cache = {} super(LeastCostScheduler, self).__init__(*args, **kwargs) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py deleted file mode 100644 index d1924c9f9..000000000 --- a/nova/scheduler/zone_aware_scheduler.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright (c) 2011 Openstack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The Zone Aware Scheduler is a base class Scheduler for creating instances -across zones. There are two expansion points to this class for: -1. Assigning Weights to hosts for requested instances -2. Filtering Hosts based on required instance capabilities -""" - -import operator -import json - -import M2Crypto - -from novaclient import v1_1 as novaclient -from novaclient import exceptions as novaclient_exceptions - -from nova import crypto -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import rpc - -from nova.compute import api as compute_api -from nova.scheduler import api -from nova.scheduler import driver - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') - - -class InvalidBlob(exception.NovaException): - message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") - - -class ZoneAwareScheduler(driver.Scheduler): - """Base class for creating Zone Aware Schedulers.""" - - def _call_zone_method(self, context, method, specs, zones): - """Call novaclient zone method. 
Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs, zones=zones) - - def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): - """Create the requested resource in this Zone.""" - host = build_plan_item['hostname'] - base_options = request_spec['instance_properties'] - image = request_spec['image'] - - # TODO(sandy): I guess someone needs to add block_device_mapping - # support at some point? Also, OS API has no concept of security - # groups. - instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) - - instance_id = instance['id'] - kwargs['instance_id'] = instance_id - - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) - LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) - - def _decrypt_blob(self, blob): - """Returns the decrypted blob or None if invalid. Broken out - for testing.""" - decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) - try: - json_entry = decryptor(blob) - return json.dumps(json_entry) - except M2Crypto.EVP.EVPError: - pass - return None - - def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): - """Once we have determined that the request should go to one - of our children, we need to fabricate a new POST /servers/ - call with the same parameters that were passed into us. - - Note that we have to reverse engineer from our args to get back the - image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - - instance_type = request_spec['instance_type'] - instance_properties = request_spec['instance_properties'] - - name = instance_properties['display_name'] - image_ref = instance_properties['image_ref'] - meta = instance_properties['metadata'] - flavor_id = instance_type['flavorid'] - reservation_id = instance_properties['reservation_id'] - - files = kwargs['injected_files'] - ipgroup = None # Not supported in OS API ... yet - - child_zone = zone_info['child_zone'] - child_blob = zone_info['child_blob'] - zone = db.zone_get(context, child_zone) - url = zone.api_url - LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) - nova = None - try: - nova = novaclient.Client(zone.username, zone.password, None, url) - nova.authenticate() - except novaclient_exceptions.BadRequest, e: - raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - - nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) - - def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): - """Create the requested resource locally or in a child zone - based on what is stored in the zone blob info. - - Attempt to decrypt the blob to see if this request is: - 1. valid, and - 2. intended for this zone or a child zone. - - Note: If we have "blob" that means the request was passed - into us from a parent zone. If we have "child_blob" that - means we gathered the info from one of our children. - It's possible that, when we decrypt the 'blob' field, it - contains "child_blob" data. In which case we forward the - request.""" - - host_info = None - if "blob" in build_plan_item: - # Request was passed in from above. Is it for us? 
- host_info = self._decrypt_blob(build_plan_item['blob']) - elif "child_blob" in build_plan_item: - # Our immediate child zone provided this info ... - host_info = build_plan_item - - if not host_info: - raise InvalidBlob() - - # Valid data ... is it for us? - if 'child_zone' in host_info and 'child_blob' in host_info: - self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) - else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) - - def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): - """Create the requested resource in this Zone or a child zone.""" - if "hostname" in build_plan_item: - self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) - return - - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) - - def _adjust_child_weights(self, child_results, zones): - """Apply the Scale and Offset values from the Zone definition - to adjust the weights returned from the child zones. Alters - child_results in place. - """ - for zone_id, result in child_results: - if not result: - continue - - assert isinstance(zone_id, int) - - for zone_rec in zones: - if zone_rec['id'] != zone_id: - continue - - for item in result: - try: - offset = zone_rec['weight_offset'] - scale = zone_rec['weight_scale'] - raw_weight = item['weight'] - cooked_weight = offset + scale * raw_weight - item['weight'] = cooked_weight - item['raw_weight'] = raw_weight - except KeyError: - LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) - - def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): - """This method is called from nova.compute.api to provision - an instance. However we need to look at the parameters being - passed in to see if this is a request to: - 1. Create a Build Plan and then provision, or - 2. Use the Build Plan information in the request parameters - to simply create the instance (either in this zone or - a child zone). - """ - - # TODO(sandy): We'll have to look for richer specs at some point. - - blob = request_spec.get('blob') - if blob: - self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) - return None - - num_instances = request_spec.get('num_instances', 1) - LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % - locals()) - - # Create build plan and provision ... - build_plan = self.select(context, request_spec) - if not build_plan: - raise driver.NoValidHost(_('No hosts were available')) - - for num in xrange(num_instances): - if not build_plan: - break - - build_plan_item = build_plan.pop(0) - self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) - - # Returning None short-circuits the routing to Compute (since - # we've already done it here) - return None - - def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information - corresponding to the best hosts to service the request. Any - child zone information has been encrypted so as not to reveal - anything about the children. - """ - return self._schedule(context, "compute", request_spec, - *args, **kwargs) - - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. 
- def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one - best-suited host for this request. - """ - raise driver.NoValidHost(_('No hosts were available')) - - def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, - ordered by their fitness. - """ - - if topic != "compute": - raise NotImplementedError(_("Zone Aware Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... - json_spec = json.dumps(request_spec) - all_zones = db.zone_get_all(context) - child_results = self._call_zone_method(context, "select", - specs=json_spec, zones=all_zones) - self._adjust_child_weights(child_results, all_zones) - for child_zone, result in child_results: - for weighting in result: - # Remember the child_zone so we can get back to - # it later if needed. This implicitly builds a zone - # path structure. - host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def filter_hosts(self, topic, request_spec, host_list=None): - """Return a list of hosts which are acceptable for scheduling. - Return value should be a list of (hostname, capability_dict)s. - Derived classes may override this, but may find the - '_filter' function more appropriate. 
- """ - - def _default_filter(self, hostname, capabilities, request_spec): - """Default filter function if there's no _filter""" - # NOTE(sirp): The default logic is the equivalent to - # AllHostsFilter - return True - - filter_func = getattr(self, '%s_filter' % topic, _default_filter) - - if host_list is None: - first_run = True - host_list = self.zone_manager.service_states.iteritems() - else: - first_run = False - - filtered_hosts = [] - for host, services in host_list: - if first_run: - if topic not in services: - continue - services = services[topic] - if filter_func(host, services, request_spec): - filtered_hosts.append((host, services)) - return filtered_hosts - - def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes may override this to provide more sophisticated - scheduling objectives - """ - # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=hostname, capabilities=capabilities) - for hostname, capabilities in hosts] - - def compute_consume(self, capabilities, instance_type): - """Consume compute resources for selected host""" - - requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 - capabilities['host_memory_free'] -= requested_mem - - def consume_resources(self, topic, capabilities, instance_type): - """Consume resources for a specific host. 'host' is a tuple - of the hostname and the services""" - - consume_func = getattr(self, '%s_consume' % topic, None) - if not consume_func: - return - consume_func(capabilities, instance_type) diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py new file mode 100644 index 000000000..f4f5cc233 --- /dev/null +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -0,0 +1,364 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Abstract Scheduler. +""" + +import json + +import nova.db + +from nova import exception +from nova import rpc +from nova import test +from nova.compute import api as compute_api +from nova.scheduler import driver +from nova.scheduler import abstract_scheduler +from nova.scheduler import zone_manager + + +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
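+    # For example (illustrative values derived from the formulas above):
+    # _host_caps(1) reports host_memory_free=20 and disk_available=200.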
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + +class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler): + # No need to stub anything at the moment + pass + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'host_memory_free': 1073741824}, + }, + 'host2': { + 'compute': {'host_memory_free': 2147483648}, + }, + 'host3': { + 'compute': {'host_memory_free': 3221225472}, + }, + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs, zones): + return [] + + +# Hmm, I should probably be using mox for this. +was_called = False + + +def fake_provision_resource(context, item, instance_id, request_spec, kwargs): + global was_called + was_called = True + + +def fake_ask_child_zone_to_create_instance(context, zone_info, + request_spec, kwargs): + global was_called + was_called = True + + +def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): + global was_called + was_called = True + + +def fake_provision_resource_from_blob(context, item, instance_id, + request_spec, kwargs): + global was_called + was_called = True + + +def fake_decrypt_blob_returns_local_info(blob): + return {'hostname': 'foooooo'} # values aren't important. + + +def fake_decrypt_blob_returns_child_info(blob): + return {'child_zone': True, + 'child_blob': True} # values aren't important. Keys are. + + +def fake_call_zone_method(context, method, specs, zones): + return [ + (1, [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + (2, [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + (3, [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + + +def fake_zone_get_all(context): + return [ + dict(id=1, api_url='zone1', + username='admin', password='password', + weight_offset=0.0, weight_scale=1.0), + dict(id=2, api_url='zone2', + username='admin', password='password', + weight_offset=1000.0, weight_scale=1.0), + dict(id=3, api_url='zone3', + username='admin', password='password', + weight_offset=0.0, weight_scale=1000.0), + ] + + +class AbstractSchedulerTestCase(test.TestCase): + """Test case for Abstract Scheduler.""" + + def test_abstract_scheduler(self): + """ + Create a nested set of FakeZones, try to build multiple instances + and ensure that a select call returns the appropriate build plan. 
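+        With the fakes above (3 local hosts in FakeZoneManager and 3 child
+        zones returning 4 weightings each), a request for 4 instances
+        should produce a 16-entry plan: 4 local picks plus 12 child
+        zone entries.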
+ """ + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) + + # 4 from local zones, 12 from remotes + self.assertEqual(16, len(build_plan)) + + hostnames = [plan_item['hostname'] + for plan_item in build_plan if 'hostname' in plan_item] + # 4 local hosts + self.assertEqual(4, len(hostnames)) + + def test_adjust_child_weights(self): + """Make sure the weights returned by child zones are + properly adjusted based on the scale/offset in the zone + db entries. + """ + sched = FakeAbstractScheduler() + child_results = fake_call_zone_method(None, None, None, None) + zones = fake_zone_get_all(None) + sched._adjust_child_weights(child_results, zones) + scaled = [130000, 131000, 132000, 3000] + for zone, results in child_results: + for item in results: + w = item['weight'] + if zone == 'zone1': # No change + self.assertTrue(w < 1000.0) + if zone == 'zone2': # Offset +1000 + self.assertTrue(w >= 1000.0 and w < 2000) + if zone == 'zone3': # Scale x1000 + self.assertEqual(scaled.pop(0), w) + + def test_empty_abstract_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) + + def test_schedule_do_not_schedule_with_hint(self): + """ + Check the local/child zone routing in the run_instance() call. + If the zone_blob hint was passed in, don't re-schedule. 
+ """ + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_provision_resource', fake_provision_resource) + request_spec = { + 'instance_properties': {}, + 'instance_type': {}, + 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', + 'blob': "Non-None blob data", + } + + result = sched.schedule_run_instance(None, 1, request_spec) + self.assertEquals(None, result) + self.assertTrue(was_called) + + def test_provision_resource_local(self): + """Provision a resource locally or remotely.""" + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_provision_resource_locally', + fake_provision_resource_locally) + + request_spec = {'hostname': "foo"} + sched._provision_resource(None, request_spec, 1, request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_remote(self): + """Provision a resource locally or remotely.""" + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_provision_resource_from_blob', + fake_provision_resource_from_blob) + + request_spec = {} + sched._provision_resource(None, request_spec, 1, request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_from_blob_empty(self): + """Provision a resource locally or remotely given no hints.""" + global was_called + sched = FakeAbstractScheduler() + request_spec = {} + self.assertRaises(abstract_scheduler.InvalidBlob, + sched._provision_resource_from_blob, + None, {}, 1, {}, {}) + + def test_provision_resource_from_blob_with_local_blob(self): + """ + Provision a resource locally or remotely when blob hint passed in. + """ + global was_called + sched = FakeAbstractScheduler() + was_called = False + + def fake_create_db_entry_for_new_instance(self, context, + image, base_options, security_group, + block_device_mapping, num=1): + global was_called + was_called = True + # return fake instances + return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'} + + def fake_rpc_cast(*args, **kwargs): + pass + + self.stubs.Set(sched, '_decrypt_blob', + fake_decrypt_blob_returns_local_info) + self.stubs.Set(compute_api.API, + 'create_db_entry_for_new_instance', + fake_create_db_entry_for_new_instance) + self.stubs.Set(rpc, 'cast', fake_rpc_cast) + + build_plan_item = {'blob': "Non-None blob data"} + request_spec = {'image': {}, 'instance_properties': {}} + + sched._provision_resource_from_blob(None, build_plan_item, 1, + request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_from_blob_with_child_blob(self): + """ + Provision a resource locally or remotely when child blob hint + passed in. + """ + global was_called + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_decrypt_blob', + fake_decrypt_blob_returns_child_info) + was_called = False + self.stubs.Set(sched, '_ask_child_zone_to_create_instance', + fake_ask_child_zone_to_create_instance) + + request_spec = {'blob': "Non-None blob data"} + + sched._provision_resource_from_blob(None, request_spec, 1, + request_spec, {}) + self.assertTrue(was_called) + + def test_provision_resource_from_blob_with_immediate_child_blob(self): + """ + Provision a resource locally or remotely when blob hint passed in + from an immediate child. 
+ """ + global was_called + sched = FakeAbstractScheduler() + was_called = False + self.stubs.Set(sched, '_ask_child_zone_to_create_instance', + fake_ask_child_zone_to_create_instance) + + request_spec = {'child_blob': True, 'child_zone': True} + + sched._provision_resource_from_blob(None, request_spec, 1, + request_spec, {}) + self.assertTrue(was_called) + + def test_decrypt_blob(self): + """Test that the decrypt method works.""" + + fixture = FakeAbstractScheduler() + test_data = {"foo": "bar"} + + class StubDecryptor(object): + def decryptor(self, key): + return lambda blob: blob + + self.stubs.Set(abstract_scheduler, 'crypto', + StubDecryptor()) + + self.assertEqual(fixture._decrypt_blob(test_data), + json.dumps(test_data)) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index fbe6b2f77..de7581d0a 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -18,7 +18,7 @@ Tests For Least Cost Scheduler from nova import test from nova.scheduler import least_cost -from nova.tests.scheduler import test_zone_aware_scheduler +from nova.tests.scheduler import test_abstract_scheduler MB = 1024 * 1024 @@ -70,7 +70,7 @@ class LeastCostSchedulerTestCase(test.TestCase): zone_manager = FakeZoneManager() - states = test_zone_aware_scheduler.fake_zone_manager_service_states( + states = test_abstract_scheduler.fake_zone_manager_service_states( num_hosts=10) zone_manager.service_states = states diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py deleted file mode 100644 index 788efca52..000000000 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Zone Aware Scheduler. -""" - -import json - -import nova.db - -from nova import exception -from nova import rpc -from nova import test -from nova.compute import api as compute_api -from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler -from nova.scheduler import zone_manager - - -def _host_caps(multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. 
- return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - -def fake_zone_manager_service_states(num_hosts): - states = {} - for x in xrange(num_hosts): - states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} - return states - - -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - # No need to stub anything at the moment - pass - - -class FakeZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'host_memory_free': 1073741824}, - }, - 'host2': { - 'compute': {'host_memory_free': 2147483648}, - }, - 'host3': { - 'compute': {'host_memory_free': 3221225472}, - }, - } - - -class FakeEmptyZoneManager(zone_manager.ZoneManager): - def __init__(self): - self.service_states = {} - - -def fake_empty_call_zone_method(context, method, specs, zones): - return [] - - -# Hmm, I should probably be using mox for this. -was_called = False - - -def fake_provision_resource(context, item, instance_id, request_spec, kwargs): - global was_called - was_called = True - - -def fake_ask_child_zone_to_create_instance(context, zone_info, - request_spec, kwargs): - global was_called - was_called = True - - -def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): - global was_called - was_called = True - - -def fake_provision_resource_from_blob(context, item, instance_id, - request_spec, kwargs): - global was_called - was_called = True - - -def fake_decrypt_blob_returns_local_info(blob): - return {'hostname': 'foooooo'} # values aren't important. - - -def fake_decrypt_blob_returns_child_info(blob): - return {'child_zone': True, - 'child_blob': True} # values aren't important. Keys are. - - -def fake_call_zone_method(context, method, specs, zones): - return [ - (1, [ - dict(weight=1, blob='AAAAAAA'), - dict(weight=111, blob='BBBBBBB'), - dict(weight=112, blob='CCCCCCC'), - dict(weight=113, blob='DDDDDDD'), - ]), - (2, [ - dict(weight=120, blob='EEEEEEE'), - dict(weight=2, blob='FFFFFFF'), - dict(weight=122, blob='GGGGGGG'), - dict(weight=123, blob='HHHHHHH'), - ]), - (3, [ - dict(weight=130, blob='IIIIIII'), - dict(weight=131, blob='JJJJJJJ'), - dict(weight=132, blob='KKKKKKK'), - dict(weight=3, blob='LLLLLLL'), - ]), - ] - - -def fake_zone_get_all(context): - return [ - dict(id=1, api_url='zone1', - username='admin', password='password', - weight_offset=0.0, weight_scale=1.0), - dict(id=2, api_url='zone2', - username='admin', password='password', - weight_offset=1000.0, weight_scale=1.0), - dict(id=3, api_url='zone3', - username='admin', password='password', - weight_offset=0.0, weight_scale=1000.0), - ] - - -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" - - def test_zone_aware_scheduler(self): - """ - Create a nested set of FakeZones, try to build multiple instances - and ensure that a select call returns the appropriate build plan. 
- """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - build_plan = sched.select(fake_context, - {'instance_type': {'memory_mb': 512}, - 'num_instances': 4}) - - # 4 from local zones, 12 from remotes - self.assertEqual(16, len(build_plan)) - - hostnames = [plan_item['hostname'] - for plan_item in build_plan if 'hostname' in plan_item] - # 4 local hosts - self.assertEqual(4, len(hostnames)) - - def test_adjust_child_weights(self): - """Make sure the weights returned by child zones are - properly adjusted based on the scale/offset in the zone - db entries. - """ - sched = FakeZoneAwareScheduler() - child_results = fake_call_zone_method(None, None, None, None) - zones = fake_zone_get_all(None) - sched._adjust_child_weights(child_results, zones) - scaled = [130000, 131000, 132000, 3000] - for zone, results in child_results: - for item in results: - w = item['weight'] - if zone == 'zone1': # No change - self.assertTrue(w < 1000.0) - if zone == 'zone2': # Offset +1000 - self.assertTrue(w >= 1000.0 and w < 2000) - if zone == 'zone3': # Scale x1000 - self.assertEqual(scaled.pop(0), w) - - def test_empty_zone_aware_scheduler(self): - """ - Ensure empty hosts & child_zones result in NoValidHosts exception. - """ - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) - self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) - - zm = FakeEmptyZoneManager() - sched.set_zone_manager(zm) - - fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, - fake_context, 1, - dict(host_filter=None, instance_type={})) - - def test_schedule_do_not_schedule_with_hint(self): - """ - Check the local/child zone routing in the run_instance() call. - If the zone_blob hint was passed in, don't re-schedule. 
- """ - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource', fake_provision_resource) - request_spec = { - 'instance_properties': {}, - 'instance_type': {}, - 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', - 'blob': "Non-None blob data", - } - - result = sched.schedule_run_instance(None, 1, request_spec) - self.assertEquals(None, result) - self.assertTrue(was_called) - - def test_provision_resource_local(self): - """Provision a resource locally or remotely.""" - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource_locally', - fake_provision_resource_locally) - - request_spec = {'hostname': "foo"} - sched._provision_resource(None, request_spec, 1, request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_remote(self): - """Provision a resource locally or remotely.""" - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_provision_resource_from_blob', - fake_provision_resource_from_blob) - - request_spec = {} - sched._provision_resource(None, request_spec, 1, request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_empty(self): - """Provision a resource locally or remotely given no hints.""" - global was_called - sched = FakeZoneAwareScheduler() - request_spec = {} - self.assertRaises(zone_aware_scheduler.InvalidBlob, - sched._provision_resource_from_blob, - None, {}, 1, {}, {}) - - def test_provision_resource_from_blob_with_local_blob(self): - """ - Provision a resource locally or remotely when blob hint passed in. - """ - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - - def fake_create_db_entry_for_new_instance(self, context, - image, base_options, security_group, - block_device_mapping, num=1): - global was_called - was_called = True - # return fake instances - return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'} - - def fake_rpc_cast(*args, **kwargs): - pass - - self.stubs.Set(sched, '_decrypt_blob', - fake_decrypt_blob_returns_local_info) - self.stubs.Set(compute_api.API, - 'create_db_entry_for_new_instance', - fake_create_db_entry_for_new_instance) - self.stubs.Set(rpc, 'cast', fake_rpc_cast) - - build_plan_item = {'blob': "Non-None blob data"} - request_spec = {'image': {}, 'instance_properties': {}} - - sched._provision_resource_from_blob(None, build_plan_item, 1, - request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_with_child_blob(self): - """ - Provision a resource locally or remotely when child blob hint - passed in. - """ - global was_called - sched = FakeZoneAwareScheduler() - self.stubs.Set(sched, '_decrypt_blob', - fake_decrypt_blob_returns_child_info) - was_called = False - self.stubs.Set(sched, '_ask_child_zone_to_create_instance', - fake_ask_child_zone_to_create_instance) - - request_spec = {'blob': "Non-None blob data"} - - sched._provision_resource_from_blob(None, request_spec, 1, - request_spec, {}) - self.assertTrue(was_called) - - def test_provision_resource_from_blob_with_immediate_child_blob(self): - """ - Provision a resource locally or remotely when blob hint passed in - from an immediate child. 
- """ - global was_called - sched = FakeZoneAwareScheduler() - was_called = False - self.stubs.Set(sched, '_ask_child_zone_to_create_instance', - fake_ask_child_zone_to_create_instance) - - request_spec = {'child_blob': True, 'child_zone': True} - - sched._provision_resource_from_blob(None, request_spec, 1, - request_spec, {}) - self.assertTrue(was_called) - - def test_decrypt_blob(self): - """Test that the decrypt method works.""" - - fixture = FakeZoneAwareScheduler() - test_data = {"foo": "bar"} - - class StubDecryptor(object): - def decryptor(self, key): - return lambda blob: blob - - self.stubs.Set(zone_aware_scheduler, 'crypto', - StubDecryptor()) - - self.assertEqual(fixture._decrypt_blob(test_data), - json.dumps(test_data)) -- cgit From b29bc97d5a69abe71dea5b9ff9dcfc65fcd59cc9 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 11 Aug 2011 17:10:25 -0500 Subject: Check compressed image size and PEP8 cleanup. --- nova/compute/manager.py | 40 +++++++++++++++++++++++ nova/exception.py | 4 +++ nova/tests/api/openstack/contrib/test_keypairs.py | 17 ++++++++-- 3 files changed, 59 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d38213083..1c3485342 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -321,10 +321,50 @@ class ComputeManager(manager.SchedulerDependentManager): def _run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" + def _check_image_size(): + """Ensure image is smaller than the maximum size allowed by the + instance_type. + + The image stored in Glance is potentially compressed, so we use two + checks to ensure that the size isn't exceeded: + + 1) This one - checks compressed size, this a quick check to + eliminate any images which are obviously too large + + 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This + is a slower check since it requires uncompressing the entire + image, but is accurate because it reflects the image's + actual size. 
+ """ + image_href = instance['image_ref'] + image_service, image_id = nova.image.get_image_service(image_href) + image_meta = image_service.show(context, image_id) + size_bytes = image_meta['size'] + + instance_type_id = instance['instance_type_id'] + instance_type = self.db.instance_type_get(context, + instance_type_id) + allowed_size_gb = instance_type['local_gb'] + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 + + LOG.debug(_("image_id=%(image_id)d, image_size_bytes=" + "%(size_bytes)d, allowed_size_bytes=" + "%(allowed_size_bytes)d") % locals()) + + if size_bytes > allowed_size_bytes: + LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded" + " instance_type allowed size " + "%(allowed_size_bytes)d") + % locals()) + raise exception.ImageTooLarge() + context = context.elevated() instance = self.db.instance_get(context, instance_id) if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) + + _check_image_size() + LOG.audit(_("instance %s: starting..."), instance_id, context=context) updates = {} diff --git a/nova/exception.py b/nova/exception.py index 0d60cb0bf..77ebf3c88 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -717,3 +717,7 @@ class CannotResizeToSameSize(NovaException): class CannotResizeToSmallerSize(NovaException): message = _("Resizing to a smaller size is not supported.") + + +class ImageTooLarge(NovaException): + message = _("Image is larger than instance type allows") diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py index c9dc34d65..cbc815b1a 100644 --- a/nova/tests/api/openstack/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/contrib/test_keypairs.py @@ -28,6 +28,7 @@ def fake_keypair(name): 'fingerprint': 'FAKE_FINGERPRINT', 'name': name} + def db_key_pair_get_all_by_user(self, user_id): return [fake_keypair('FAKE')] @@ -78,7 +79,20 @@ class KeypairsTest(test.TestCase): def test_keypair_import(self): body = {'keypair': {'name': 'create_test', - 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}} + 'public_key': 'ssh-rsa ' + 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDBY' + 'IznAx9D7118Q1VKGpXy2HDiKyUTM8XcUu' + 'hQpo0srqb9rboUp4a9NmCwpWpeElDLuva' + '707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oL' + 'JwqvaSaWUbyT1vtryRqy6J3TecN0WINY7' + '1f4uymiMZP0wby4bKBcYnac8KiCIlvkEl' + '0ETjkOGUq8OyWRmn7ljj5SESEUdBP0Jnu' + 'TFKddWTU/wD6wydeJaUhBTqOlHn0kX1Gy' + 'qoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJ' + 'LCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLo' + 'nRrds+cev8p6krSSrxWOwBbHkXa6OciiJ' + 'DvkRzJXzf'}} + req = webob.Request.blank('/v1.1/os-keypairs') req.method = 'POST' req.body = json.dumps(body) @@ -96,4 +110,3 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) - -- cgit From 0bc781425bea1162cd81bdc95f49d50068857057 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 10:01:04 -0500 Subject: start of day --- nova/scheduler/abstract_scheduler.py | 180 +++++----------- nova/scheduler/base_scheduler.py | 403 +++++++++++++++++++++++++++++++++++ 2 files changed, 459 insertions(+), 124 deletions(-) create 
mode 100644 nova/scheduler/base_scheduler.py

diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py
index eb924732a..a6457cc50 100644
--- a/nova/scheduler/abstract_scheduler.py
+++ b/nova/scheduler/abstract_scheduler.py
@@ -14,10 +14,10 @@
 # under the License.
 
 """
-The AbsractScheduler is a base class Scheduler for creating instances
-across zones. There are two expansion points to this class for:
-1. Assigning Weights to hosts for requested instances
-2. Filtering Hosts based on required instance capabilities
+The AbstractScheduler is an abstract class Scheduler for creating instances
+locally or across zones. Two methods should be overridden in order to
+customize the behavior: filter_hosts() and weigh_hosts(). The default
+behavior is to simply select all hosts and weight them the same.
 """
 
 import operator
@@ -185,13 +185,11 @@ class AbstractScheduler(driver.Scheduler):
         for zone_id, result in child_results:
             if not result:
                 continue
-
             assert isinstance(zone_id, int)
             for zone_rec in zones:
                 if zone_rec['id'] != zone_id:
                     continue
-
                 for item in result:
                     try:
                         offset = zone_rec['weight_offset']
@@ -202,10 +200,10 @@ class AbstractScheduler(driver.Scheduler):
                         item['raw_weight'] = raw_weight
                     except KeyError:
                         LOG.exception(_("Bad child zone scaling values "
-                                "for Zone: %(zone_id)s") % locals())
+                            "for Zone: %(zone_id)s") % locals())
 
     def schedule_run_instance(self, context, instance_id, request_spec,
-                              *args, **kwargs):
+            *args, **kwargs):
         """This method is called from nova.compute.api to provision
         an instance. However we need to look at the parameters being
         passed in to see if this is a request to:
@@ -214,13 +212,11 @@ class AbstractScheduler(driver.Scheduler):
             to simply create the instance (either in this zone or
             a child zone).
         """
-
         # TODO(sandy): We'll have to look for richer specs at some point.
-
         blob = request_spec.get('blob')
         if blob:
             self._provision_resource(context, request_spec, instance_id,
-                                     request_spec, kwargs)
+                    request_spec, kwargs)
             return None
@@ -238,7 +234,7 @@ class AbstractScheduler(driver.Scheduler):
 
             build_plan_item = build_plan.pop(0)
             self._provision_resource(context, build_plan_item, instance_id,
-                                     request_spec, kwargs)
+                    request_spec, kwargs)
 
         # Returning None short-circuits the routing to Compute (since
         # we've already done it here)
@@ -251,58 +247,49 @@ class AbstractScheduler(driver.Scheduler):
         anything about the children.
         """
         return self._schedule(context, "compute", request_spec,
-                              *args, **kwargs)
+                *args, **kwargs)
 
-    # TODO(sandy): We're only focused on compute instances right now,
-    # so we don't implement the default "schedule()" method required
-    # of Schedulers.
     def schedule(self, context, topic, request_spec, *args, **kwargs):
         """The schedule() contract requires we return the one
         best-suited host for this request.
         """
-        raise driver.NoValidHost(_('No hosts were available'))
+        # TODO(sandy): We're only focused on compute instances right now,
+        # so we don't implement the default "schedule()" method required
+        # of Schedulers.
+        msg = _("No host selection for %s defined.") % topic
+        raise driver.NoValidHost(msg)
 
     def _schedule(self, context, topic, request_spec, *args, **kwargs):
         """Returns a list of hosts that meet the required specs,
         ordered by their fitness.
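+
+        An entry in the returned list looks like (values illustrative
+        only): {'weight': 1, 'hostname': 'host1', 'capabilities': {...}}
+        for a local host, or {'weight': 1, 'child_zone': 2,
+        'child_blob': '...'} for a host offered by a child zone.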
""" - if topic != "compute": - raise NotImplementedError(_("Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... + msg = _("Scheduler only understands Compute nodes (for now)") + raise NotImplementedError(msg) + + # Get all available hosts. + all_hosts = self.zone_manager.service_states.iteritems() + print "-"*88 + ss = self.zone_manager.service_states + print ss + print "KEYS", ss.keys() + print "-"*88 + + unfiltered_hosts = [(host, services[host]) + for host, services in all_hosts + if topic in services[host]] + + # Filter local hosts based on requirements ... + filtered_hosts = self.filter_hosts(topic, request_spec, host_list) + if not filtered_hosts: + LOG.warn(_("No hosts available")) + return [] + + # weigh the selected hosts. + # weighted_hosts = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts) + # Next, tack on the host weights from the child zones json_spec = json.dumps(request_spec) all_zones = db.zone_get_all(context) child_results = self._call_zone_method(context, "select", @@ -314,14 +301,13 @@ class AbstractScheduler(driver.Scheduler): # it later if needed. This implicitly builds a zone # path structure. host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted_hosts.append(host_dict) + weighted_hosts.sort(key=operator.itemgetter('weight')) + return weighted_hosts - def compute_filter(self, hostname, capabilities, request_spec): + def basic_ram_filter(self, hostname, capabilities, request_spec): """Return whether or not we can schedule to this compute node. Derived classes should override this and return True if the host is acceptable for scheduling. @@ -330,74 +316,20 @@ class AbstractScheduler(driver.Scheduler): requested_mem = instance_type['memory_mb'] * 1024 * 1024 return capabilities['host_memory_free'] >= requested_mem - def hold_filter_hosts(self, topic, request_spec, hosts=None): - """Filter the full host list (from the ZoneManager)""" - # NOTE(dabo): The logic used by the current _schedule() method - # is incorrect. 
-        # I'm not fixing the logic now - that will be the next task.
-        # So for now this method is just renamed; afterwards this will
-        # become the filter_hosts() method, and the one below will
-        # be removed.
-        filter_name = request_spec.get('filter', None)
-        # Make sure that the requested filter is legitimate.
-        selected_filter = host_filter.choose_host_filter(filter_name)
-
-        # TODO(sandy): We're only using InstanceType-based specs
-        # currently. Later we'll need to snoop for more detailed
-        # host filter requests.
-        instance_type = request_spec['instance_type']
-        name, query = selected_filter.instance_type_to_filter(instance_type)
-        return selected_filter.filter_hosts(self.zone_manager, query)
-
     def filter_hosts(self, topic, request_spec, host_list=None):
-        """Return a list of hosts which are acceptable for scheduling.
-        Return value should be a list of (hostname, capability_dict)s.
-        Derived classes may override this, but may find the
-        '_filter' function more appropriate.
+        """Filter the full host list returned from the ZoneManager. By
+        default, this method only applies the basic_ram_filter(), meaning
+        all hosts with at least enough RAM for the requested instance are
+        returned.
+
+        Override in subclasses to provide greater selectivity.
         """
-        def _default_filter(self, hostname, capabilities, request_spec):
-            """Default filter function if there's no _filter"""
-            # NOTE(sirp): The default logic is the equivalent to
-            # AllHostsFilter
-            return True
-
-        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
-
-        if host_list is None:
-            first_run = True
-            host_list = self.zone_manager.service_states.iteritems()
-        else:
-            first_run = False
-
-        filtered_hosts = []
-        for host, services in host_list:
-            if first_run:
-                if topic not in services:
-                    continue
-                services = services[topic]
-            if filter_func(host, services, request_spec):
-                filtered_hosts.append((host, services))
-        return filtered_hosts
+        return [(host, services) for host, services in host_list
+                if self.basic_ram_filter(host, services, request_spec)]
 
     def weigh_hosts(self, topic, request_spec, hosts):
-        """Derived classes may override this to provide more sophisticated
-        scheduling objectives
+        """This version assigns a weight of 1 to all hosts, making selection
+        of any host basically a random event. Override this method in your
+        subclass to add logic to prefer one potential host over another.
         """
-        # NOTE(sirp): The default logic is the same as the NoopCostFunction
         return [dict(weight=1, hostname=hostname, capabilities=capabilities)
                 for hostname, capabilities in hosts]
-
-    def compute_consume(self, capabilities, instance_type):
-        """Consume compute resources for selected host"""
-
-        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
-        capabilities['host_memory_free'] -= requested_mem
-
-    def consume_resources(self, topic, capabilities, instance_type):
-        """Consume resources for a specific host. 'host' is a tuple
-        of the hostname and the services"""
-
-        consume_func = getattr(self, '%s_consume' % topic, None)
-        if not consume_func:
-            return
-        consume_func(capabilities, instance_type)
diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py
new file mode 100644
index 000000000..43a6ab2b1
--- /dev/null
+++ b/nova/scheduler/base_scheduler.py
@@ -0,0 +1,403 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The BaseScheduler is the base class Scheduler for creating instances
+across zones. There are two expansion points to this class for:
+1. Assigning Weights to hosts for requested instances
+2. Filtering Hosts based on required instance capabilities
+"""
+
+import operator
+import json
+
+import M2Crypto
+
+from novaclient import v1_1 as novaclient
+from novaclient import exceptions as novaclient_exceptions
+
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+
+from nova.compute import api as compute_api
+from nova.scheduler import api
+from nova.scheduler import driver
+from nova.scheduler import host_filter
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.scheduler.base_scheduler')
+
+
+class InvalidBlob(exception.NovaException):
+    message = _("Ill-formed or incorrectly routed 'blob' data sent "
+                "to instance create request.")
+
+
+class BaseScheduler(driver.Scheduler):
+    """Base class for creating Schedulers that can work across any nova
+    deployment, from simple designs to multiply-nested zones.
+    """
+
+    def _call_zone_method(self, context, method, specs, zones):
+        """Call novaclient zone method. Broken out for testing."""
+        return api.call_zone_method(context, method, specs=specs, zones=zones)
+
+    def _provision_resource_locally(self, context, build_plan_item,
+            request_spec, kwargs):
+        """Create the requested resource in this Zone."""
+        host = build_plan_item['hostname']
+        base_options = request_spec['instance_properties']
+        image = request_spec['image']
+
+        # TODO(sandy): I guess someone needs to add block_device_mapping
+        # support at some point? Also, OS API has no concept of security
+        # groups.
+        instance = compute_api.API().create_db_entry_for_new_instance(context,
+                image, base_options, None, [])
+
+        instance_id = instance['id']
+        kwargs['instance_id'] = instance_id
+
+        rpc.cast(context,
+                 db.queue_get_for(context, "compute", host),
+                 {"method": "run_instance",
+                  "args": kwargs})
+        LOG.debug(_("Provisioning locally via compute node %(host)s")
+                % locals())
+
+    def _decrypt_blob(self, blob):
+        """Returns the decrypted blob or None if invalid. Broken out
+        for testing."""
+        decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
+        try:
+            json_entry = decryptor(blob)
+            return json.dumps(json_entry)
+        except M2Crypto.EVP.EVPError:
+            pass
+        return None
+
+    def _ask_child_zone_to_create_instance(self, context, zone_info,
+            request_spec, kwargs):
+        """Once we have determined that the request should go to one
+        of our children, we need to fabricate a new POST /servers/
+        call with the same parameters that were passed into us.
+
+        Note that we have to reverse engineer from our args to get back the
+        image, flavor, ipgroup, etc.
since the original call could have + come in from EC2 (which doesn't use these things).""" + + instance_type = request_spec['instance_type'] + instance_properties = request_spec['instance_properties'] + + name = instance_properties['display_name'] + image_ref = instance_properties['image_ref'] + meta = instance_properties['metadata'] + flavor_id = instance_type['flavorid'] + reservation_id = instance_properties['reservation_id'] + + files = kwargs['injected_files'] + ipgroup = None # Not supported in OS API ... yet + + child_zone = zone_info['child_zone'] + child_blob = zone_info['child_blob'] + zone = db.zone_get(context, child_zone) + url = zone.api_url + LOG.debug(_("Forwarding instance create call to child zone %(url)s" + ". ReservationID=%(reservation_id)s") + % locals()) + nova = None + try: + nova = novaclient.Client(zone.username, zone.password, None, url) + nova.authenticate() + except novaclient_exceptions.BadRequest, e: + raise exception.NotAuthorized(_("Bad credentials attempting " + "to talk to zone at %(url)s.") % locals()) + + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, + child_blob, reservation_id=reservation_id) + + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): + """Create the requested resource locally or in a child zone + based on what is stored in the zone blob info. + + Attempt to decrypt the blob to see if this request is: + 1. valid, and + 2. intended for this zone or a child zone. + + Note: If we have "blob" that means the request was passed + into us from a parent zone. If we have "child_blob" that + means we gathered the info from one of our children. + It's possible that, when we decrypt the 'blob' field, it + contains "child_blob" data. In which case we forward the + request.""" + + host_info = None + if "blob" in build_plan_item: + # Request was passed in from above. Is it for us? + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: + # Our immediate child zone provided this info ... + host_info = build_plan_item + + if not host_info: + raise InvalidBlob() + + # Valid data ... is it for us? + if 'child_zone' in host_info and 'child_blob' in host_info: + self._ask_child_zone_to_create_instance(context, host_info, + request_spec, kwargs) + else: + self._provision_resource_locally(context, host_info, request_spec, + kwargs) + + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) + return + + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. 
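+
+        For example (illustrative values only): a child zone record with
+        weight_offset=1000.0 and weight_scale=1.0 turns a raw child weight
+        of 120 into 1000.0 + 1.0 * 120 = 1120.0.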
+ """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) + + def schedule_run_instance(self, context, instance_id, request_spec, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone). + """ + + # TODO(sandy): We'll have to look for richer specs at some point. + + blob = request_spec.get('blob') + if blob: + self._provision_resource(context, request_spec, instance_id, + request_spec, kwargs) + return None + + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + + # Create build plan and provision ... + build_plan = self.select(context, request_spec) + if not build_plan: + raise driver.NoValidHost(_('No hosts were available')) + + for num in xrange(num_instances): + if not build_plan: + break + + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) + + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + + def select(self, context, request_spec, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children. + """ + return self._schedule(context, "compute", request_spec, + *args, **kwargs) + + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + def schedule(self, context, topic, request_spec, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + raise driver.NoValidHost(_('No hosts were available')) + + def _schedule(self, context, topic, request_spec, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + if topic != "compute": + raise NotImplementedError(_("Scheduler only understands" + " Compute nodes (for now)")) + + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] + + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. 
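+            # host_list entries are (hostname, capability_dict) tuples,
+            # e.g. ('host1', {'host_memory_free': 1073741824}); the values
+            # shown here are illustrative only.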
+ host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break + + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) + + # Next, tack on the best weights from the child zones ... + json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) + child_results = self._call_zone_method(context, "select", + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. + host_dict = {"weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. + """ + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def hold_filter_hosts(self, topic, request_spec, hosts=None): + """Filter the full host list (from the ZoneManager)""" + # NOTE(dabo): The logic used by the current _schedule() method + # is incorrect. Since this task is just to refactor the classes, + # I'm not fixing the logic now - that will be the next task. + # So for now this method is just renamed; afterwards this will + # become the filter_hosts() method, and the one below will + # be removed. + filter_name = request_spec.get('filter', None) + # Make sure that the requested filter is legitimate. + selected_filter = host_filter.choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = request_spec['instance_type'] + name, query = selected_filter.instance_type_to_filter(instance_type) + return selected_filter.filter_hosts(self.zone_manager, query) + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '_filter' function more appropriate. 
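+
+        For example (hypothetical data), a returned list might look
+        like [('host1', {'host_memory_free': 4294967296})].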
+        """
+        def _default_filter(hostname, capabilities, request_spec):
+            """Default filter function if there's no _filter"""
+            # NOTE(sirp): The default logic is the equivalent to
+            # AllHostsFilter
+            return True
+
+        # NOTE: _default_filter is a plain nested function (no self) so
+        # it can be called with the same three arguments as a bound
+        # <topic>_filter method.
+        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
+
+        if host_list is None:
+            first_run = True
+            host_list = self.zone_manager.service_states.iteritems()
+        else:
+            first_run = False
+
+        filtered_hosts = []
+        for host, services in host_list:
+            if first_run:
+                if topic not in services:
+                    continue
+                services = services[topic]
+            if filter_func(host, services, request_spec):
+                filtered_hosts.append((host, services))
+        return filtered_hosts
+
+    def weigh_hosts(self, topic, request_spec, hosts):
+        """Derived classes may override this to provide more sophisticated
+        scheduling objectives
+        """
+        # NOTE(sirp): The default logic is the same as the NoopCostFunction
+        return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+                for hostname, capabilities in hosts]
+
+    def compute_consume(self, capabilities, instance_type):
+        """Consume compute resources for selected host"""
+
+        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+        capabilities['host_memory_free'] -= requested_mem
+
+    def consume_resources(self, topic, capabilities, instance_type):
+        """Consume resources for a specific host. 'host' is a tuple
+        of the hostname and the services"""
+
+        consume_func = getattr(self, '%s_consume' % topic, None)
+        if not consume_func:
+            return
+        consume_func(capabilities, instance_type)
--
cgit

From ee8ef9ab1de284ec77d33bb27741f010f9a63961 Mon Sep 17 00:00:00 2001
From: Jason Koelker
Date: Fri, 12 Aug 2011 11:55:38 -0500
Subject: use subnet iteration from netaddr for subnet calculation

---
 nova/network/manager.py | 55 +++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 29 insertions(+), 26 deletions(-)

diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8fc6a295f..93f571c21 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,6 +45,7 @@ topologies.
All of the network commands are issued to a subclass of """ import datetime +import itertools import math import netaddr import socket @@ -618,61 +619,63 @@ class NetworkManager(manager.SchedulerDependentManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" + # NOTE(jkoelker): these are dummy values to make sure iter works + fixed_net_v4 = netaddr.IPNetwork('0/32') + fixed_net_v6 = netaddr.IPNetwork('::0/128') + subnets_v4 = [] + subnets_v6 = [] + + subnet_bits = int(math.ceil(math.log(network_size, 2)) + if cidr_v6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) - significant_bits_v6 = 64 - network_size_v6 = 1 << 64 + prefixlen_v6 = 128 - subnet_bits + subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks) if cidr: fixed_net = netaddr.IPNetwork(cidr) - significant_bits = 32 - int(math.log(network_size, 2)) + prefixlen_v4 = 32 - subnet_bits + subnets_v4 = fixed_net_v4.subnet(prefixlen_v4, count=num_networks) - for index in range(num_networks): + subnets = itertools.izip_longest(subnets_v4, subnets_v6) + for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface + net['multi_host'] = multi_host + net['dns1'] = dns1 net['dns2'] = dns2 - if cidr: - start = index * network_size - project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start], - significant_bits)) - net['cidr'] = str(project_net) - net['multi_host'] = multi_host - net['netmask'] = str(project_net.netmask) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast) - net['dhcp_start'] = str(project_net[2]) - if num_networks > 1: net['label'] = '%s_%d' % (label, index) else: net['label'] = label - if cidr_v6: - start_v6 = index * network_size_v6 - cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], - significant_bits_v6) - net['cidr_v6'] = cidr_v6 - - project_net_v6 = netaddr.IPNetwork(cidr_v6) + if cidr: + net['cidr'] = str(subnet_v4) + net['netmask'] = str(subnet_v4.netmask) + net['gateway'] = str(subnet_v4[1]) + net['broadcast'] = str(subnet_v4.broadcast) + net['dhcp_start'] = str(subnet_v4[2]) + if cidr_v6: + net['cidr_v6'] = str(subnet_v6) if gateway_v6: # use a pre-defined gateway if one is provided net['gateway_v6'] = str(gateway_v6) else: - net['gateway_v6'] = str(project_net_v6[1]) + net['gateway_v6'] = str(subnet_v6[1]) - net['netmask_v6'] = str(project_net_v6._prefixlen) + net['netmask_v6'] = str(subnet_v6._prefixlen) if kwargs.get('vpn', False): # this bit here is for vlan-manager del net['dns1'] del net['dns2'] vlan = kwargs['vlan_start'] + index - net['vpn_private_address'] = str(project_net[2]) - net['dhcp_start'] = str(project_net[3]) + net['vpn_private_address'] = str(subnet_v4[2]) + net['dhcp_start'] = str(subnet_v4[3]) net['vlan'] = vlan net['bridge'] = 'br%s' % vlan -- cgit From 8f5f2c651846f8a3ff66821451216552d71c8fe6 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:00:23 -0500 Subject: forgot the closing paren --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 93f571c21..889e1f9cd 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -625,7 +625,7 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v4 = [] subnets_v6 = [] - subnet_bits = int(math.ceil(math.log(network_size, 2)) + subnet_bits = int(math.ceil(math.log(network_size, 2))) if cidr_v6: fixed_net_v6 = 
netaddr.IPNetwork(cidr_v6) -- cgit From 21707674ce862f4e12a8ee9db665829f09d29467 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:03:02 -0500 Subject: don't require ipv4 --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 077a89d6f..1b7a2bf0a 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -719,7 +719,7 @@ class NetworkCommands(object): # sanitize other input using FLAGS if necessary if not num_networks: num_networks = FLAGS.num_networks - if not network_size: + if not network_size and fixed_range_v4: fixnet = netaddr.IPNetwork(fixed_range_v4) each_subnet_size = fixnet.size / int(num_networks) if each_subnet_size > FLAGS.network_size: -- cgit From 0beef1b24ee63f554f5478d54ee32f86fe5f4f2c Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:06:57 -0500 Subject: make sure network_size gets set --- bin/nova-manage | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bin/nova-manage b/bin/nova-manage index 1b7a2bf0a..25655125d 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -741,6 +741,9 @@ class NetworkCommands(object): if not dns1 and FLAGS.flat_network_dns: dns1 = FLAGS.flat_network_dns + if not network_size: + network_size = FLAGS.network_size + # create the network net_manager = utils.import_object(FLAGS.network_manager) net_manager.create_networks(context.get_admin_context(), -- cgit From 07aae460e848af51667537d56ec8b89d0c79f048 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:11:13 -0500 Subject: only run if the subnet and cidr exist --- nova/network/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 889e1f9cd..f26263f2e 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -652,14 +652,14 @@ class NetworkManager(manager.SchedulerDependentManager): else: net['label'] = label - if cidr: + if cidr and subnet_v4: net['cidr'] = str(subnet_v4) net['netmask'] = str(subnet_v4.netmask) net['gateway'] = str(subnet_v4[1]) net['broadcast'] = str(subnet_v4.broadcast) net['dhcp_start'] = str(subnet_v4[2]) - if cidr_v6: + if cidr_v6 and subnet_v6: net['cidr_v6'] = str(subnet_v6) if gateway_v6: # use a pre-defined gateway if one is provided -- cgit From 411ee0c1a1901de7c9b7ceae1d41b7742fce609e Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:13:14 -0500 Subject: only run if the subnet and cidr exist --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index f26263f2e..ccc899d99 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -689,7 +689,7 @@ class NetworkManager(manager.SchedulerDependentManager): if not network: raise ValueError(_('Network already exists!')) - if network and cidr: + if network and cidr and subnet_v4: self._create_fixed_ips(context, network['id']) @property -- cgit From 24796debe819641b1cba58ba966b0d6d5a253fd8 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 12 Aug 2011 12:14:40 -0500 Subject: Fixed unit tests. 
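
A note on the image-size check touched in this patch (an illustrative
sketch, not the full method): the image service does not guarantee a
'size' entry in image metadata, so a missing key now skips the check
instead of failing the build:

    image_meta = {'name': 'fake'}    # hypothetical metadata, no 'size'
    try:
        size_bytes = image_meta['size']
    except KeyError:
        size_bytes = None            # size unknown, check is skipped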
--- nova/compute/manager.py | 15 ++++++++++++++- nova/tests/scheduler/test_scheduler.py | 4 +++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1c3485342..f0184161e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -336,10 +336,22 @@ class ComputeManager(manager.SchedulerDependentManager): image, but is accurate because it reflects the image's actual size. """ + # NOTE(jk0): image_ref is defined in the DB model, image_href is + # used by the image service. This should be refactored to be + # consistent. image_href = instance['image_ref'] image_service, image_id = nova.image.get_image_service(image_href) image_meta = image_service.show(context, image_id) - size_bytes = image_meta['size'] + + try: + size_bytes = image_meta['size'] + except KeyError: + # Size is not a required field in the image service (yet), so + # we are unable to rely on it being there even though it's in + # glance. + + # TODO(jk0): Should size be required in the image service? + return instance_type_id = instance['instance_type_id'] instance_type = self.db.instance_type_get(context, @@ -360,6 +372,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance = self.db.instance_get(context, instance_id) + if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 7a26fd1bb..ebaf89624 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -256,7 +256,9 @@ class SimpleDriverTestCase(test.TestCase): def _create_instance(self, **kwargs): """Create a test instance""" inst = {} - inst['image_id'] = 1 + # NOTE(jk0): If an integer is passed as the image_ref, the image + # service will use the default image service (in this case, the fake). 
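+        # (Illustrative: an href such as 'http://glance:9292/v1/images/1'
+        # would instead be routed to the Glance image service.)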
+ inst['image_ref'] = '1' inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id -- cgit From dbaa1c2299d3b97273698050b372b9714324706a Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Fri, 12 Aug 2011 12:23:50 -0500 Subject: need to actually assign the v4 network --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index ccc899d99..32e236073 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -633,7 +633,7 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks) if cidr: - fixed_net = netaddr.IPNetwork(cidr) + fixed_net_v4 = netaddr.IPNetwork(cidr) prefixlen_v4 = 32 - subnet_bits subnets_v4 = fixed_net_v4.subnet(prefixlen_v4, count=num_networks) -- cgit From 90c6641d47e9c1012b9fb3e53fe0da21ae3d42b7 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 13:58:26 -0500 Subject: Created the filters directory in nova/scheduler --- nova/scheduler/__init__.py | 2 + nova/scheduler/abstract_scheduler.py | 30 +-- nova/scheduler/base_scheduler.py | 312 +-------------------- nova/scheduler/filters/__init__.py | 18 ++ nova/scheduler/filters/abstract_filter.py | 87 ++++++ nova/scheduler/filters/all_hosts_filter.py | 31 +++ nova/scheduler/filters/instance_type_filter.py | 86 ++++++ nova/scheduler/filters/json_filter.py | 141 ++++++++++ nova/scheduler/host_filter.py | 314 ---------------------- nova/tests/scheduler/test_abstract_scheduler.py | 3 + nova/tests/scheduler/test_host_filter.py | 4 +- nova/tests/scheduler/test_least_cost_scheduler.py | 7 +- 12 files changed, 391 insertions(+), 644 deletions(-) create mode 100644 nova/scheduler/filters/__init__.py create mode 100644 nova/scheduler/filters/abstract_filter.py create mode 100644 nova/scheduler/filters/all_hosts_filter.py create mode 100644 nova/scheduler/filters/instance_type_filter.py create mode 100644 nova/scheduler/filters/json_filter.py delete mode 100644 nova/scheduler/host_filter.py diff --git a/nova/scheduler/__init__.py b/nova/scheduler/__init__.py index 8359a7aeb..25078f015 100644 --- a/nova/scheduler/__init__.py +++ b/nova/scheduler/__init__.py @@ -21,5 +21,7 @@ .. automodule:: nova.scheduler :platform: Unix :synopsis: Module that picks a compute node to run a VM instance. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe .. moduleauthor:: Chris Behrens """ diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a6457cc50..a0734f322 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -269,18 +269,13 @@ class AbstractScheduler(driver.Scheduler): # Get all available hosts. all_hosts = self.zone_manager.service_states.iteritems() - print "-"*88 - ss = self.zone_manager.service_states - print ss - print "KEYS", ss.keys() - print "-"*88 - - unfiltered_hosts = [(host, services[host]) + unfiltered_hosts = [(host, services[topic]) for host, services in all_hosts - if topic in services[host]] + if topic in services] # Filter local hosts based on requirements ... 
- filtered_hosts = self.filter_hosts(topic, request_spec, host_list) + filtered_hosts = self.filter_hosts(topic, request_spec, + unfiltered_hosts) if not filtered_hosts: LOG.warn(_("No hosts available")) return [] @@ -307,22 +302,19 @@ class AbstractScheduler(driver.Scheduler): weighted_hosts.sort(key=operator.itemgetter('weight')) return weighted_hosts - def basic_ram_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def filter_hosts(self, topic, request_spec, host_list=None): + def filter_hosts(self, topic, request_spec, host_list): """Filter the full host list returned from the ZoneManager. By default, this method only applies the basic_ram_filter(), meaning all hosts with at least enough RAM for the requested instance are returned. Override in subclasses to provide greater selectivity. """ + def basic_ram_filter(hostname, capabilities, request_spec): + """Only return hosts with sufficient available RAM.""" + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + return [(host, services) for host, services in host_list if basic_ram_filter(host, services, request_spec)] diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index 43a6ab2b1..e14ee349e 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -20,324 +20,22 @@ across zones. There are two expansion points to this class for: 2. Filtering Hosts based on required instance capabilities """ -import operator -import json - -import M2Crypto - -from novaclient import v1_1 as novaclient -from novaclient import exceptions as novaclient_exceptions - -from nova import crypto -from nova import db -from nova import exception from nova import flags from nova import log as logging -from nova import rpc -from nova.compute import api as compute_api -from nova.scheduler import api -from nova.scheduler import driver +from nova.scheduler import abstract_scheduler +from nova.scheduler import host_filter FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.scheduler.abstract_scheduler') - +LOG = logging.getLogger('nova.scheduler.base_scheduler') -class InvalidBlob(exception.NovaException): - message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") - -class AbstractScheduler(driver.Scheduler): +class BaseScheduler(abstract_scheduler.AbstractScheduler): """Base class for creating Schedulers that can work across any nova deployment, from simple designs to multiply-nested zones. """ - - def _call_zone_method(self, context, method, specs, zones): - """Call novaclient zone method. Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs, zones=zones) - - def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): - """Create the requested resource in this Zone.""" - host = build_plan_item['hostname'] - base_options = request_spec['instance_properties'] - image = request_spec['image'] - - # TODO(sandy): I guess someone needs to add block_device_mapping - # support at some point? Also, OS API has no concept of security - # groups. 
- instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) - - instance_id = instance['id'] - kwargs['instance_id'] = instance_id - - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) - LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) - - def _decrypt_blob(self, blob): - """Returns the decrypted blob or None if invalid. Broken out - for testing.""" - decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) - try: - json_entry = decryptor(blob) - return json.dumps(json_entry) - except M2Crypto.EVP.EVPError: - pass - return None - - def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): - """Once we have determined that the request should go to one - of our children, we need to fabricate a new POST /servers/ - call with the same parameters that were passed into us. - - Note that we have to reverse engineer from our args to get back the - image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - - instance_type = request_spec['instance_type'] - instance_properties = request_spec['instance_properties'] - - name = instance_properties['display_name'] - image_ref = instance_properties['image_ref'] - meta = instance_properties['metadata'] - flavor_id = instance_type['flavorid'] - reservation_id = instance_properties['reservation_id'] - - files = kwargs['injected_files'] - ipgroup = None # Not supported in OS API ... yet - - child_zone = zone_info['child_zone'] - child_blob = zone_info['child_blob'] - zone = db.zone_get(context, child_zone) - url = zone.api_url - LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) - nova = None - try: - nova = novaclient.Client(zone.username, zone.password, None, url) - nova.authenticate() - except novaclient_exceptions.BadRequest, e: - raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - - nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) - - def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): - """Create the requested resource locally or in a child zone - based on what is stored in the zone blob info. - - Attempt to decrypt the blob to see if this request is: - 1. valid, and - 2. intended for this zone or a child zone. - - Note: If we have "blob" that means the request was passed - into us from a parent zone. If we have "child_blob" that - means we gathered the info from one of our children. - It's possible that, when we decrypt the 'blob' field, it - contains "child_blob" data. In which case we forward the - request.""" - - host_info = None - if "blob" in build_plan_item: - # Request was passed in from above. Is it for us? - host_info = self._decrypt_blob(build_plan_item['blob']) - elif "child_blob" in build_plan_item: - # Our immediate child zone provided this info ... - host_info = build_plan_item - - if not host_info: - raise InvalidBlob() - - # Valid data ... is it for us? 
- if 'child_zone' in host_info and 'child_blob' in host_info: - self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) - else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) - - def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): - """Create the requested resource in this Zone or a child zone.""" - if "hostname" in build_plan_item: - self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) - return - - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) - - def _adjust_child_weights(self, child_results, zones): - """Apply the Scale and Offset values from the Zone definition - to adjust the weights returned from the child zones. Alters - child_results in place. - """ - for zone_id, result in child_results: - if not result: - continue - - assert isinstance(zone_id, int) - - for zone_rec in zones: - if zone_rec['id'] != zone_id: - continue - - for item in result: - try: - offset = zone_rec['weight_offset'] - scale = zone_rec['weight_scale'] - raw_weight = item['weight'] - cooked_weight = offset + scale * raw_weight - item['weight'] = cooked_weight - item['raw_weight'] = raw_weight - except KeyError: - LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) - - def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): - """This method is called from nova.compute.api to provision - an instance. However we need to look at the parameters being - passed in to see if this is a request to: - 1. Create a Build Plan and then provision, or - 2. Use the Build Plan information in the request parameters - to simply create the instance (either in this zone or - a child zone). - """ - - # TODO(sandy): We'll have to look for richer specs at some point. - - blob = request_spec.get('blob') - if blob: - self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) - return None - - num_instances = request_spec.get('num_instances', 1) - LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % - locals()) - - # Create build plan and provision ... - build_plan = self.select(context, request_spec) - if not build_plan: - raise driver.NoValidHost(_('No hosts were available')) - - for num in xrange(num_instances): - if not build_plan: - break - - build_plan_item = build_plan.pop(0) - self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) - - # Returning None short-circuits the routing to Compute (since - # we've already done it here) - return None - - def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information - corresponding to the best hosts to service the request. Any - child zone information has been encrypted so as not to reveal - anything about the children. - """ - return self._schedule(context, "compute", request_spec, - *args, **kwargs) - - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. - def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one - best-suited host for this request. 
- """ - raise driver.NoValidHost(_('No hosts were available')) - - def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, - ordered by their fitness. - """ - - if topic != "compute": - raise NotImplementedError(_("Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... - json_spec = json.dumps(request_spec) - all_zones = db.zone_get_all(context) - child_results = self._call_zone_method(context, "select", - specs=json_spec, zones=all_zones) - self._adjust_child_weights(child_results, all_zones) - for child_zone, result in child_results: - for weighting in result: - # Remember the child_zone so we can get back to - # it later if needed. This implicitly builds a zone - # path structure. - host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def hold_filter_hosts(self, topic, request_spec, hosts=None): + def filter_hosts(self, topic, request_spec, hosts=None): """Filter the full host list (from the ZoneManager)""" - # NOTE(dabo): The logic used by the current _schedule() method - # is incorrect. Since this task is just to refactor the classes, - # I'm not fixing the logic now - that will be the next task. - # So for now this method is just renamed; afterwards this will - # become the filter_hosts() method, and the one below will - # be removed. filter_name = request_spec.get('filter', None) # Make sure that the requested filter is legitimate. selected_filter = host_filter.choose_host_filter(filter_name) diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py new file mode 100644 index 000000000..27160ca0a --- /dev/null +++ b/nova/scheduler/filters/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from all_hosts_filter import AllHostsFilter
+from instance_type_filter import InstanceTypeFilter
+from json_filter import JsonFilter
diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py
new file mode 100644
index 000000000..05982820f
--- /dev/null
+++ b/nova/scheduler/filters/abstract_filter.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed by Host Filter classes from consideration. Those that pass
+the filter are then passed on for weighting or other ordering processes.
+
+Three filters are included: AllHosts, InstanceType & JSON. AllHosts just
+returns the full, unfiltered list of hosts. InstanceType is a hard-coded
+matching mechanism based on flavor criteria, and JSON is an ad-hoc
+filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently Flavors and/or InstanceTypes are used for
+specifying the type of instance desired. Specific Nova users have
+noted a need for a more expressive way of specifying instances.
+Since we don't want to get into building a full DSL, this is a simple
+form as an example of how this could be done. In reality, most
+consumers will use the more rigid filters such as InstanceTypeFilter.
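+
+An illustrative query in this grammar (hypothetical byte values),
+requiring at least 8 GB of free memory and 100 GB of free disk:
+
+    ['and',
+        ['>=', '$compute.host_memory_free', 8589934592],
+        ['>=', '$compute.disk_available', 107374182400]]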
+""" + +import json + +from nova import exception +from nova import flags +from nova import log as logging + +import nova.scheduler + + +LOG = logging.getLogger('nova.scheduler.host_filter') +FLAGS = flags.FLAGS +flags.DEFINE_string('default_host_filter', + 'nova.scheduler.host_filter.AllHostsFilter', + 'Which filter to use for filtering hosts') + + +class AbstractHostFilter(object): + """Base class for host filters.""" + def instance_type_to_filter(self, instance_type): + """Convert instance_type into a filter for most common use-case.""" + raise NotImplementedError() + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that fulfill the filter.""" + raise NotImplementedError() + + def _full_name(self): + """module.classname of the filter.""" + return "%s.%s" % (self.__module__, self.__class__.__name__) + + +def _get_filters(): + from nova.scheduler import filters + return [itm for itm in dir(filters) + if issubclass(itm, AbstractHostFilter)] + + +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. + """ + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in _get_filters(): + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py new file mode 100644 index 000000000..bc4acfd1a --- /dev/null +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -0,0 +1,31 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import nova.scheduler + + +class AllHostsFilter(nova.scheduler.host_filter.AbstractHostFilter): + """NOP host filter. Returns all hosts in ZoneManager.""" + def instance_type_to_filter(self, instance_type): + """Return anything to prevent base-class from raising + exception. + """ + return (self._full_name(), instance_type) + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts from ZoneManager list.""" + return [(host, services) + for host, services in zone_manager.service_states.iteritems()] diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py new file mode 100644 index 000000000..03ffc46c6 --- /dev/null +++ b/nova/scheduler/filters/instance_type_filter.py @@ -0,0 +1,86 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.scheduler.filters import abstract_filter
+
+
+class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
+    """HostFilter hard-coded to work with InstanceType records."""
+    def instance_type_to_filter(self, instance_type):
+        """Use instance_type to filter hosts."""
+        return (self._full_name(), instance_type)
+
+    def _satisfies_extra_specs(self, capabilities, instance_type):
+        """Check that the capabilities provided by the compute service
+        satisfy the extra specs associated with the instance type"""
+        if 'extra_specs' not in instance_type:
+            return True
+        # NOTE(lorinh): For now, we are just checking exact matching on the
+        # values. Later on, we want to handle numerical
+        # values so we can represent things like number of GPU cards
+        try:
+            for key, value in instance_type['extra_specs'].iteritems():
+                if capabilities[key] != value:
+                    return False
+        except KeyError:
+            return False
+        return True
+
+    def filter_hosts(self, zone_manager, query):
+        """Return a list of hosts that can create instance_type."""
+        instance_type = query
+        selected_hosts = []
+        for host, services in zone_manager.service_states.iteritems():
+            capabilities = services.get('compute', {})
+            if not capabilities:
+                continue
+            host_ram_mb = capabilities['host_memory_free']
+            disk_bytes = capabilities['disk_available']
+            spec_ram = instance_type['memory_mb']
+            spec_disk = instance_type['local_gb']
+            extra_specs = instance_type['extra_specs']
+
+            if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
+                    self._satisfies_extra_specs(capabilities, instance_type)):
+                selected_hosts.append((host, capabilities))
+        return selected_hosts
+
+
+# host entries (currently) are like:
+# {'host_name-description': 'Default install of XenServer',
+#  'host_hostname': 'xs-mini',
+#  'host_memory_total': 8244539392,
+#  'host_memory_overhead': 184225792,
+#  'host_memory_free': 3868327936,
+#  'host_memory_free_computed': 3840843776,
+#  'host_other_config': {},
+#  'host_ip_address': '192.168.1.109',
+#  'host_cpu_info': {},
+#  'disk_available': 32954957824,
+#  'disk_total': 50394562560,
+#  'disk_used': 17439604736,
+#  'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
+#  'host_name_label': 'xs-mini'}
+
+# instance_type table has:
+# name = Column(String(255), unique=True)
+# memory_mb = Column(Integer)
+# vcpus = Column(Integer)
+# local_gb = Column(Integer)
+# flavorid = Column(Integer, unique=True)
+# swap = Column(Integer, nullable=False, default=0)
+# rxtx_quota = Column(Integer, nullable=False, default=0)
+# rxtx_cap = Column(Integer, nullable=False, default=0)
diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py
new file mode 100644
index 000000000..358abdc4d
--- /dev/null
+++ b/nova/scheduler/filters/json_filter.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import json
+import operator
+
+from nova.scheduler.filters import abstract_filter
+
+
+class JsonFilter(abstract_filter.AbstractHostFilter):
+    """Host Filter to allow simple JSON-based grammar for
+    selecting hosts.
+    """
+    def _op_comp(self, args, op):
+        """Returns True if the specified operator can successfully
+        compare the first item in the args with all the rest. Will
+        return False if only one item is in the list.
+        """
+        if len(args) < 2:
+            return False
+        bad = [arg for arg in args[1:]
+               if not op(args[0], arg)]
+        return not bool(bad)
+
+    def _equals(self, args):
+        """First term is == all the other terms."""
+        return self._op_comp(args, operator.eq)
+
+    def _less_than(self, args):
+        """First term is < all the other terms."""
+        return self._op_comp(args, operator.lt)
+
+    def _greater_than(self, args):
+        """First term is > all the other terms."""
+        return self._op_comp(args, operator.gt)
+
+    def _in(self, args):
+        """First term is in set of remaining terms"""
+        return len(args) > 1 and args[0] in args[1:]
+
+    def _less_than_equal(self, args):
+        """First term is <= all the other terms."""
+        return self._op_comp(args, operator.le)
+
+    def _greater_than_equal(self, args):
+        """First term is >= all the other terms."""
+        return self._op_comp(args, operator.ge)
+
+    def _not(self, args):
+        """Flip each of the arguments."""
+        return [not arg for arg in args]
+
+    def _or(self, args):
+        """True if any arg is True."""
+        return any(args)
+
+    def _and(self, args):
+        """True if all args are True."""
+        return all(args)
+
+    commands = {
+        '=': _equals,
+        '<': _less_than,
+        '>': _greater_than,
+        'in': _in,
+        '<=': _less_than_equal,
+        '>=': _greater_than_equal,
+        'not': _not,
+        'or': _or,
+        'and': _and,
+    }
+
+    def instance_type_to_filter(self, instance_type):
+        """Convert instance_type into JSON filter object."""
+        required_ram = instance_type['memory_mb']
+        required_disk = instance_type['local_gb']
+        query = ['and',
+                ['>=', '$compute.host_memory_free', required_ram],
+                ['>=', '$compute.disk_available', required_disk]]
+        return (self._full_name(), json.dumps(query))
+
+    def _parse_string(self, string, host, services):
+        """Strings prefixed with $ are capability lookups in the
+        form '$service.capability[.subcap*]'.
+        """
+        if not string:
+            return None
+        if not string.startswith("$"):
+            return string
+
+        path = string[1:].split(".")
+        for item in path:
+            services = services.get(item, None)
+            if not services:
+                return None
+        return services
+
+    def _process_filter(self, zone_manager, query, host, services):
+        """Recursively parse the query structure."""
+        if not query:
+            return True
+        cmd = query[0]
+        method = self.commands[cmd]
+        cooked_args = []
+        for arg in query[1:]:
+            if isinstance(arg, list):
+                arg = self._process_filter(zone_manager, arg, host, services)
+            elif isinstance(arg, basestring):
+                arg = self._parse_string(arg, host, services)
+            if arg is not None:
+                cooked_args.append(arg)
+        result = method(self, cooked_args)
+        return result
+
+    def filter_hosts(self, zone_manager, query):
+        """Return a list of hosts that can fulfill the requirements
+        specified in the query.
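+
+        For example (hypothetical data), passing the query
+        json.dumps(['>=', '$compute.host_memory_free', 1073741824])
+        returns only the (host, services) pairs reporting at least
+        1 GB of free memory.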
+ """ + expanded = json.loads(query) + filtered_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + result = self._process_filter(zone_manager, expanded, host, + services) + if isinstance(result, list): + # If any succeeded, include the host + result = any(result) + if result: + filtered_hosts.append((host, services)) + return filtered_hosts diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py deleted file mode 100644 index 45a8f40d8..000000000 --- a/nova/scheduler/host_filter.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright (c) 2011 Openstack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The Host Filter classes are a way to ensure that only hosts that are -appropriate are considered when creating a new instance. Hosts that are -either incompatible or insufficient to accept a newly-requested instance -are removed by Host Filter classes from consideration. Those that pass -the filter are then passed on for weighting or other process for ordering. - -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. -""" - -import json - -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils - -LOG = logging.getLogger('nova.scheduler.host_filter') - -FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', - 'Which filter to use for filtering hosts.') - - -class HostFilter(object): - """Base class for host filters.""" - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into a filter for most common use-case.""" - raise NotImplementedError() - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that fulfill the filter.""" - raise NotImplementedError() - - def _full_name(self): - """module.classname of the filter.""" - return "%s.%s" % (self.__module__, self.__class__.__name__) - - -class AllHostsFilter(HostFilter): - """ NOP host filter. Returns all hosts in ZoneManager. - This essentially does what the old Scheduler+Chance used - to give us. 
- """ - - def instance_type_to_filter(self, instance_type): - """Return anything to prevent base-class from raising - exception.""" - return (self._full_name(), instance_type) - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts from ZoneManager list.""" - return [(host, services) - for host, services in zone_manager.service_states.iteritems()] - - -class InstanceTypeFilter(HostFilter): - """HostFilter hard-coded to work with InstanceType records.""" - - def instance_type_to_filter(self, instance_type): - """Use instance_type to filter hosts.""" - return (self._full_name(), instance_type) - - def _satisfies_extra_specs(self, capabilities, instance_type): - """Check that the capabilities provided by the compute service - satisfy the extra specs associated with the instance type""" - - if 'extra_specs' not in instance_type: - return True - - # Note(lorinh): For now, we are just checking exact matching on the - # values. Later on, we want to handle numerical - # values so we can represent things like number of GPU cards - - try: - for key, value in instance_type['extra_specs'].iteritems(): - if capabilities[key] != value: - return False - except KeyError: - return False - - return True - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can create instance_type.""" - instance_type = query - selected_hosts = [] - for host, services in zone_manager.service_states.iteritems(): - capabilities = services.get('compute', {}) - host_ram_mb = capabilities['host_memory_free'] - disk_bytes = capabilities['disk_available'] - spec_ram = instance_type['memory_mb'] - spec_disk = instance_type['local_gb'] - extra_specs = instance_type['extra_specs'] - - if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and - self._satisfies_extra_specs(capabilities, instance_type)): - selected_hosts.append((host, capabilities)) - return selected_hosts - -#host entries (currently) are like: -# {'host_name-description': 'Default install of XenServer', -# 'host_hostname': 'xs-mini', -# 'host_memory_total': 8244539392, -# 'host_memory_overhead': 184225792, -# 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776, -# 'host_other_config': {}, -# 'host_ip_address': '192.168.1.109', -# 'host_cpu_info': {}, -# 'disk_available': 32954957824, -# 'disk_total': 50394562560, -# 'disk_used': 17439604736, -# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name_label': 'xs-mini'} - -# instance_type table has: -#name = Column(String(255), unique=True) -#memory_mb = Column(Integer) -#vcpus = Column(Integer) -#local_gb = Column(Integer) -#flavorid = Column(Integer, unique=True) -#swap = Column(Integer, nullable=False, default=0) -#rxtx_quota = Column(Integer, nullable=False, default=0) -#rxtx_cap = Column(Integer, nullable=False, default=0) - - -class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts. 
- """ - - def _equals(self, args): - """First term is == all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs != rhs: - return False - return True - - def _less_than(self, args): - """First term is < all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs >= rhs: - return False - return True - - def _greater_than(self, args): - """First term is > all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs <= rhs: - return False - return True - - def _in(self, args): - """First term is in set of remaining terms""" - if len(args) < 2: - return False - return args[0] in args[1:] - - def _less_than_equal(self, args): - """First term is <= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs > rhs: - return False - return True - - def _greater_than_equal(self, args): - """First term is >= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs < rhs: - return False - return True - - def _not(self, args): - """Flip each of the arguments.""" - if len(args) == 0: - return False - return [not arg for arg in args] - - def _or(self, args): - """True if any arg is True.""" - return True in args - - def _and(self, args): - """True if all args are True.""" - return False not in args - - commands = { - '=': _equals, - '<': _less_than, - '>': _greater_than, - 'in': _in, - '<=': _less_than_equal, - '>=': _greater_than_equal, - 'not': _not, - 'or': _or, - 'and': _and, - } - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into JSON filter object.""" - required_ram = instance_type['memory_mb'] - required_disk = instance_type['local_gb'] - query = ['and', - ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] - return (self._full_name(), json.dumps(query)) - - def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]' - """ - if not string: - return None - if string[0] != '$': - return string - - path = string[1:].split('.') - for item in path: - services = services.get(item, None) - if not services: - return None - return services - - def _process_filter(self, zone_manager, query, host, services): - """Recursively parse the query structure.""" - if len(query) == 0: - return True - cmd = query[0] - method = self.commands[cmd] # Let exception fly. - cooked_args = [] - for arg in query[1:]: - if isinstance(arg, list): - arg = self._process_filter(zone_manager, arg, host, services) - elif isinstance(arg, basestring): - arg = self._parse_string(arg, host, services) - if arg != None: - cooked_args.append(arg) - result = method(self, cooked_args) - return result - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can fulfill filter.""" - expanded = json.loads(query) - hosts = [] - for host, services in zone_manager.service_states.iteritems(): - r = self._process_filter(zone_manager, expanded, host, services) - if isinstance(r, list): - r = True in r - if r: - hosts.append((host, services)) - return hosts - - -FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] - - -def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. 
This
-    function checks the filter name against a predefined set
-    of acceptable filters.
-    """
-    if not filter_name:
-        filter_name = FLAGS.default_host_filter
-    for filter_class in FILTERS:
-        host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
-        if host_match == filter_name:
-            return filter_class()
-    raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py
index f4f5cc233..aa97e2344 100644
--- a/nova/tests/scheduler/test_abstract_scheduler.py
+++ b/nova/tests/scheduler/test_abstract_scheduler.py
@@ -77,6 +77,9 @@ class FakeZoneManager(zone_manager.ZoneManager):
         'host3': {
             'compute': {'host_memory_free': 3221225472},
         },
+        'host4': {
+            'compute': {'host_memory_free': 999999999},
+        },
     }
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 7e664d3f9..818be2f45 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -20,7 +20,7 @@ import json
 
 from nova import exception
 from nova import test
-from nova.scheduler import host_filter
+from nova.scheduler import filters
 
 
 class FakeZoneManager:
@@ -55,7 +55,7 @@ class HostFilterTestCase(test.TestCase):
 
     def setUp(self):
         super(HostFilterTestCase, self).setUp()
-        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
+        default_host_filter = 'nova.scheduler.filters.all_hosts_filter.AllHostsFilter'
         self.flags(default_host_filter=default_host_filter)
         self.instance_type = dict(name='tiny',
                 memory_mb=50,
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index de7581d0a..16ec4420b 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -122,11 +122,14 @@ class LeastCostSchedulerTestCase(test.TestCase):
         self.flags(least_cost_scheduler_cost_functions=[
                 'nova.scheduler.least_cost.compute_fill_first_cost_fn'],
                 compute_fill_first_cost_fn_weight=1)
-        num = 1
         instance_type = {'memory_mb': 1024}
         request_spec = {'instance_type': instance_type}
-        hosts = self.sched.filter_hosts('compute', request_spec, None)
+        all_hosts = self.sched.zone_manager.service_states.iteritems()
+        all_hosts = [(host, services["compute"])
+                     for host, services in all_hosts
+                     if "compute" in services]
+        hosts = self.sched.filter_hosts('compute', request_spec, all_hosts)
 
         expected = []
         for idx, (hostname, caps) in enumerate(hosts):
--
cgit

From 8c9eedb4b4dd9653cca302ae4bbd23d895761aee Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Fri, 12 Aug 2011 14:18:25 -0700
Subject: reworked test_extensions code to avoid constant merge conflicts with
 newly added ext

---
 nova/tests/api/openstack/test_extensions.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index ea8fe68a7..e9f44af6a 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -84,6 +84,17 @@ class ExtensionControllerTest(test.TestCase):
         super(ExtensionControllerTest, self).setUp()
         ext_path = os.path.join(os.path.dirname(__file__), "extensions")
         self.flags(osapi_extensions_path=ext_path)
+        self.ext_list = [
+            "FlavorExtraSpecs",
+            "Floating_ips",
+            "Fox In Socks",
+            "Hosts",
+            "Keypairs",
+            "Multinic",
+            "SecurityGroups",
+            "Volumes",
+            ]
+        self.ext_list.sort()
 
     def test_list_extensions_json(self):
         app = 
openstack.APIRouterV11() @@ -96,9 +107,7 @@ class ExtensionControllerTest(test.TestCase): data = json.loads(response.body) names = [x['name'] for x in data['extensions']] names.sort() - self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", - "Fox In Socks", "Hosts", "Keypairs", "Multinic", "SecurityGroups", - "Volumes"]) + self.assertEqual(names, self.ext_list) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [ @@ -145,7 +154,7 @@ class ExtensionControllerTest(test.TestCase): # Make sure we have all the extensions. exts = root.findall('{0}extension'.format(NS)) - self.assertEqual(len(exts), 8) + self.assertEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] -- cgit From b60fa0d09d02066863736a3e98f07094c4db05a6 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 12 Aug 2011 16:18:48 -0500 Subject: Check uncompressed VHD size. --- nova/tests/xenapi/stubs.py | 4 +-- nova/virt/xenapi/fake.py | 1 + nova/virt/xenapi/vm_utils.py | 74 +++++++++++++++++++++++++++++++++++++++----- nova/virt/xenapi/vmops.py | 6 ++-- 4 files changed, 72 insertions(+), 13 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 0d0f84e32..a6a1febd6 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -28,10 +28,10 @@ from nova import utils def stubout_instance_snapshot(stubs): @classmethod - def fake_fetch_image(cls, context, session, instance_id, image, user, + def fake_fetch_image(cls, context, session, instance, image, user, project, type): from nova.virt.xenapi.fake import create_vdi - name_label = "instance-%s" % instance_id + name_label = "instance-%s" % instance.id #TODO: create fake SR record sr_ref = "fakesr" vdi_ref = create_vdi(name_label=name_label, read_only=False, diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index d5ac39473..eecd60d58 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'physical_utilisation': '123', 'VBDs': {}}) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6c44d53d4..e86f72347 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -31,6 +31,7 @@ import uuid from xml.dom import minidom import glance.client +from nova import db from nova import exception from nova import flags import nova.image @@ -368,7 +369,7 @@ class VMHelper(HelperBase): session.wait_for_task(task, instance.id) @classmethod - def fetch_image(cls, context, session, instance_id, image, user_id, + def fetch_image(cls, context, session, instance, image, user_id, project_id, image_type): """Fetch image from glance based on image type. 
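
A sketch of the chain-size accounting introduced below (hypothetical
values; simplified from the _get_vdi_chain_size() helper):

    def chain_size(vdi_recs):
        # Sum the physical utilisation of each VDI in the chain,
        # e.g. a 1 GB leaf plus a 3 GB parent gives 4 GB in total.
        return sum(int(rec['physical_utilisation']) for rec in vdi_recs)

    chain_size([{'physical_utilisation': '1073741824'},
                {'physical_utilisation': '3221225472'}])  # 4294967296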
@@ -377,18 +378,19 @@ class VMHelper(HelperBase): """ if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd(context, - session, instance_id, image, image_type) + session, instance, image, image_type) else: return cls._fetch_image_glance_disk(context, - session, instance_id, image, image_type) + session, instance, image, image_type) @classmethod - def _fetch_image_glance_vhd(cls, context, session, instance_id, image, + def _fetch_image_glance_vhd(cls, context, session, instance, image, image_type): """Tell glance to download an image and put the VHDs into the SR Returns: A list of dictionaries that describe VDIs """ + instance_id = instance.id LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) sr_ref = safe_find_sr(session) @@ -422,17 +424,57 @@ class VMHelper(HelperBase): cls.scan_sr(session, instance_id, sr_ref) - # Pull out the UUID of the first VDI - vdi_uuid = vdis[0]['vdi_uuid'] + # Pull out the UUID of the first VDI (which is the os VDI) + os_vdi_uuid = vdis[0]['vdi_uuid'] + # Set the name-label to ease debugging - vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid) primary_name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) + cls._check_vdi_size(context, session, instance, os_vdi_uuid) return vdis @classmethod - def _fetch_image_glance_disk(cls, context, session, instance_id, image, + def _get_vdi_chain_size(cls, context, session, vdi_uuid): + """Compute the total size of a VDI chain, starting with the specified + VDI UUID. + + This will walk the VDI chain to the root, add the size of each VDI into + the total. + """ + size_bytes = 0 + for vdi_rec in walk_vdi_chain(session, vdi_uuid): + vdi_size_bytes = int(vdi_rec['physical_utilisation']) + LOG.debug(_('vdi_uuid=%(vdi_uuid)s vdi_size_bytes=' + '%(vdi_size_bytes)d' % locals())) + size_bytes += vdi_size_bytes + return size_bytes + + @classmethod + def _check_vdi_size(cls, context, session, instance, vdi_uuid): + size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid) + + # FIXME(sirp): this was copied directly from compute.manager.py, let's + # refactor this to a common area + instance_type_id = instance['instance_type_id'] + instance_type = db.instance_type_get(context, + instance_type_id) + allowed_size_gb = instance_type['local_gb'] + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 + + LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes=" + "%(allowed_size_bytes)d") % locals()) + + if size_bytes > allowed_size_bytes: + LOG.info(_("Image size %(size_bytes)d exceeded" + " instance_type allowed size " + "%(allowed_size_bytes)d") + % locals()) + raise exception.ImageTooLarge() + + @classmethod + def _fetch_image_glance_disk(cls, context, session, instance, image, image_type): """Fetch the image from Glance @@ -444,6 +486,7 @@ class VMHelper(HelperBase): Returns: A single filename if image_type is KERNEL_RAMDISK A list of dictionaries that describe VDIs, otherwise """ + instance_id = instance.id # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and # DISK restores @@ -750,6 +793,21 @@ def get_vhd_parent_uuid(session, vdi_ref): return None +def walk_vdi_chain(session, vdi_uuid): + """Yield vdi_recs for each element in a VDI chain""" + # TODO: perhaps make get_vhd_parent use this + while True: + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + vdi_rec = 
session.get_xenapi().VDI.get_record(vdi_ref) + yield vdi_rec + + parent_uuid = vdi_rec['sm_config'].get('vhd-parent') + if parent_uuid: + vdi_uuid = parent_uuid + else: + break + + def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b1522729a..7586ade7b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -137,7 +137,7 @@ class VMOps(object): def _create_disks(self, context, instance): disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(context, self._session, - instance.id, instance.image_ref, + instance, instance.image_ref, instance.user_id, instance.project_id, disk_image_type) return vdis @@ -182,11 +182,11 @@ class VMOps(object): try: if instance.kernel_id: kernel = VMHelper.fetch_image(context, self._session, - instance.id, instance.kernel_id, instance.user_id, + instance, instance.kernel_id, instance.user_id, instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(context, self._session, - instance.id, instance.ramdisk_id, instance.user_id, + instance, instance.ramdisk_id, instance.user_id, instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', -- cgit From d940fa4619584dac967176d045407f0919da0a74 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 16:19:46 -0500 Subject: end of day --- nova/scheduler/filters/__init__.py | 1 + nova/scheduler/filters/abstract_filter.py | 54 +------------------ nova/scheduler/filters/all_hosts_filter.py | 3 +- nova/scheduler/filters/instance_type_filter.py | 5 +- nova/scheduler/filters/json_filter.py | 39 ++++++++++---- nova/scheduler/host_filter.py | 75 ++++++++++++++++++++++++++ nova/tests/scheduler/test_host_filter.py | 34 ++++++------ 7 files changed, 129 insertions(+), 82 deletions(-) create mode 100644 nova/scheduler/host_filter.py diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py index 27160ca0a..4c9187c5a 100644 --- a/nova/scheduler/filters/__init__.py +++ b/nova/scheduler/filters/__init__.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +from abstract_filter import AbstractHostFilter from all_hosts_filter import AllHostsFilter from instance_type_filter import InstanceTypeFilter from json_filter import JsonFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index 05982820f..fe5610923 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -13,44 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. -""" -The Host Filter classes are a way to ensure that only hosts that are -appropriate are considered when creating a new instance. Hosts that are -either incompatible or insufficient to accept a newly-requested instance -are removed by Host Filter classes from consideration. Those that pass -the filter are then passed on for weighting or other process for ordering. - -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? 
The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. -""" - -import json - -from nova import exception -from nova import flags -from nova import log as logging import nova.scheduler +from nova import flags - -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', + 'nova.scheduler.filters.AllHostsFilter', 'Which filter to use for filtering hosts') - class AbstractHostFilter(object): """Base class for host filters.""" def instance_type_to_filter(self, instance_type): @@ -64,24 +35,3 @@ class AbstractHostFilter(object): def _full_name(self): """module.classname of the filter.""" return "%s.%s" % (self.__module__, self.__class__.__name__) - - -def _get_filters(): - from nova.scheduler import filters - return [itm for itm in dir(filters) - if issubclass(itm, AbstractHostFilter)] - - -def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. This - function checks the filter name against a predefined set - of acceptable filters. - """ - if not filter_name: - filter_name = FLAGS.default_host_filter - for filter_class in _get_filters(): - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if host_match == filter_name: - return filter_class() - raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py index bc4acfd1a..e80d829ca 100644 --- a/nova/scheduler/filters/all_hosts_filter.py +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -15,9 +15,10 @@ import nova.scheduler +from nova.scheduler.filters import abstract_filter -class AllHostsFilter(nova.scheduler.host_filter.AbstractHostFilter): +class AllHostsFilter(abstract_filter.AbstractHostFilter): """NOP host filter. Returns all hosts in ZoneManager.""" def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py index 03ffc46c6..62b9ee414 100644 --- a/nova/scheduler/filters/instance_type_filter.py +++ b/nova/scheduler/filters/instance_type_filter.py @@ -14,10 +14,11 @@ # under the License. -from nova.scheduler import host_filter +import nova.scheduler +from nova.scheduler.filters import abstract_filter -class InstanceTypeFilter(host_filter.AbstractHostFilter): +class InstanceTypeFilter(abstract_filter.AbstractHostFilter): """HostFilter hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py index 358abdc4d..889b96915 100644 --- a/nova/scheduler/filters/json_filter.py +++ b/nova/scheduler/filters/json_filter.py @@ -14,49 +14,64 @@ # under the License. 
+import json
 import operator

-from nova.scheduler import host_filter
+import nova.scheduler
+from nova.scheduler.filters import abstract_filter


-class JsonFilter(host_filter.AbstractHostFilter):
+class JsonFilter(abstract_filter.AbstractHostFilter):
     """Host Filter to allow simple JSON-based grammar for
     selecting hosts.
     """
-    def _op_comp(self, args, op):
+    def _op_compare(self, args, op):
         """Returns True if the specified operator can successfully
         compare the first item in the args with all the rest. Will return
         False if only one item is in the list.
         """
         if len(args) < 2:
             return False
-        bad = [arg for arg in args[1:]
-               if not op(args[0], arg)]
+        if op is operator.contains:
+            # operator.contains expects (container, item), so test that
+            # the first term appears among the remaining terms.
+            bad = not args[0] in args[1:]
+        else:
+            bad = [arg for arg in args[1:]
+                   if not op(args[0], arg)]
         return not bool(bad)

     def _equals(self, args):
         """First term is == all the other terms."""
-        return self._op_comp(args, operator.eq)
+        return self._op_compare(args, operator.eq)

     def _less_than(self, args):
         """First term is < all the other terms."""
-        return self._op_comp(args, operator.lt)
+        return self._op_compare(args, operator.lt)

     def _greater_than(self, args):
         """First term is > all the other terms."""
-        return self._op_comp(args, operator.gt)
+        return self._op_compare(args, operator.gt)

     def _in(self, args):
         """First term is in set of remaining terms"""
-        return self._op_comp(args, operator.contains)
+        return self._op_compare(args, operator.contains)

     def _less_than_equal(self, args):
         """First term is <= all the other terms."""
-        return self._op_comp(args, operator.le)
+        return self._op_compare(args, operator.le)

     def _greater_than_equal(self, args):
         """First term is >= all the other terms."""
-        return self._op_comp(args, operator.ge)
+        return self._op_compare(args, operator.ge)

     def _not(self, args):
         """Flip each of the arguments."""
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
new file mode 100644
index 000000000..f5191f5c9
--- /dev/null
+++ b/nova/scheduler/host_filter.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed by Host Filter classes from consideration. Those that pass
+the filter are then passed on for weighting or other process for ordering.
+
+Three filters are included: AllHosts, Flavor & JSON. AllHosts just
+returns the full, unfiltered list of hosts. Flavor is a hard coded
+matching mechanism based on flavor criteria and JSON is an ad-hoc
+filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently Flavors and/or InstanceTypes are used for
+specifying the type of instance desired. Specific Nova users have
+noted a need for a more expressive way of specifying instances.
+Since we don't want to get into building full DSL this is a simple
+form as an example of how this could be done. In reality, most
+consumers will use the more rigid filters such as FlavorFilter.
+"""
+
+import json
+import types
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+
+import nova.scheduler
+
+
+LOG = logging.getLogger('nova.scheduler.host_filter')
+FLAGS = flags.FLAGS
+
+
+def _get_filters():
+    from nova.scheduler import filters
+    def get_itm(nm):
+        return getattr(filters, nm)
+
+    return [get_itm(itm) for itm in dir(filters)
+            if (type(get_itm(itm)) is types.TypeType)
+            and issubclass(get_itm(itm), filters.AbstractHostFilter)]
+
+
+def choose_host_filter(filter_name=None):
+    """Since the caller may specify which filter to use we need
+    to have an authoritative list of what is permissible. This
+    function checks the filter name against a predefined set
+    of acceptable filters.
+    """
+    if not filter_name:
+        filter_name = FLAGS.default_host_filter
+    for filter_class in _get_filters():
+        host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
+        if (host_match.startswith("nova.scheduler.filters") and
+            (host_match.split(".")[-1] == filter_name)):
+            return filter_class()
+    raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 818be2f45..a64b25138 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -20,6 +20,7 @@ import json

 from nova import exception
 from nova import test
+from nova.scheduler import host_filter
 from nova.scheduler import filters


@@ -55,7 +56,7 @@ class HostFilterTestCase(test.TestCase):

     def setUp(self):
         super(HostFilterTestCase, self).setUp()
-        default_host_filter = 'nova.scheduler.filters.AllHostsFilter'
+        default_host_filter = 'AllHostsFilter'
         self.flags(default_host_filter=default_host_filter)
         self.instance_type = dict(name='tiny',
                 memory_mb=50,
@@ -98,13 +99,10 @@ class HostFilterTestCase(test.TestCase):
     def test_choose_filter(self):
         # Test default filter ...
         hf = host_filter.choose_host_filter()
-        self.assertEquals(hf._full_name(),
-                          'nova.scheduler.host_filter.AllHostsFilter')
+        self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter')
         # Test valid filter ...
-        hf = host_filter.choose_host_filter(
-                'nova.scheduler.host_filter.InstanceTypeFilter')
-        self.assertEquals(hf._full_name(),
-                          'nova.scheduler.host_filter.InstanceTypeFilter')
+        hf = host_filter.choose_host_filter('InstanceTypeFilter')
+        self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter')
         # Test invalid filter ...
         try:
             host_filter.choose_host_filter('does not exist')
@@ -113,7 +111,7 @@ class HostFilterTestCase(test.TestCase):
             pass

     def test_all_host_filter(self):
-        hf = host_filter.AllHostsFilter()
+        hf = filters.AllHostsFilter()
         cooked = hf.instance_type_to_filter(self.instance_type)
         hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(10, len(hosts))
@@ -121,11 +119,10 @@ class HostFilterTestCase(test.TestCase):
             self.assertTrue(host.startswith('host'))

     def test_instance_type_filter(self):
-        hf = host_filter.InstanceTypeFilter()
+        hf = filters.InstanceTypeFilter()
         # filter all hosts that can support 50 ram and 500 disk
         name, cooked = hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
-                          name)
+        self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter')
         hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -134,21 +131,20 @@ class HostFilterTestCase(test.TestCase):
         self.assertEquals('host10', just_hosts[5])

     def test_instance_type_filter_extra_specs(self):
-        hf = host_filter.InstanceTypeFilter()
+        hf = filters.InstanceTypeFilter()
         # filter all hosts that can support 50 ram and 500 disk
         name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
-        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
-                          name)
+        self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter')
         hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(1, len(hosts))
         just_hosts = [host for host, caps in hosts]
         self.assertEquals('host07', just_hosts[0])

     def test_json_filter(self):
-        hf = host_filter.JsonFilter()
+        hf = filters.JsonFilter()
         # filter all hosts that can support 50 ram and 500 disk
         name, cooked = hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
+        self.assertEquals(name.split(".")[-1], 'JsonFilter')
         hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
-- cgit 
From b238bcd9de989e7dabe6698b3de77a104d96a941 Mon Sep 17 00:00:00 2001
From: Josh Kearney
Date: Fri, 12 Aug 2011 16:25:16 -0500
Subject: Updated logging.
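Without this, the debug line below interpolated vdi_uuid, which is the argument the chain walk started from, so every VDI in the chain was logged under the same UUID. Binding cur_vdi_uuid = vdi_rec['uuid'] makes the message report the VDI whose physical_utilisation is actually being added.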
--- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e86f72347..1f5f9b416 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -445,8 +445,9 @@ class VMHelper(HelperBase): """ size_bytes = 0 for vdi_rec in walk_vdi_chain(session, vdi_uuid): + cur_vdi_uuid = vdi_rec['uuid'] vdi_size_bytes = int(vdi_rec['physical_utilisation']) - LOG.debug(_('vdi_uuid=%(vdi_uuid)s vdi_size_bytes=' + LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=' '%(vdi_size_bytes)d' % locals())) size_bytes += vdi_size_bytes return size_bytes -- cgit From 1f3eb69ec547737447e91116881a8cb85157d65c Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 12 Aug 2011 18:16:07 -0700 Subject: fix issue introduced in merge --- nova/network/linux_net.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index be4269392..904014716 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -463,7 +463,7 @@ def initialize_gateway_device(dev, network_ref): # NOTE(vish): The ip for dnsmasq has to be the first address on the # bridge for it to respond to reqests properly - suffix = net_attrs['cidr'].rpartition('/')[2] + suffix = network_ref['cidr'].rpartition('/')[2] out, err = _execute('ip', 'addr', 'add', '%s/%s' % (network_ref['dhcp_server'], suffix), @@ -477,7 +477,7 @@ def initialize_gateway_device(dev, network_ref): raise exception.Error('Failed to add ip: %s' % err) if(FLAGS.use_ipv6): _execute('ip', '-f', 'inet6', 'addr', - 'change', net_attrs['cidr_v6'], + 'change', network_ref['cidr_v6'], 'dev', dev, run_as_root=True) # NOTE(vish): If the public interface is the same as the # bridge, then the bridge has to be in promiscuous -- cgit From a1e60edee71d2cb24739f2f44ba13fbf28e72c95 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:03:11 -0700 Subject: get rid of network_info hack and pass it everywhere --- nova/virt/libvirt/connection.py | 22 ++++++-------- nova/virt/libvirt/firewall.py | 64 +++++++++++++++------------------------ nova/virt/libvirt/netutils.py | 67 ----------------------------------------- 3 files changed, 33 insertions(+), 120 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 6d043577a..bc317d660 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -471,10 +471,10 @@ class LibvirtConnection(driver.ComputeDriver): # in the guest OS. But, in case of KVM, shutdown() does not work... 
self.destroy(instance, network_info, cleanup=False) self.plug_vifs(instance, network_info) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" @@ -531,7 +531,7 @@ class LibvirtConnection(driver.ComputeDriver): """ self.destroy(instance, network_info, cleanup=False) - xml = self.to_xml(instance, rescue=True) + xml = self.to_xml(instance, network_info, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} @@ -574,9 +574,9 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, context, instance, - network_info=None, block_device_info=None): - xml = self.to_xml(instance, False, network_info=network_info, + def spawn(self, context, instance, network_info, + block_device_info=None): + xml = self.to_xml(instance, network_info, False, block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) @@ -584,7 +584,7 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_boot(): """Called at an interval until the VM is running.""" @@ -992,10 +992,6 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=None): block_device_mapping = driver.block_device_info_get_mapping( block_device_info) - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = netutils.get_network_info(instance) nics = [] for (network, mapping) in network_info: @@ -1082,7 +1078,7 @@ class LibvirtConnection(driver.ComputeDriver): xml_info['disk'] = xml_info['basepath'] + "/disk" return xml_info - def to_xml(self, instance, rescue=False, network_info=None, + def to_xml(self, instance, network_info, rescue=False, block_device_info=None): # TODO(termie): cache? LOG.debug(_('instance %s: starting toXML method'), instance['name']) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 9ce57b6c9..fa29b99c3 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -40,17 +40,17 @@ except ImportError: class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Prepare filters for the instance. At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """Apply instance filter. 
Once this method returns, the instance should be firewalled @@ -60,9 +60,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -85,7 +83,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Create rules to block spoofing and allow dhcp. This gets called when spawning an instance, before @@ -150,7 +148,7 @@ class NWFilterFirewall(FirewallDriver): self.static_filters_configured = False self.handle_security_groups = False - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass @@ -189,13 +187,10 @@ class NWFilterFirewall(FirewallDriver): ''' - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" logging.info('called setup_basic_filtering in nwfilter') - if not network_info: - network_info = netutils.get_network_info(instance) - if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. @@ -300,10 +295,8 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Clear out the nwfilter rules.""" - if not network_info: - network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -326,16 +319,13 @@ class NWFilterFirewall(FirewallDriver): LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Creates an NWFilter for the given instance. In the process, it makes sure the filters for the provider blocks, security groups, and base filter are all in place. 
""" - if not network_info: - network_info = netutils.get_network_info(instance) - self.refresh_provider_fw_rules() ctxt = context.get_admin_context() @@ -500,9 +490,8 @@ class NWFilterFirewall(FirewallDriver): return 'nova-instance-%s' % (instance['name']) return 'nova-instance-%s-%s' % (instance['name'], nic_id) - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - network_info = netutils.get_network_info(instance) for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) @@ -521,6 +510,7 @@ class IptablesFirewallDriver(FirewallDriver): from nova.network import linux_net self.iptables = linux_net.iptables_manager self.instances = {} + self.network_infos = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) self.basicly_filtered = False @@ -529,22 +519,22 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up provider rules and basic NWFilter.""" - if not network_info: - network_info = netutils.get_network_info(instance) self.nwfilter.setup_basic_filtering(instance, network_info) if not self.basicly_filtered: LOG.debug(_('iptables firewall: Setup Basic Filtering')) self.refresh_provider_fw_rules() self.basicly_filtered = True - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): if self.instances.pop(instance['id'], None): + # NOTE(vish): use the passed info instead of the stored info + self.network_infos.pop(instance['id']) self.remove_filters_for_instance(instance) self.iptables.apply() self.nwfilter.unfilter_instance(instance, network_info) @@ -552,11 +542,10 @@ class IptablesFirewallDriver(FirewallDriver): LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) - def prepare_instance_filter(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def prepare_instance_filter(self, instance, network_info): self.instances[instance['id']] = instance - self.add_filters_for_instance(instance, network_info) + self.network_infos[instance['id']] = network_info + self.add_filters_for_instance(instance) self.iptables.apply() def _create_filter(self, ips, chain_name): @@ -583,7 +572,8 @@ class IptablesFirewallDriver(FirewallDriver): for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule) - def add_filters_for_instance(self, instance, network_info=None): + def add_filters_for_instance(self, instance): + network_info = self.network_infos[instance['id']] chain_name = self._instance_chain_name(instance) if FLAGS.use_ipv6: self.iptables.ipv6['filter'].add_chain(chain_name) @@ -601,9 +591,7 @@ class IptablesFirewallDriver(FirewallDriver): if FLAGS.use_ipv6: self.iptables.ipv6['filter'].remove_chain(chain_name) - def instance_rules(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def instance_rules(self, instance, network_info): ctxt = context.get_admin_context() 
ipv4_rules = [] @@ -726,14 +714,10 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.apply() @utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, - security_group, - network_info=None): + def do_refresh_security_group_rules(self, security_group): for instance in self.instances.values(): self.remove_filters_for_instance(instance) - if not network_info: - network_info = netutils.get_network_info(instance) - self.add_filters_for_instance(instance, network_info) + self.add_filters_for_instance(instance) def refresh_provider_fw_rules(self): """See class:FirewallDriver: docs.""" diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index a8e88fc07..6f303072d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -23,12 +23,7 @@ import netaddr -from nova import context -from nova import db -from nova import exception from nova import flags -from nova import ipv6 -from nova import utils FLAGS = flags.FLAGS @@ -47,65 +42,3 @@ def get_net_and_prefixlen(cidr): def get_ip_version(cidr): net = netaddr.IPNetwork(cidr) return int(net.version) - - -def get_network_info(instance): - # TODO(tr3buchet): this function needs to go away! network info - # MUST be passed down from compute - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - try: - fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) - except exception.FixedIpNotFoundForInstance: - fixed_ips = [] - - vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get(admin_context, - instance['instance_type_id']) - network_info = [] - - for vif in vifs: - network = vif['network'] - - # determine which of the instance's IPs belong to this network - network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if - fixed_ip['network_id'] == network['id']] - - def ip_dict(ip): - return { - 'ip': ip, - 'netmask': network['netmask'], - 'enabled': '1'} - - def ip6_dict(): - prefix = network['cidr_v6'] - mac = vif['address'] - project_id = instance['project_id'] - return { - 'ip': ipv6.to_global(prefix, mac, project_id), - 'netmask': network['netmask_v6'], - 'enabled': '1'} - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'dhcp_server': network['gateway'], - 'mac': vif['address'], - 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if network['dns1']: - mapping['dns'].append(network['dns1']) - if network['dns2']: - mapping['dns'].append(network['dns2']) - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict()] - mapping['gateway6'] = network['gateway_v6'] - - network_info.append((network, mapping)) - return network_info -- cgit From 87e97a868b4b7361937bac8f637ec014276aaf5c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:03:53 -0700 Subject: use dhcp server instead of gateway for filter exception --- nova/virt/libvirt/firewall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index fa29b99c3..a7a67dacb 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -609,7 +609,7 @@ class IptablesFirewallDriver(FirewallDriver): ipv4_rules += ['-j $provider'] ipv6_rules += ['-j $provider'] - dhcp_servers = [info['gateway'] for (_n, info) in network_info] + dhcp_servers = [info['dhcp_server'] 
for (_n, info) in network_info] for dhcp_server in dhcp_servers: ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' -- cgit From 87ff404bf2bffe690292f7d3922c1ca2529f852b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:17:20 -0700 Subject: rename project_net to same_net --- nova/virt/libvirt/connection.py | 6 +++--- nova/virt/libvirt/firewall.py | 8 ++++---- nova/virt/libvirt/vif.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index bc317d660..f905ce92b 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -32,7 +32,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). :injected_network_template: Template file for injected network -:allow_project_net_traffic: Whether to allow in project network traffic +:allow_same_net_traffic: Whether to allow in project network traffic """ @@ -96,9 +96,9 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -flags.DEFINE_bool('allow_project_net_traffic', +flags.DEFINE_bool('allow_same_net_traffic', True, - 'Whether to allow in project network traffic') + 'Whether to allow network traffic from same network') flags.DEFINE_bool('use_cow_images', True, 'Whether to use cow images') diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index a7a67dacb..11e3906b8 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -232,7 +232,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self.nova_base_ipv6_filter) self._define_filter(self.nova_dhcp_filter) self._define_filter(self.nova_ra_filter) - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: self._define_filter(self.nova_project_filter) if FLAGS.use_ipv6: self._define_filter(self.nova_project_filter_v6) @@ -378,7 +378,7 @@ class NWFilterFirewall(FirewallDriver): instance_filter_children = [base_filter, 'nova-provider-rules', instance_secgroup_filter_name] - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: instance_filter_children.append('nova-project') if FLAGS.use_ipv6: instance_filter_children.append('nova-project-v6') @@ -616,7 +616,7 @@ class IptablesFirewallDriver(FirewallDriver): '-j ACCEPT' % (dhcp_server,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrs = [network['cidr'] for (network, _m) in network_info] for cidr in cidrs: ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) @@ -633,7 +633,7 @@ class IptablesFirewallDriver(FirewallDriver): '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrv6s = [network['cidr_v6'] for (network, _m) in network_info] diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index e243d4fa0..4cb9abda4 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -44,7 +44,7 @@ class LibvirtBridgeDriver(VIFDriver): gateway6 = mapping.get('gateway6') mac_id = mapping['mac'].replace(':', '') - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: template = "\n" net, mask = netutils.get_net_and_mask(network['cidr']) values = [("PROJNET", net), ("PROJMASK", mask)] -- cgit From f7d1270c94d884e661a79d74fb2b2f88f6eb619f Mon Sep 17 00:00:00 2001 From: Vishvananda 
Ishaya Date: Fri, 12 Aug 2011 22:05:34 -0700 Subject: fix all of the tests --- nova/tests/test_libvirt.py | 84 ++++++++++++++++++++++------------------- nova/virt/libvirt/connection.py | 8 ++-- 2 files changed, 50 insertions(+), 42 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 2180cf4f0..df291ee68 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -49,18 +49,19 @@ def _create_network_info(count=1, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 fake = 'fake' - fake_ip = '0.0.0.0/0' - fake_ip_2 = '0.0.0.1/0' - fake_ip_3 = '0.0.0.1/0' + fake_ip = '10.11.12.13' + fake_ip_2 = '0.0.0.1' + fake_ip_3 = '0.0.0.1' fake_vlan = 100 fake_bridge_interface = 'eth0' network = {'bridge': fake, 'cidr': fake_ip, 'cidr_v6': fake_ip, + 'gateway_v6': fake, 'vlan': fake_vlan, 'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, - 'dhcp_server': fake, + 'dhcp_server': '10.0.0.1', 'gateway': fake, 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} @@ -273,15 +274,14 @@ class LibvirtConnTestCase(test.TestCase): conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) - result = conn._prepare_xml_info(instance_ref, False) - self.assertFalse(result['nics']) - - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info()) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(), + False) self.assertTrue(len(result['nics']) == 1) - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info(2)) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(2), + False) self.assertTrue(len(result['nics']) == 2) def test_xml_and_uri_no_ramdisk_no_kernel(self): @@ -408,16 +408,16 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info(2) conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) - xml = conn.to_xml(instance_ref, False, network_info) + xml = conn.to_xml(instance_ref, network_info, False) tree = xml_to_tree(xml) interfaces = tree.findall("./devices/interface") self.assertEquals(len(interfaces), 2) parameters = interfaces[0].findall('./filterref/parameter') self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertEquals(parameters[0].get('value'), '0.0.0.0/0') + self.assertEquals(parameters[0].get('value'), '10.11.12.13') self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertEquals(parameters[1].get('value'), 'fake') + self.assertEquals(parameters[1].get('value'), '10.0.0.1') def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, @@ -431,7 +431,8 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') - xml = conn.to_xml(instance_ref) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info) tree = xml_to_tree(xml) check = [ @@ -528,17 +529,20 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref, rescue) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, - '%s failed check %d' % (xml, i)) + '%s != %s failed check %d' % + (check(tree), expected_result, i)) for i, (check, expected_result) in 
enumerate(common_checks):
             self.assertEqual(check(tree), expected_result,
-                             '%s failed common check %d' % (xml, i))
+                             '%s != %s failed common check %d' %
+                             (check(tree), expected_result, i))

     # This test is supposed to make sure we don't
     # override a specifically set uri
@@ -942,8 +946,9 @@ class IptablesFirewallTestCase(test.TestCase):
         from nova.network import linux_net
         linux_net.iptables_manager.execute = fake_iptables_execute

-        self.fw.prepare_instance_filter(instance_ref)
-        self.fw.apply_instance_filter(instance_ref)
+        network_info = _create_network_info()
+        self.fw.prepare_instance_filter(instance_ref, network_info)
+        self.fw.apply_instance_filter(instance_ref, network_info)

         in_rules = filter(lambda l: not l.startswith('#'),
                           self.in_filter_rules)
@@ -1008,7 +1013,7 @@ class IptablesFirewallTestCase(test.TestCase):
         ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
         inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                       network_info)
-        self.fw.add_filters_for_instance(instance_ref, network_info)
+        self.fw.prepare_instance_filter(instance_ref, network_info)
         ipv4 = self.fw.iptables.ipv4['filter'].rules
         ipv6 = self.fw.iptables.ipv6['filter'].rules
         ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
@@ -1023,7 +1028,7 @@ class IptablesFirewallTestCase(test.TestCase):
         self.mox.StubOutWithMock(self.fw,
                                  'add_filters_for_instance',
                                  use_mock_anything=True)
-        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
+        self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
         self.fw.instances[instance_ref['id']] = instance_ref
         self.mox.ReplayAll()
         self.fw.do_refresh_security_group_rules("fake")
@@ -1043,11 +1048,12 @@ class IptablesFirewallTestCase(test.TestCase):
         instance_ref = self._create_instance_ref()
         _setup_networking(instance_ref['id'], self.test_ip)

-        self.fw.setup_basic_filtering(instance_ref)
-        self.fw.prepare_instance_filter(instance_ref)
-        self.fw.apply_instance_filter(instance_ref)
+        network_info = _create_network_info()
+        self.fw.setup_basic_filtering(instance_ref, network_info)
+        self.fw.prepare_instance_filter(instance_ref, network_info)
+        self.fw.apply_instance_filter(instance_ref, network_info)
         original_filter_count = len(fakefilter.filters)
-        self.fw.unfilter_instance(instance_ref)
+        self.fw.unfilter_instance(instance_ref, network_info)

         # should undefine just the instance filter
         self.assertEqual(original_filter_count - len(fakefilter.filters), 1)

@@ -1057,14 +1063,14 @@
     def test_provider_firewall_rules(self):
         # setup basic instance data
         instance_ref = self._create_instance_ref()
-        nw_info = _create_network_info(1)
         _setup_networking(instance_ref['id'], self.test_ip)
         # FRAGILE: peeks at how the firewall names chains
         chain_name = 'inst-%s' % instance_ref['id']

         # create a firewall via setup_basic_filtering like libvirt_conn.spawn
         # should have a chain with 0 rules
-        self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
+        network_info = _create_network_info(1)
+        self.fw.setup_basic_filtering(instance_ref, network_info)
         self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
         rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                  if rule.chain == 'provider']
@@ -1094,8 +1100,8 @@
         self.assertEqual(2, len(rules))

         # create the instance filter and make sure it has a jump rule
-        self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
-        self.fw.apply_instance_filter(instance_ref)
+        self.fw.prepare_instance_filter(instance_ref, 
network_info) + self.fw.apply_instance_filter(instance_ref, network_info) inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == chain_name] jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] @@ -1247,7 +1253,7 @@ class NWFilterTestCase(test.TestCase): def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - '561212121212') + 'fake') secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', @@ -1263,9 +1269,10 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance, network_info) + self.fw.prepare_instance_filter(instance, network_info) + self.fw.apply_instance_filter(instance, network_info) _ensure_all_called() self.teardown_security_group() db.instance_destroy(context.get_admin_context(), instance_ref['id']) @@ -1296,11 +1303,12 @@ class NWFilterTestCase(test.TestCase): instance = db.instance_get(self.context, inst_id) _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance, network_info) + self.fw.prepare_instance_filter(instance, network_info) + self.fw.apply_instance_filter(instance, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance) + self.fw.unfilter_instance(instance, network_info) # should undefine 2 filters: instance and instance-secgroup self.assertEqual(original_filter_count - len(fakefilter.filters), 2) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index f905ce92b..5945a725d 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -463,8 +463,8 @@ class LibvirtConnection(driver.ComputeDriver): """ virt_dom = self._conn.lookupByName(instance['name']) # NOTE(itoumsn): Use XML delived from the running instance - # instead of using to_xml(instance). This is almost the ultimate - # stupid workaround. + # instead of using to_xml(instance, network_info). This is almost + # the ultimate stupid workaround. xml = virt_dom.XMLDesc(0) # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers @@ -988,7 +988,7 @@ class LibvirtConnection(driver.ComputeDriver): else: raise exception.InvalidDevicePath(path=device_path) - def _prepare_xml_info(self, instance, rescue=False, network_info=None, + def _prepare_xml_info(self, instance, network_info, rescue, block_device_info=None): block_device_mapping = driver.block_device_info_get_mapping( block_device_info) @@ -1082,7 +1082,7 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=None): # TODO(termie): cache? 
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-        xml_info = self._prepare_xml_info(instance, rescue, network_info,
+        xml_info = self._prepare_xml_info(instance, network_info, rescue,
                                           block_device_info)
         xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
         LOG.debug(_('instance %s: finished toXML method'), instance['name'])
-- cgit 
From c533e6ed3d2df8725dbcb48e7e546eb853b7ad41 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Fri, 12 Aug 2011 22:36:10 -0700
Subject: make sure security groups come back on restart of nova-compute

---
 nova/compute/manager.py         |  6 ++++--
 nova/tests/test_compute.py      |  4 ++--
 nova/tests/test_libvirt.py      |  2 ++
 nova/virt/driver.py             |  2 +-
 nova/virt/fake.py               |  4 ++--
 nova/virt/libvirt/connection.py |  9 +++++----
 nova/virt/libvirt/firewall.py   | 14 ++++++--------
 nova/virt/xenapi_conn.py        |  2 +-
 8 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d38213083..5b98e9ec1 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -170,7 +170,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         elif drv_state == power_state.RUNNING:
             # Hyper-V and VMWareAPI drivers will raise an exception
             try:
-                self.driver.ensure_filtering_rules_for_instance(instance)
+                net_info = self._get_instance_nw_info(context, instance)
+                self.driver.ensure_filtering_rules_for_instance(instance,
+                                                                net_info)
             except NotImplementedError:
                 LOG.warning(_('Hypervisor driver does not '
                         'support firewall rules'))
@@ -1308,7 +1310,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         # This nwfilter is necessary on the destination host.
         # In addition, this method is creating filtering rule
         # onto destination host.
-        self.driver.ensure_filtering_rules_for_instance(instance_ref)
+        self.driver.ensure_filtering_rules_for_instance(instance_ref,
+                                                        network_info)

     def live_migration(self, context, instance_id, dest):
         """Executing live migration.
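For reference, the network_info now threaded through these driver calls is a list of (network, mapping) two-tuples. A hand-built value in the shape the test fakes construct, with placeholder addresses throughout, looks roughly like this:

    network = {'bridge': 'br100',
               'cidr': '10.0.0.0/24',
               'cidr_v6': 'fd00::/64',
               'gateway_v6': 'fd00::1',
               'vlan': 100,
               'bridge_interface': 'eth0'}
    mapping = {'mac': '02:16:3e:00:00:01',
               'dhcp_server': '10.0.0.1',
               'gateway': '10.0.0.1',
               'gateway6': 'fd00::1',
               'ips': [{'ip': '10.0.0.13'}]}
    network_info = [(network, mapping)]

    # Drivers iterate the pairs, one per virtual interface:
    for (network, mapping) in network_info:
        print network['bridge'], mapping['mac']
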
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..9d6e5aee5 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -632,7 +632,7 @@ class ComputeTestCase(test.TestCase): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.volume_manager = volmock @@ -657,7 +657,7 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.driver = drivermock diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index df291ee68..7f4a3b09a 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -644,6 +644,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) + network_info = _create_network_info() # Start test self.mox.ReplayAll() @@ -653,6 +654,7 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) conn.ensure_filtering_rules_for_instance(instance_ref, + network_info, time=fake_timer) except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index df4a66ac2..20af2666d 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -252,7 +252,7 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token pass - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """Setting up filtering rules and waiting for its completion. 
To migrate an instance, filtering rules to hypervisors
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 880702af1..2ffa33d40 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -487,7 +487,7 @@ class FakeConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')

-    def ensure_filtering_rules_for_instance(self, instance_ref):
+    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')

@@ -496,7 +496,7 @@ class FakeConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         return

-    def unfilter_instance(self, instance_ref, network_info=None):
+    def unfilter_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')

diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 5945a725d..71516011a 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1502,7 +1502,7 @@ class LibvirtConnection(driver.ComputeDriver):

         return

-    def ensure_filtering_rules_for_instance(self, instance_ref,
+    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                             time=None):
         """Setting up filtering rules and waiting for its completion.

@@ -1532,14 +1532,15 @@ class LibvirtConnection(driver.ComputeDriver):

         # If any instances never launch at destination host,
         # basic-filtering must be set here.
-        self.firewall_driver.setup_basic_filtering(instance_ref)
+        self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
         # setting up nova-instance-instance-xx mainly.
- self.firewall_driver.prepare_instance_filter(instance_ref) + self.firewall_driver.prepare_instance_filter(instance_ref, network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) while timeout_count: - if self.firewall_driver.instance_filter_exists(instance_ref): + if self.firewall_driver.instance_filter_exists(instance_ref, + network_info): break timeout_count.pop() if len(timeout_count) == 0: diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 11e3906b8..55fc58458 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -92,7 +92,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" raise NotImplementedError() @@ -391,9 +391,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self._filter_container(filter_name, filter_children)) - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -702,15 +700,15 @@ class IptablesFirewallDriver(FirewallDriver): return ipv4_rules, ipv6_rules - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - return self.nwfilter.instance_filter_exists(instance) + return self.nwfilter.instance_filter_exists(instance, network_info) def refresh_security_group_members(self, security_group): pass - def refresh_security_group_rules(self, security_group, network_info=None): - self.do_refresh_security_group_rules(security_group, network_info) + def refresh_security_group_rules(self, security_group): + self.do_refresh_security_group_rules(security_group) self.iptables.apply() @utils.synchronized('iptables', external=True) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 76b6c57fc..0ec957cf3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -309,7 +309,7 @@ class XenAPIConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """This method is supported only libvirt.""" return -- cgit From e0e49dd7340bbb26c82f18a94a6582a5684925fa Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Sat, 13 Aug 2011 18:58:29 -0700 Subject: have NetworkManager generate MAC address and pass it to the driver for plugging. Sets the stage for being able to do duplicate checks on those MACs as well. 
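The generation itself happens on the NetworkManager side and is not part of the hunks below; a rough sketch of one way to build a locally administered, unicast MAC (the helper name and the 02:16:3e prefix are assumptions for illustration, not the actual code):

    import random

    def _generate_mac_address():
        # A locally administered prefix plus three random octets; the
        # randomness is exactly why the duplicate checks mentioned
        # above are worth doing.
        mac = [0x02, 0x16, 0x3e,
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        return ':'.join('%02x' % octet for octet in mac)
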
--- nova/network/linux_net.py | 68 ++++++++++++++++++++++++++++-------------------
 nova/network/manager.py | 18 ++++++++++---
 nova/utils.py | 9 -------
 3 files changed, 54 insertions(+), 41 deletions(-)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 904014716..248f1ce5a 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -458,13 +458,13 @@ def floating_forward_rules(floating_ip, fixed_ip):
 def initialize_gateway_device(dev, network_ref):
- if not network_ref:
- return
+ if not network_ref:
+ return
- # NOTE(vish): The ip for dnsmasq has to be the first address on the
- # bridge for it to respond to requests properly
- suffix = network_ref['cidr'].rpartition('/')[2]
- out, err = _execute('ip', 'addr', 'add',
+ # NOTE(vish): The ip for dnsmasq has to be the first address on the
+ # bridge for it to respond to requests properly
+ suffix = network_ref['cidr'].rpartition('/')[2]
+ out, err = _execute('ip', 'addr', 'add',
 '%s/%s' % (network_ref['dhcp_server'],
 suffix),
 'brd',
@@ -473,19 +473,18 @@
 dev,
 run_as_root=True,
 check_exit_code=False)
- if err and err != 'RTNETLINK answers: File exists\n':
- raise exception.Error('Failed to add ip: %s' % err)
- if(FLAGS.use_ipv6):
- _execute('ip', '-f', 'inet6', 'addr',
+ if err and err != 'RTNETLINK answers: File exists\n':
+ raise exception.Error('Failed to add ip: %s' % err)
+ if(FLAGS.use_ipv6):
+ _execute('ip', '-f', 'inet6', 'addr',
 'change', network_ref['cidr_v6'],
 'dev', dev, run_as_root=True)
- # NOTE(vish): If the public interface is the same as the
- # bridge, then the bridge has to be in promiscuous
- # to forward packets properly.
- if(FLAGS.public_interface == dev):
- _execute('ip', 'link', 'set',
+ # NOTE(vish): If the public interface is the same as the
+ # bridge, then the bridge has to be in promiscuous
+ # to forward packets properly.
+ if(FLAGS.public_interface == dev):
+ _execute('ip', 'link', 'set',
 'dev', dev, 'promisc', 'on', run_as_root=True)
- _execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
 def get_dhcp_leases(context, network_ref):
@@ -718,6 +717,7 @@ def _ip_bridge_cmd(action, params, device):
 cmd.extend(['dev', device])
 return cmd
+
 # Similar to compute virt layers, the Linux network node
 # code uses a flexible driver model to support different ways
 # of creating ethernet interfaces and attaching them to the network.
 # act as gateway/dhcp/vpn/etc. endpoints not VM interfaces. 
-def plug(network):
- return interface_driver.plug(network)
+def plug(network, mac_address):
+ return interface_driver.plug(network, mac_address)
 def unplug(network):
@@ -737,7 +737,7 @@ class LinuxNetInterfaceDriver(object):
 """Abstract class that defines generic network host API"""
 """ for all Linux interface drivers."""
- def plug(self, network):
+ def plug(self, network, mac_address):
 """Create Linux device, return device name"""
 raise NotImplementedError()
@@ -749,13 +749,14 @@ class LinuxNetInterfaceDriver(object):
 # plugs interfaces using Linux Bridge
 class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
- def plug(self, network):
+ def plug(self, network, mac_address):
 if network.get('vlan', None) is not None:
 LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
 network['vlan'],
 network['bridge'],
 network['bridge_interface'],
- network)
+ network,
+ mac_address)
 else:
 LinuxBridgeInterfaceDriver.ensure_bridge(
 network['bridge'],
@@ -769,16 +770,16 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
 @classmethod
 def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
- net_attrs=None):
+ net_attrs=None, mac_address=None):
 """Create a vlan and bridge unless they already exist."""
 interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
- bridge_interface)
+ bridge_interface, mac_address)
 LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
 return interface
 @classmethod
 @utils.synchronized('ensure_vlan', external=True)
- def ensure_vlan(_self, vlan_num, bridge_interface):
+ def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
 """Create a vlan unless it already exists."""
 interface = 'vlan%s' % vlan_num
 if not _device_exists(interface):
@@ -787,6 +788,11 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
 'VLAN_PLUS_VID_NO_PAD', run_as_root=True)
 _execute('vconfig', 'add', bridge_interface, vlan_num,
 run_as_root=True)
+ # (danwent) the bridge will inherit this address, so we want to
+ # make sure it is the value set from the NetworkManager
+ if mac_address:
+ _execute('ip', 'link', 'set', interface, "address",
+ mac_address, run_as_root=True)
 _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
 return interface
@@ -812,6 +818,11 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
 _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
 # _execute('brctl setageing %s 10' % bridge, run_as_root=True)
 _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
+ # (danwent) bridge device MAC address can't be set directly.
+ # instead it inherits the MAC address of the first device on the
+ # bridge, which will either be the vlan interface, or a
+ # physical NIC. 
+ _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True) if interface: out, err = _execute('brctl', 'addif', bridge, interface, @@ -856,11 +867,10 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): # plugs interfaces using Open vSwitch class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): - def plug(self, network): + def plug(self, network, mac_address): dev = "gw-" + str(network['id']) if not _device_exists(dev): bridge = FLAGS.linuxnet_ovs_integration_bridge - mac_addr = utils.generate_mac_address() _execute('ovs-vsctl', '--', '--may-exist', 'add-port', bridge, dev, '--', 'set', 'Interface', dev, "type=internal", @@ -869,10 +879,12 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): '--', 'set', 'Interface', dev, "external-ids:iface-status=active", '--', 'set', 'Interface', dev, - "external-ids:attached-mac=%s" % mac_addr, + "external-ids:attached-mac=%s" % mac_address, run_as_root=True) - _execute('ip', 'link', 'set', dev, "address", mac_addr, + _execute('ip', 'link', 'set', dev, "address", mac_address, run_as_root=True) + _execute('ip', 'link', 'set', dev, 'up', run_as_root=True) + return dev def unplug(self, network): diff --git a/nova/network/manager.py b/nova/network/manager.py index 653d116fb..1ad48fbc1 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -510,7 +510,7 @@ class NetworkManager(manager.SchedulerDependentManager): def _allocate_mac_addresses(self, context, instance_id, networks): """Generates mac addresses and creates vif rows in db for them.""" for network in networks: - vif = {'address': utils.generate_mac_address(), + vif = {'address': self.generate_mac_address(), 'instance_id': instance_id, 'network_id': network['id']} # try FLAG times to create a vif record with a unique mac_address @@ -519,12 +519,20 @@ class NetworkManager(manager.SchedulerDependentManager): self.db.virtual_interface_create(context, vif) break except exception.VirtualInterfaceCreateException: - vif['address'] = utils.generate_mac_address() + vif['address'] = self.generate_mac_address() else: self.db.virtual_interface_delete_by_instance(context, instance_id) raise exception.VirtualInterfaceMacAddressException() + def generate_mac_address(self): + """Generate an Ethernet MAC address.""" + mac = [0x02, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): """Adds a fixed ip to an instance from specified network.""" networks = [self.db.network_get(context, network_id)] @@ -796,7 +804,8 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): """Sets up network on this host.""" network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - dev = self.driver.plug(network_ref) + mac_address = self.generate_mac_address() + dev = self.driver.plug(network_ref, mac_address) self.driver.initialize_gateway_device(dev, network_ref) if not FLAGS.fake_network: @@ -897,7 +906,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): address = network_ref['vpn_public_address'] network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - dev = self.driver.plug(network_ref) + mac_address = self.generate_mac_address() + dev = self.driver.plug(network_ref, mac_address) self.driver.initialize_gateway_device(dev, network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set diff --git a/nova/utils.py b/nova/utils.py index 
46d60c735..7276b6bd5 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -338,15 +338,6 @@ def get_my_linklocal(interface): " :%(ex)s") % locals()) -def generate_mac_address(): - """Generate an Ethernet MAC address.""" - mac = [0x02, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - - def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: -- cgit From 53ca062830639de242d2cadda4f6bf473d4b6b62 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Sun, 14 Aug 2011 20:05:18 -0700 Subject: fix missing 'run_as_root' from bad merge --- nova/network/linux_net.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 248f1ce5a..57c1d0c28 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -846,8 +846,10 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): fields = line.split() if fields and fields[0] == 'inet': params = fields[1:-1] - _execute(*_ip_bridge_cmd('del', params, fields[-1])) - _execute(*_ip_bridge_cmd('add', params, bridge)) + _execute(*_ip_bridge_cmd('del', params, fields[-1]), + run_as_root=True) + _execute(*_ip_bridge_cmd('add', params, bridge), + run_as_root=True) if gateway: _execute('route', 'add', 'default', 'gw', gateway, run_as_root=True) -- cgit From b46320a4175adc4012e60d4eae793a42f3a8186b Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 15 Aug 2011 02:55:22 -0700 Subject: make list response for floating ip match other apis --- nova/api/openstack/contrib/floating_ips.py | 4 ++-- nova/tests/api/openstack/contrib/test_floating_ips.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index c07bfdf09..f6824a601 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -43,8 +43,8 @@ def _translate_floating_ip_view(floating_ip): def _translate_floating_ips_view(floating_ips): - return {'floating_ips': [_translate_floating_ip_view(floating_ip) - for floating_ip in floating_ips]} + return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] + for ip in floating_ips]} class FloatingIPController(object): diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..7996ebbb9 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -116,14 +116,14 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'floating_ip': {'instance_id': 11, - 'ip': '10.10.10.10', - 'fixed_ip': '10.0.0.1', - 'id': 1}}, - {'floating_ip': {'instance_id': None, - 'ip': '10.10.10.11', - 'fixed_ip': None, - 'id': 2}}]} + response = {'floating_ips': [{'instance_id': 11, + 'ip': '10.10.10.10', + 'fixed_ip': '10.0.0.1', + 'id': 1}, + {'instance_id': None, + 'ip': '10.10.10.11', + 'fixed_ip': None, + 'id': 2}]} self.assertEqual(res_dict, response) def test_floating_ip_show(self): -- cgit From 1a2bc77871af060069ba0de80637198be78f8169 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 15 Aug 2011 09:47:01 -0500 Subject: Fixed merge conflict. 
--- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index bf3bf4610..b6f471c0f 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -414,7 +414,7 @@ class VMHelper(HelperBase): return vdi_ref @classmethod - def fetch_image(cls, context, session, instance_id, image, user_id, + def fetch_image(cls, context, session, instance, image, user_id, project_id, image_type): """Fetch image from glance based on image type. -- cgit From c53d0567e4526b1e4a2ee5665ac81170a1771d17 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 11:10:44 -0700 Subject: Fix the tests when libvirt actually exists --- nova/tests/test_libvirt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 7f4a3b09a..f3f7e8057 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -627,7 +627,7 @@ class LibvirtConnTestCase(test.TestCase): return # Preparing mocks - def fake_none(self): + def fake_none(self, *args): return def fake_raise(self): @@ -1050,7 +1050,7 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - network_info = _create_network_info + network_info = _create_network_info() self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) -- cgit From 055c7692fc91d1d7d709c975fd223cc67f18ef8f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 11:31:44 -0700 Subject: remove openwrt image --- smoketests/openwrt-x86-ext2.image | Bin 4612608 -> 0 bytes smoketests/openwrt-x86-vmlinuz | Bin 1169948 -> 0 bytes smoketests/random.image | Bin 0 -> 65536 bytes smoketests/random.kernel | Bin 0 -> 16384 bytes smoketests/test_sysadmin.py | 4 ++-- 5 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 smoketests/openwrt-x86-ext2.image delete mode 100644 smoketests/openwrt-x86-vmlinuz create mode 100644 smoketests/random.image create mode 100644 smoketests/random.kernel diff --git a/smoketests/openwrt-x86-ext2.image b/smoketests/openwrt-x86-ext2.image deleted file mode 100644 index cd2dfa426..000000000 Binary files a/smoketests/openwrt-x86-ext2.image and /dev/null differ diff --git a/smoketests/openwrt-x86-vmlinuz b/smoketests/openwrt-x86-vmlinuz deleted file mode 100644 index 59cc9bb1f..000000000 Binary files a/smoketests/openwrt-x86-vmlinuz and /dev/null differ diff --git a/smoketests/random.image b/smoketests/random.image new file mode 100644 index 000000000..f2c0c30bb Binary files /dev/null and b/smoketests/random.image differ diff --git a/smoketests/random.kernel b/smoketests/random.kernel new file mode 100644 index 000000000..01a6284dd Binary files /dev/null and b/smoketests/random.kernel differ diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py index 454f6f1d5..29cda1a9b 100644 --- a/smoketests/test_sysadmin.py +++ b/smoketests/test_sysadmin.py @@ -35,9 +35,9 @@ from smoketests import flags from smoketests import base FLAGS = flags.FLAGS -flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz', +flags.DEFINE_string('bundle_kernel', 'random.kernel', 'Local kernel file to use for bundling tests') -flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image', +flags.DEFINE_string('bundle_image', 'random.image', 'Local image 
file to use for bundling tests') TEST_PREFIX = 'test%s' % int(random.random() * 1000000) -- cgit From bbd577de616915025e524e330f1991f3f155388c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 15 Aug 2011 13:35:53 -0500 Subject: Corrected names in TODO/FIXME. --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b6f471c0f..4a1f07bb1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -501,7 +501,7 @@ class VMHelper(HelperBase): def _check_vdi_size(cls, context, session, instance, vdi_uuid): size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid) - # FIXME(sirp): this was copied directly from compute.manager.py, let's + # FIXME(jk0): this was copied directly from compute.manager.py, let's # refactor this to a common area instance_type_id = instance['instance_type_id'] instance_type = db.instance_type_get(context, @@ -853,7 +853,7 @@ def get_vhd_parent_uuid(session, vdi_ref): def walk_vdi_chain(session, vdi_uuid): """Yield vdi_recs for each element in a VDI chain""" - # TODO: perhaps make get_vhd_parent use this + # TODO(jk0): perhaps make get_vhd_parent use this while True: vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) -- cgit From bc7892f698fbfc21f8d242f52e012d9165e46de7 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Mon, 15 Aug 2011 11:55:53 -0700 Subject: Adding standard inclusion of a body param which most http clients will send along with a POST request. --- nova/api/openstack/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/contrib/test_floating_ips.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index c07bfdf09..121a1d4a0 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,7 +131,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id): + def disassociate(self, req, id, body): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..dd58a1b22 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -177,8 +177,10 @@ class FloatingIpTest(test.TestCase): self.assertEqual(actual, expected) def test_floating_ip_disassociate(self): + body = dict() req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') req.method = 'POST' + req.body = json.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) -- cgit From fdb8c92739026e96ac52fc165d70c8f8c7594177 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Mon, 15 Aug 2011 12:04:51 -0700 Subject: making body default to none --- nova/api/openstack/contrib/floating_ips.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 121a1d4a0..768b9deb1 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,7 +131,7 @@ class 
FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id, body): + def disassociate(self, req, id, body=None): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) -- cgit From 54ba0d6d25f60b5acb363d141aeba63e4c727c72 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:28:28 -0500 Subject: fix typo where I forgot a comma --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 1513dd473..31bb29f81 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -671,7 +671,7 @@ class NetworkManager(manager.SchedulerDependentManager): if used_subnet in subnet: msg = _('requested cidr (%{cidr}) conflicts with ' 'existing smaller cidr (%{smaller})') - raise ValueError(msg % {'cidr': subnet + raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) -- cgit From d3e2be67d116dcdbe0484a10708ae00d040d2d9f Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:29:55 -0500 Subject: i hate these exceptions where it should just return an empty list --- nova/network/manager.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 31bb29f81..ddb60ecfa 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -656,7 +656,10 @@ class NetworkManager(manager.SchedulerDependentManager): subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4, count=num_networks)) - nets = self.db.network_get_all(context) + try: + nets = self.db.network_get_all(context) + except exception.NoNetworksFound: + nets = [] used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets] for subnet in subnets_v4: -- cgit From 5276b4981dc6e8c4f3d4b9c733939290df3c6a72 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:45:14 -0500 Subject: pep8 fix --- nova/network/manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index ddb60ecfa..6ba73300e 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -677,7 +677,6 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) - subnets = itertools.izip_longest(subnets_v4, subnets_v6) for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = {} -- cgit From 945a874a77c63710f57fa31988ba7f9ba65a5ad0 Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 15:47:11 -0500 Subject: return the created networks --- nova/network/manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index 6ba73300e..7748479e9 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -677,6 +677,7 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) + networks = [] subnets = itertools.izip_longest(subnets_v4, subnets_v6) for index, (subnet_v4, subnet_v6) in enumerate(subnets): net = {} @@ -728,9 +729,12 @@ class NetworkManager(manager.SchedulerDependentManager): if not network: raise ValueError(_('Network already exists!')) + else: + networks.append(network) if network and cidr and subnet_v4: self._create_fixed_ips(context, network['id']) + return networks @property def _bottom_reserved_ips(self): # pylint: disable=R0201 -- cgit From 
a20e18c5ae2c77ed005e5dc9cec7b92d67e50a0b Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:30:51 -0500 Subject: allow for finding a network that fits the size, also format string correctly --- nova/network/manager.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 7748479e9..7e9a0ecf6 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -662,18 +662,26 @@ class NetworkManager(manager.SchedulerDependentManager): nets = [] used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets] - for subnet in subnets_v4: + for subnet in list(subnets_v4): if subnet in used_subnets: - raise ValueError(_('cidr already in use')) + next_subnet = subnet.next() + while next_subnet in subnets_v4: + next_subnet = next_subnet.next() + if next_subnet in fixed_net_v4: + subnets_v4.remove(subnet) + subnets_v4.append(next_subnet) + subnet = next_subnet + else: + raise ValueError(_('cidr already in use')) for used_subnet in used_subnets: if subnet in used_subnet: - msg = _('requested cidr (%{cidr}) conflicts with ' - 'existing supernet (%{super})') + msg = _('requested cidr (%(cidr)s) conflicts with ' + 'existing supernet (%(super)s)') raise ValueError(msg % {'cidr': subnet, 'super': used_subnet}) if used_subnet in subnet: - msg = _('requested cidr (%{cidr}) conflicts with ' - 'existing smaller cidr (%{smaller})') + msg = _('requested cidr (%(cidr)s) conflicts with ' + 'existing smaller cidr (%(smaller)s)') raise ValueError(msg % {'cidr': subnet, 'smaller': used_subnet}) -- cgit From a34943e89e9aee0a26bd4fd03a2b12fc954029fd Mon Sep 17 00:00:00 2001 From: Jason Koelker Date: Mon, 15 Aug 2011 16:41:11 -0500 Subject: have the tests call create_networks directly --- nova/network/manager.py | 28 ++++++++++++------ nova/tests/test_network.py | 72 +++++++++++++++++++++++++++++----------------- 2 files changed, 65 insertions(+), 35 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 7e9a0ecf6..4954dc7e9 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -662,12 +662,17 @@ class NetworkManager(manager.SchedulerDependentManager): nets = [] used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets] + def find_next(subnet): + next_subnet = subnet.next() + while next_subnet in subnets_v4: + next_subnet = next_subnet.next() + if next_subnet in fixed_net_v4: + return next_subnet + for subnet in list(subnets_v4): if subnet in used_subnets: - next_subnet = subnet.next() - while next_subnet in subnets_v4: - next_subnet = next_subnet.next() - if next_subnet in fixed_net_v4: + next_subnet = find_next(subnet) + if next_subnet: subnets_v4.remove(subnet) subnets_v4.append(next_subnet) subnet = next_subnet @@ -680,10 +685,17 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(msg % {'cidr': subnet, 'super': used_subnet}) if used_subnet in subnet: - msg = _('requested cidr (%(cidr)s) conflicts with ' - 'existing smaller cidr (%(smaller)s)') - raise ValueError(msg % {'cidr': subnet, - 'smaller': used_subnet}) + next_subnet = find_next(subnet) + if next_subnet: + subnets_v4.remove(subnet) + subnets_v4.append(next_subnet) + subnet = next_subnet + else: + msg = _('requested cidr (%(cidr)s) conflicts ' + 'with existing smaller cidr ' + '(%(smaller)s)') + raise ValueError(msg % {'cidr': subnet, + 'smaller': used_subnet}) networks = [] subnets = itertools.izip_longest(subnets_v4, subnets_v6) diff --git a/nova/tests/test_network.py 
b/nova/tests/test_network.py index c673f5d06..0ead680ee 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -255,7 +255,7 @@ class CommonNetworkTestCase(test.TestCase): raise exception.NetworkNotFoundForCidr() def network_create_safe(self, context, net): - fakenet = {} + fakenet = dict(net) fakenet['id'] = 999 return fakenet @@ -269,6 +269,9 @@ class CommonNetworkTestCase(test.TestCase): def deallocate_fixed_ip(self, context, address): self.deallocate_called = address + def _create_fixed_ips(self, context, network_id): + pass + def fake_create_fixed_ips(self, context, network_id): return None @@ -286,16 +289,20 @@ class CommonNetworkTestCase(test.TestCase): def test_validate_cidrs(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/24', + False, 1, 256, None, None, None, + None) self.assertEqual(1, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in cidrs) def test_validate_cidrs_split_exact_in_half(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128) + nets = manager.create_networks(None, 'fake', '192.168.0.0/24', + False, 2, 128, None, None, None, + None) self.assertEqual(2, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/25' in cidrs) self.assertTrue('192.168.0.128/25' in cidrs) @@ -306,9 +313,11 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/24'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 4, 256, None, None, None, + None) self.assertEqual(4, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: @@ -324,8 +333,9 @@ class CommonNetworkTestCase(test.TestCase): self.mox.ReplayAll() # ValueError: requested cidr (192.168.2.0/24) conflicts with # existing smaller cidr - args = [None, '192.168.2.0/24', 1, 256] - self.assertRaises(ValueError, manager._validate_cidrs, *args) + args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None, + None, None) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_smaller_cidr_in_use(self): manager = self.FakeNetworkManager() @@ -334,9 +344,10 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/25'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 4, 256, None, None, None, None) self.assertEqual(4, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: @@ -350,9 +361,10 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.9/29'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) + nets = manager.create_networks(None, 'fake', '192.168.2.0/24', + False, 3, 32, None, None, None, None) self.assertEqual(3, 
len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr in cidrs) @@ -367,17 +379,19 @@ class CommonNetworkTestCase(test.TestCase): {'id': 3, 'cidr': '192.168.2.128/26'}] manager.db.network_get_all(ctxt).AndReturn(in_use) self.mox.ReplayAll() - args = [None, '192.168.2.0/24', 3, 64] + args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None, + None, None) # ValueError: Not enough subnets avail to satisfy requested num_ # networks - some subnets in requested range already # in use - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): manager = self.FakeNetworkManager() - args = [None, '192.168.0.0/24', 2, 256] + args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, + None, None) # ValueError: network_size * num_networks exceeds cidr size - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_already_used(self): manager = self.FakeNetworkManager() @@ -387,20 +401,23 @@ class CommonNetworkTestCase(test.TestCase): 'cidr': '192.168.0.0/24'}]) self.mox.ReplayAll() # ValueError: cidr already in use - args = [None, '192.168.0.0/24', 1, 256] - self.assertRaises(ValueError, manager._validate_cidrs, *args) + args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, + None, None) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_too_many(self): manager = self.FakeNetworkManager() - args = [None, '192.168.0.0/24', 200, 256] + args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, + None, None) # ValueError: Not enough subnets avail to satisfy requested # num_networks - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) - returned_cidrs = [str(net) for net in nets] + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 2, 256, None, None, None, None) + returned_cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in returned_cidrs) self.assertTrue('192.168.1.0/24' in returned_cidrs) @@ -411,10 +428,11 @@ class CommonNetworkTestCase(test.TestCase): fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() - args = [None, '192.168.0.0/24', 1, 256] + args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, + None, None) # ValueError: requested cidr (192.168.0.0/24) conflicts # with existing supernet - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_create_networks(self): cidr = '192.168.0.0/24' @@ -424,7 +442,7 @@ class CommonNetworkTestCase(test.TestCase): args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None] result = manager.create_networks(*args) - self.assertEqual(manager.create_networks(*args), None) + self.assertTrue(manager.create_networks(*args)) def test_create_networks_cidr_already_used(self): manager = self.FakeNetworkManager() @@ -444,4 +462,4 @@ class CommonNetworkTestCase(test.TestCase): 
self.fake_create_fixed_ips)
 args = [None, 'foo', cidr, None, 10, 256,
 'fd00::/48', None, None, None]
- self.assertEqual(manager.create_networks(*args), None)
+ self.assertTrue(manager.create_networks(*args))
-- cgit From cef63a6c2e8f3e1baa126ba41703aac81c2fc6ae Mon Sep 17 00:00:00 2001
From: Jason Koelker
Date: Mon, 15 Aug 2011 16:46:30 -0500
Subject: add note

--- nova/network/manager.py | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 4954dc7e9..a52dd1953 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -656,6 +656,8 @@ class NetworkManager(manager.SchedulerDependentManager):
 subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
 count=num_networks))
+ # NOTE(jkoelker): This replaces the _validate_cidrs call and
+ # prevents looping multiple times
 try:
 nets = self.db.network_get_all(context)
 except exception.NoNetworksFound:
-- cgit From 8b4805b24bb51adca501a38b4b7dbf730cc826d2 Mon Sep 17 00:00:00 2001
From: Jason Koelker
Date: Mon, 15 Aug 2011 16:47:48 -0500
Subject: pep8

--- nova/compute/manager.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 16b8e14b4..4e84cb936 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1315,7 +1315,8 @@ class ComputeManager(manager.SchedulerDependentManager):
 # This nwfilter is necessary on the destination host.
 # In addition, this method is creating filtering rule
 # onto destination host.
- self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info)
+ self.driver.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
 # Preparation for block migration
 if block_migration:
-- cgit From e57e9e9bbc37fbe87052ccc66bf7b97501e1e759 Mon Sep 17 00:00:00 2001
From: Jason Koelker
Date: Mon, 15 Aug 2011 16:48:28 -0500
Subject: pep8

--- nova/virt/libvirt/connection.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 2b17e244a..6ad4328b1 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1539,7 +1539,8 @@ class LibvirtConnection(driver.ComputeDriver):
 # basic-filtering must be set here.
 self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
 # setting up nova-instance-instance-xx mainly. 
- self.firewall_driver.prepare_instance_filter(instance_ref, network_info) + self.firewall_driver.prepare_instance_filter(instance_ref, + network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) -- cgit From 55dd18f30eee4f4a75c825c33d4a78b2ef94be4a Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 15 Aug 2011 17:09:39 -0500 Subject: got tests passing with logic changes --- nova/scheduler/abstract_scheduler.py | 53 +++--- nova/scheduler/base_scheduler.py | 50 +----- nova/scheduler/filters/__init__.py | 17 ++ nova/scheduler/filters/abstract_filter.py | 2 +- nova/scheduler/filters/json_filter.py | 14 +- nova/scheduler/host_filter.py | 25 +-- nova/scheduler/least_cost.py | 134 +++++++-------- nova/tests/scheduler/test_host_filter.py | 2 - nova/tests/scheduler/test_least_cost_scheduler.py | 16 +- nova/tests/test_host_filter.py | 200 ---------------------- 10 files changed, 125 insertions(+), 388 deletions(-) delete mode 100644 nova/tests/test_host_filter.py diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a0734f322..2f1ede0a4 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -45,20 +45,19 @@ LOG = logging.getLogger('nova.scheduler.abstract_scheduler') class InvalidBlob(exception.NovaException): message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") + "to instance create request.") class AbstractScheduler(driver.Scheduler): """Base class for creating Schedulers that can work across any nova deployment, from simple designs to multiply-nested zones. """ - def _call_zone_method(self, context, method, specs, zones): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs, zones=zones) def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone.""" host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] @@ -68,21 +67,21 @@ class AbstractScheduler(driver.Scheduler): # support at some point? Also, OS API has no concept of security # groups. instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) + image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) + queue = db.queue_get_for(context, "compute", host) + params = {"method": "run_instance", "args": kwargs} + rpc.cast(context, queue, params) LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) + % locals()) def _decrypt_blob(self, blob): """Returns the decrypted blob or None if invalid. Broken out - for testing.""" + for testing. + """ decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) try: json_entry = decryptor(blob) @@ -92,15 +91,15 @@ class AbstractScheduler(driver.Scheduler): return None def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): + request_spec, kwargs): """Once we have determined that the request should go to one of our children, we need to fabricate a new POST /servers/ call with the same parameters that were passed into us. Note that we have to reverse engineer from our args to get back the image, flavor, ipgroup, etc. 
since the original call could have - come in from EC2 (which doesn't use these things).""" - + come in from EC2 (which doesn't use these things). + """ instance_type = request_spec['instance_type'] instance_properties = request_spec['instance_properties'] @@ -109,30 +108,26 @@ class AbstractScheduler(driver.Scheduler): meta = instance_properties['metadata'] flavor_id = instance_type['flavorid'] reservation_id = instance_properties['reservation_id'] - files = kwargs['injected_files'] ipgroup = None # Not supported in OS API ... yet - child_zone = zone_info['child_zone'] child_blob = zone_info['child_blob'] zone = db.zone_get(context, child_zone) url = zone.api_url LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) + ". ReservationID=%(reservation_id)s") % locals()) nova = None try: nova = novaclient.Client(zone.username, zone.password, None, url) nova.authenticate() except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - + "to talk to zone at %(url)s.") % locals()) nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) + child_blob, reservation_id=reservation_id) def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): + instance_id, request_spec, kwargs): """Create the requested resource locally or in a child zone based on what is stored in the zone blob info. @@ -145,8 +140,8 @@ class AbstractScheduler(driver.Scheduler): means we gathered the info from one of our children. It's possible that, when we decrypt the 'blob' field, it contains "child_blob" data. In which case we forward the - request.""" - + request. + """ host_info = None if "blob" in build_plan_item: # Request was passed in from above. Is it for us? @@ -161,21 +156,20 @@ class AbstractScheduler(driver.Scheduler): # Valid data ... is it for us? if 'child_zone' in host_info and 'child_blob' in host_info: self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) + request_spec, kwargs) else: self._provision_resource_locally(context, host_info, request_spec, - kwargs) + kwargs) def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone or a child zone.""" if "hostname" in build_plan_item: self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) + request_spec, kwargs) return - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) + instance_id, request_spec, kwargs) def _adjust_child_weights(self, child_results, zones): """Apply the Scale and Offset values from the Zone definition @@ -231,7 +225,6 @@ class AbstractScheduler(driver.Scheduler): for num in xrange(num_instances): if not build_plan: break - build_plan_item = build_plan.pop(0) self._provision_resource(context, build_plan_item, instance_id, request_spec, kwargs) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index e14ee349e..35e5af035 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -43,40 +43,13 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler): # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. 
- instance_type = request_spec['instance_type']
+ instance_type = request_spec.get("instance_type", None)
+ if instance_type is None:
+ # No way to select; return the specified hosts
+ return hosts or []
 name, query = selected_filter.instance_type_to_filter(instance_type)
 return selected_filter.filter_hosts(self.zone_manager, query)
- def filter_hosts(self, topic, request_spec, host_list=None):
- """Return a list of hosts which are acceptable for scheduling.
- Return value should be a list of (hostname, capability_dict)s.
- Derived classes may override this, but may find the
- '_filter' function more appropriate.
- """
- def _default_filter(self, hostname, capabilities, request_spec):
- """Default filter function if there's no _filter"""
- # NOTE(sirp): The default logic is the equivalent to
- # AllHostsFilter
- return True
-
- filter_func = getattr(self, '%s_filter' % topic, _default_filter)
-
- if host_list is None:
- first_run = True
- host_list = self.zone_manager.service_states.iteritems()
- else:
- first_run = False
-
- filtered_hosts = []
- for host, services in host_list:
- if first_run:
- if topic not in services:
- continue
- services = services[topic]
- if filter_func(host, services, request_spec):
- filtered_hosts.append((host, services))
- return filtered_hosts
-
 def weigh_hosts(self, topic, request_spec, hosts):
 """Derived classes may override this to provide more sophisticated
 scheduling objectives
@@ -84,18 +57,3 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
 # NOTE(sirp): The default logic is the same as the NoopCostFunction
 return [dict(weight=1, hostname=hostname, capabilities=capabilities)
 for hostname, capabilities in hosts]
-
- def compute_consume(self, capabilities, instance_type):
- """Consume compute resources for selected host"""
-
- requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
- capabilities['host_memory_free'] -= requested_mem
-
- def consume_resources(self, topic, capabilities, instance_type):
- """Consume resources for a specific host. 'host' is a tuple
- of the hostname and the services"""
-
- consume_func = getattr(self, '%s_consume' % topic, None)
- if not consume_func:
- return
- consume_func(capabilities, instance_type)
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 4c9187c5a..b86fb795f 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -13,6 +13,23 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+"""
+There are three filters included: AllHosts, InstanceType & JSON.
+
+AllHosts just returns the full, unfiltered list of hosts.
+InstanceType is a hard-coded matching mechanism based on flavor criteria.
+JSON is an ad-hoc filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently InstanceTypes are used for specifying the type of instance desired.
+Specific Nova users have noted a need for a more expressive way of specifying
+instance requirements. Since we don't want to get into building a full DSL,
+this filter is a simple form as an example of how this could be done.
+In reality, most consumers will use the more rigid filters such as the
+InstanceType filter. 
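+
+For example, a query that keeps only hosts with at least 50 MB of free
+memory and 500 GB of available disk might look like the following (an
+illustrative sketch; the capability names mirror the JSON filter tests
+later in this series):
+
+    ['and',
+        ['>=', '$compute.host_memory_free', 50],
+        ['>=', '$compute.disk_available', 500]]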
+""" + from abstract_filter import AbstractHostFilter from all_hosts_filter import AllHostsFilter from instance_type_filter import InstanceTypeFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index fe5610923..d9d272130 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -19,7 +19,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('default_host_filter', - 'nova.scheduler.filters.AllHostsFilter', + 'AllHostsFilter', 'Which filter to use for filtering hosts') class AbstractHostFilter(object): diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py index 889b96915..caf22f5d5 100644 --- a/nova/scheduler/filters/json_filter.py +++ b/nova/scheduler/filters/json_filter.py @@ -20,11 +20,6 @@ import operator import nova.scheduler from nova.scheduler.filters import abstract_filter -def debug(*args): - with file("/tmp/debug", "a") as dbg: - msg = " ".join([str(arg) for arg in args]) - dbg.write("%s\n" % msg) - class JsonFilter(abstract_filter.AbstractHostFilter): """Host Filter to allow simple JSON-based grammar for @@ -38,12 +33,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter): if len(args) < 2: return False if op is operator.contains: - debug("ARGS", type(args), args) - debug("op", op) - debug("REVERSED!!!") - # operator.contains reverses the param order. - bad = [arg for arg in args[1:] - if not op(args, args[0])] + bad = not args[0] in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] @@ -144,8 +134,6 @@ class JsonFilter(abstract_filter.AbstractHostFilter): specified in the query. """ expanded = json.loads(query) - - debug("expanded", type(expanded), expanded) filtered_hosts = [] for host, services in zone_manager.service_states.iteritems(): result = self._process_filter(zone_manager, expanded, host, diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index f5191f5c9..be618f3f3 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -20,43 +20,32 @@ either incompatible or insufficient to accept a newly-requested instance are removed by Host Filter classes from consideration. Those that pass the filter are then passed on for weighting or other process for ordering. -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. +Filters are in the 'filters' directory that is off the 'scheduler' +directory of nova. Additional filters can be created and added to that +directory; be sure to add them to the filters/__init__.py file so that +they are part of the nova.schedulers.filters namespace. 
""" -import json import types from nova import exception from nova import flags -from nova import log as logging - import nova.scheduler -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS def _get_filters(): + # Imported here to avoid circular imports from nova.scheduler import filters def get_itm(nm): return getattr(filters, nm) return [get_itm(itm) for itm in dir(filters) if (type(get_itm(itm)) is types.TypeType) - and issubclass(get_itm(itm), filters.AbstractHostFilter)] + and issubclass(get_itm(itm), filters.AbstractHostFilter) + and get_itm(itm) is not filters.AbstractHostFilter] def choose_host_filter(filter_name=None): diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index a58b11289..903d786cd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. """ -# TODO(dabo): This class will be removed in the next merge prop; it remains now -# because much of the code will be refactored into different classes. import collections from nova import flags from nova import log as logging -from nova.scheduler import abstract_scheduler +from nova.scheduler import base_scheduler from nova import utils from nova import exception @@ -37,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost') FLAGS = flags.FLAGS flags.DEFINE_list('least_cost_scheduler_cost_functions', - ['nova.scheduler.least_cost.noop_cost_fn'], - 'Which cost functions the LeastCostScheduler should use.') + ['nova.scheduler.least_cost.noop_cost_fn'], + 'Which cost functions the LeastCostScheduler should use.') # TODO(sirp): Once we have enough of these rules, we can break them out into a # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, - 'How much weight to give the noop cost function') + 'How much weight to give the noop cost function') +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') def noop_cost_fn(host): @@ -52,19 +52,64 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, - 'How much weight to give the fill-first cost function') - - def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude - hosts that don't have enough ram""" - hostname, caps = host - free_mem = caps['host_memory_free'] + hosts that don't have enough ram. + """ + hostname, service = host + caps = service.get("compute", {}) + free_mem = caps.get("host_memory_free", 0) return free_mem -class LeastCostScheduler(abstract_scheduler.AbstractScheduler): +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1 + """ + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + + +def weighted_sum(domain, weighted_fns, normalize=True): + """Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted list of scores. To pair with hosts do: + zip(scores, hosts) + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... 
+ # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + domain_scores.append(elem_score) + return domain_scores + + +class LeastCostScheduler(base_scheduler.BaseScheduler): def __init__(self, *args, **kwargs): self.cost_fns_cache = {} super(LeastCostScheduler, self).__init__(*args, **kwargs) @@ -73,10 +118,8 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): """Returns a list of tuples containing weights and cost functions to use for weighing hosts """ - if topic in self.cost_fns_cache: return self.cost_fns_cache[topic] - cost_fns = [] for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: if '.' in cost_fn_str: @@ -85,7 +128,6 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): short_name = cost_fn_str cost_fn_str = "%s.%s.%s" % ( __name__, self.__class__.__name__, short_name) - if not (short_name.startswith('%s_' % topic) or short_name.startswith('noop')): continue @@ -96,15 +138,14 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): cost_fn = utils.import_class(cost_fn_str) except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( - cost_fn_str=cost_fn_str) + cost_fn_str=cost_fn_str) try: flag_name = "%s_weight" % cost_fn.__name__ weight = getattr(FLAGS, flag_name) except AttributeError: raise exception.SchedulerWeightFlagNotFound( - flag_name=flag_name) - + flag_name=flag_name) cost_fns.append((weight, cost_fn)) self.cost_fns_cache[topic] = cost_fns @@ -114,13 +155,13 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname, capabilities: capabs} ] """ - cost_fns = self.get_cost_fns(topic) costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) weighted = [] weight_log = [] - for cost, (hostname, caps) in zip(costs, hosts): + for cost, (hostname, service) in zip(costs, hosts): + caps = service[topic] weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname, capabilities=caps) @@ -128,52 +169,3 @@ class LeastCostScheduler(abstract_scheduler.AbstractScheduler): LOG.debug(_("Weighted Costs => %s") % weight_log) return weighted - - -def normalize_list(L): - """Normalize an array of numbers such that each element satisfies: - 0 <= e <= 1""" - if not L: - return L - max_ = max(L) - if max_ > 0: - return [(float(e) / max_) for e in L] - return L - - -def weighted_sum(domain, weighted_fns, normalize=True): - """Use the weighted-sum method to compute a score for an array of objects. - Normalize the results of the objective-functions so that the weights are - meaningful regardless of objective-function's range. - - domain - input to be scored - weighted_fns - list of weights and functions like: - [(weight, objective-functions)] - - Returns an unsorted list of scores. To pair with hosts do: - zip(scores, hosts) - """ - # Table of form: - # { domain1: [score1, score2, ..., scoreM] - # ... 
- # domainN: [score1, score2, ..., scoreM] } - score_table = collections.defaultdict(list) - for weight, fn in weighted_fns: - scores = [fn(elem) for elem in domain] - - if normalize: - norm_scores = normalize_list(scores) - else: - norm_scores = scores - - for idx, score in enumerate(norm_scores): - weighted_score = score * weight - score_table[idx].append(weighted_score) - - # Sum rows in table to compute score for each element in domain - domain_scores = [] - for idx in sorted(score_table): - elem_score = sum(score_table[idx]) - domain_scores.append(elem_score) - - return domain_scores diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a64b25138..a961b1b06 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -192,9 +192,7 @@ class HostFilterTestCase(test.TestCase): msg = " ".join([str(arg) for arg in args]) dbg.write("%s\n" % msg) - debug("cooked", cooked, type(cooked)) hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 16ec4420b..d6eaaa223 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -15,6 +15,7 @@ """ Tests For Least Cost Scheduler """ +import copy from nova import test from nova.scheduler import least_cost @@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase): super(LeastCostSchedulerTestCase, self).tearDown() def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) + weighted = self.sched.weigh_hosts("compute", request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) def test_no_hosts(self): @@ -125,19 +126,20 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 instance_type = {'memory_mb': 1024} request_spec = {'instance_type': instance_type} - all_hosts = self.sched.zone_manager.service_states.iteritems() + svc_states = self.sched.zone_manager.service_states.iteritems() all_hosts = [(host, services["compute"]) - for host, services in all_hosts + for host, services in svc_states if "compute" in services] - hosts = self.sched.filter_hosts('compute', request_spec, host_list) + hosts = self.sched.filter_hosts('compute', request_spec, all_hosts) expected = [] - for idx, (hostname, caps) in enumerate(hosts): + for idx, (hostname, services) in enumerate(hosts): + caps = copy.deepcopy(services["compute"]) # Costs are normalized so over 10 hosts, each host with increasing # free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) + wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps) + expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 3a1389a49..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filters. -""" - -import json - -from nova import exception -from nova import test -from nova.scheduler import host_filter - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filters.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' - self.flags(default_host_filter=default_host_filter) - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200, - extra_specs={}) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def test_choose_filter(self): - # Test default filter ... - hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid filter ... - hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid filter ... 
- try: - host_filter.choose_host_filter('does not exist') - self.fail("Should not find host filter.") - except exception.SchedulerHostFilterNotFound: - pass - - def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() - cooked = hf.instance_type_to_filter(self.instance_type) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_filter(self): - hf = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300], - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700], - ], - ] - - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - hf.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False]))) - - try: - hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False)) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$foo', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$.....', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', {}, ['>', '$missing....foo']]))) -- cgit From d8a156f9ed0729c4c5553fe3b28f6c3afb93d54f Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 15 Aug 2011 17:31:24 -0500 Subject: pep8 cleanup --- nova/compute/manager.py | 3 ++- nova/scheduler/abstract_scheduler.py | 6 +++--- nova/scheduler/filters/abstract_filter.py | 4 ++-- nova/scheduler/host_filter.py | 1 + nova/tests/scheduler/test_host_filter.py | 5 ----- nova/tests/scheduler/test_least_cost_scheduler.py | 3 ++- nova/virt/libvirt/connection.py | 3 ++- 7 files changed, 12 insertions(+), 13 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 16b8e14b4..52fcf5c49 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1315,7 +1315,8 @@ class ComputeManager(manager.SchedulerDependentManager): # This nwfilter is necessary on the destination host. # In addition, this method is creating filtering rule # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info) + self.driver.ensure_filtering_rules_for_instance(instance_ref, + network_info) # Preparation for block migration if block_migration: diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 2f1ede0a4..77db67773 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -15,7 +15,7 @@ """ The AbsractScheduler is an abstract class Scheduler for creating instances -locally or across zones. Two methods should be overridden in order to +locally or across zones. Two methods should be overridden in order to customize the behavior: filter_hosts() and weigh_hosts(). The default behavior is to simply select all hosts and weight them the same. """ @@ -298,8 +298,8 @@ class AbstractScheduler(driver.Scheduler): def filter_hosts(self, topic, request_spec, host_list): """Filter the full host list returned from the ZoneManager. By default, this method only applies the basic_ram_filter(), meaning all hosts - with at least enough RAM for the requested instance are returned. - + with at least enough RAM for the requested instance are returned. + Override in subclasses to provide greater selectivity. 
""" def basic_ram_filter(hostname, capabilities, request_spec): diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index d9d272130..a1d00d562 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -18,10 +18,10 @@ import nova.scheduler from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'AllHostsFilter', +flags.DEFINE_string('default_host_filter', 'AllHostsFilter', 'Which filter to use for filtering hosts') + class AbstractHostFilter(object): """Base class for host filters.""" def instance_type_to_filter(self, instance_type): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index be618f3f3..4bc5158cc 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -39,6 +39,7 @@ FLAGS = flags.FLAGS def _get_filters(): # Imported here to avoid circular imports from nova.scheduler import filters + def get_itm(nm): return getattr(filters, nm) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a961b1b06..17431fc7e 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -187,11 +187,6 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) - def debug(*args): - with file("/tmp/debug", "a") as dbg: - msg = " ".join([str(arg) for arg in args]) - dbg.write("%s\n" % msg) - hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index d6eaaa223..af58de527 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -139,7 +139,8 @@ class LeastCostSchedulerTestCase(test.TestCase): # free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps) + wtd_dict = dict(hostname=hostname, weight=weight, + capabilities=caps) expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2b17e244a..c009641ef 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1539,7 +1539,8 @@ class LibvirtConnection(driver.ComputeDriver): # basic-filtering must be set here. self.firewall_driver.setup_basic_filtering(instance_ref, network_info) # setting up n)ova-instance-instance-xx mainly. 
- self.firewall_driver.prepare_instance_filter(instance_ref, network_info) + self.firewall_driver.prepare_instance_filter(instance_ref, + network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) -- cgit From 8d83ceb9f8baef3c768c4fc087afb89188250c26 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 16:29:52 -0700 Subject: fix error logging in s3.py --- nova/image/s3.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index ccbfa89cd..e5008d856 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -193,6 +193,8 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" + log_vars = {'image_location': image_location, + 'image_path': image_path} metadata['properties']['image_state'] = 'downloading' self.service.update(context, image_id, metadata) @@ -214,7 +216,7 @@ class S3ImageService(service.BaseImageService): except Exception: LOG.error(_("Failed to download %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) raise @@ -238,7 +240,7 @@ class S3ImageService(service.BaseImageService): dec_filename) except Exception: LOG.error(_("Failed to decrypt %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) raise @@ -250,7 +252,7 @@ class S3ImageService(service.BaseImageService): unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: LOG.error(_("Failed to untar %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) raise @@ -263,7 +265,7 @@ class S3ImageService(service.BaseImageService): metadata, image_file) except Exception: LOG.error(_("Failed to upload %(image_location)s " - "to %(image_path)s"), locals()) + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) raise -- cgit From c4f6df55fa8a9c0746074c814b510e4a4cd4e512 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 17:12:03 -0700 Subject: log the full exception so we don't lose traceback through eventlet --- nova/image/s3.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index e5008d856..abf01a942 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -215,11 +215,11 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: - LOG.error(_("Failed to download %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to download %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'decrypting' self.service.update(context, image_id, metadata) @@ -239,11 +239,11 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: - LOG.error(_("Failed to decrypt %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to decrypt 
%(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'untarring' self.service.update(context, image_id, metadata) @@ -251,11 +251,11 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: - LOG.error(_("Failed to untar %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to untar %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'uploading' self.service.update(context, image_id, metadata) @@ -264,11 +264,11 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: - LOG.error(_("Failed to upload %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_("Failed to upload %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'available' metadata['status'] = 'active' -- cgit From 0801dee7b05463b40bf66ee5911c92ac5e4aabc8 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 16 Aug 2011 10:49:26 -0400 Subject: Fix test_metadata tests. --- nova/api/ec2/cloud.py | 2 +- nova/tests/test_metadata.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 152cd6887..9aebf92e3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -305,7 +305,7 @@ class CloudController(object): 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, - 'instance-type': instance_ref['instance_type'].name, + 'instance-type': instance_ref['instance_type']['name'], 'local-hostname': hostname, 'local-ipv4': address, 'placement': {'availability-zone': availability_zone}, diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index ad678714e..bfc7a6d44 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -39,7 +39,7 @@ class MetadataTestCase(test.TestCase): 'key_name': None, 'host': 'test', 'launch_index': 1, - 'instance_type': 'm1.tiny', + 'instance_type': {'name': 'm1.tiny'}, 'reservation_id': 'r-xxxxxxxx', 'user_data': '', 'image_ref': 7, -- cgit From 44a278bc5a456c8eda74c61aaa68cfd74ee0d6e8 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 16 Aug 2011 11:31:29 -0400 Subject: Small bug fix...don't cast DB objects to dicts. 
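The membership test required casting the row to a dict, which is not reliable for DB objects (the bug here), and it also passed when the key was present but the value unset. Probing with get() sidesteps both problems. A minimal sketch, with a plain dict standing in for the instance row (illustrative values only, not real data):

    inst = {'image_ref': None}

    'image_ref' in dict(inst)       # True: key exists, value unset
    bool(inst.get('image_ref'))     # False: the new guard skips it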
--- nova/api/openstack/views/servers.py | 4 ++-- nova/tests/integrated/test_servers.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 8222f6766..60fdf54be 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -146,7 +146,7 @@ class ViewBuilderV11(ViewBuilder): return response def _build_image(self, response, inst): - if 'image_ref' in dict(inst): + if inst.get("image_ref", None): image_href = inst['image_ref'] image_id = str(common.get_id_from_href(image_href)) _bookmark = self.image_builder.generate_bookmark(image_id) @@ -161,7 +161,7 @@ class ViewBuilderV11(ViewBuilder): } def _build_flavor(self, response, inst): - if "instance_type" in dict(inst): + if inst.get("instance_type", None): flavor_id = inst["instance_type"]['flavorid'] flavor_ref = self.flavor_builder.generate_href(flavor_id) flavor_bookmark = self.flavor_builder.generate_bookmark(flavor_id) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 150279a95..725f6d529 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -27,6 +27,7 @@ LOG = logging.getLogger('nova.tests.integrated') class ServersTest(integrated_helpers._IntegratedTestBase): + def test_get_servers(self): """Simple check that listing servers works.""" servers = self.api.get_servers() @@ -103,6 +104,10 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # It should be available... # TODO(justinsb): Mock doesn't yet do this... #self.assertEqual('available', found_server['status']) + servers = self.api.get_servers(detail=True) + for server in servers: + self.assertTrue("image" in server) + self.assertTrue("flavor" in server) self._delete_server(created_server_id) -- cgit From 6220c4276e30c633ffc4165ce6db0d120c0e88a7 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:13:10 -0700 Subject: another trunk merge... a new change made it into nova before the code was merged --- nova/api/openstack/contrib/quotas.py | 107 ++++++++++++++++ nova/tests/api/openstack/contrib/test_quotas.py | 158 ++++++++++++++++++++++++ nova/tests/api/openstack/test_extensions.py | 1 + 3 files changed, 266 insertions(+) create mode 100644 nova/api/openstack/contrib/quotas.py create mode 100644 nova/tests/api/openstack/contrib/test_quotas.py diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py new file mode 100644 index 000000000..7dbafb79a --- /dev/null +++ b/nova/api/openstack/contrib/quotas.py @@ -0,0 +1,107 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import urlparse + +from nova import db +from nova import exception +from nova import quota +from nova.auth import manager as auth_manager +from nova.api.openstack import extensions + + +class QuotasController(object): + + def _format_quota_set(self, project_id, quota_set): + """Convert the quota object to a result dict""" + + return {'quota_set': { + 'id': str(project_id), + 'metadata_items': quota_set['metadata_items'], + 'injected_file_content_bytes': + quota_set['injected_file_content_bytes'], + 'volumes': quota_set['volumes'], + 'gigabytes': quota_set['gigabytes'], + 'ram': quota_set['ram'], + 'floating_ips': quota_set['floating_ips'], + 'instances': quota_set['instances'], + 'injected_files': quota_set['injected_files'], + 'cores': quota_set['cores'], + }} + + def index(self, req): + # NOTE(jakedahn): If http param defaults is true, list system defaults. + if urlparse.parse_qs(req.environ['QUERY_STRING']).get('defaults', + False): + return {'quota_set_list': [self._format_quota_set('__defaults__', + quota._get_default_quotas())]} + else: + context = req.environ['nova.context'] + user = req.environ.get('user') + projects = auth_manager.AuthManager().get_projects(user=user) + + quota_set_list = [self._format_quota_set(project.name, + quota.get_project_quotas(context, project.name)) + for project in projects] + return {'quota_set_list': quota_set_list} + + def show(self, req, id): + context = req.environ['nova.context'] + return self._format_quota_set(id, quota.get_project_quotas(context, + id)) + + def update(self, req, id, body): + context = req.environ['nova.context'] + project_id = id + resources = ['metadata_items', 'injected_file_content_bytes', + 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', + 'injected_files', 'cores'] + + for key in body['quota_set'].keys(): + if key in resources: + value = int(body['quota_set'][key]) + try: + db.quota_update(context, project_id, key, value) + except exception.ProjectQuotaNotFound: + db.quota_create(context, project_id, key, value) + return {'quota_set': quota.get_project_quotas(context, project_id)} + + +class Quotas(extensions.ExtensionDescriptor): + + def get_name(self): + return "Quotas" + + def get_alias(self): + return "os-quotas" + + def get_description(self): + return "Quotas management support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/quotas/api/v1.1" + + def get_updated(self): + return "2011-08-08T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-quotas', QuotasController()) + resources.append(res) + + return resources diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py new file mode 100644 index 000000000..6ab2faf4d --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -0,0 +1,158 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import webob + +from nova import context +from nova import test +from nova.auth import manager as auth_manager +from nova.tests.api.openstack import fakes + + +from nova.api.openstack.contrib.quotas import QuotasController + + +def quota_set(id): + return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10, + 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10, + 'instances': 10, 'injected_files': 5, 'cores': 20, + 'injected_file_content_bytes': 10240}} + + +def quota_set_list(): + return {'quota_set_list': [quota_set('1234'), quota_set('5678'), + quota_set('update_me')]} + + +def create_project(project_name, manager_user): + auth_manager.AuthManager().create_project(project_name, manager_user) + + +def delete_project(project_name): + auth_manager.AuthManager().delete_project(project_name) + + +def create_admin_user(name): + auth_manager.AuthManager().create_user(name, admin=True) + + +def delete_user(name): + auth_manager.AuthManager().delete_user(name) + + +class QuotasTest(test.TestCase): + + def setUp(self): + super(QuotasTest, self).setUp() + self.controller = QuotasController() + self.context = context.get_admin_context() + + create_admin_user('foo') + create_project('1234', 'foo') + create_project('5678', 'foo') + create_project('update_me', 'foo') + + def tearDown(self): + delete_project('1234') + delete_project('5678') + delete_project('update_me') + delete_user('foo') + + def test_format_quota_set(self): + raw_quota_set = { + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240, + } + + quota_set = QuotasController()._format_quota_set('1234', raw_quota_set) + quota_set_check = quota_set['quota_set'] + + self.assertEqual(quota_set_check['id'], '1234') + self.assertEqual(quota_set_check['instances'], 10) + self.assertEqual(quota_set_check['cores'], 20) + self.assertEqual(quota_set_check['ram'], 51200) + self.assertEqual(quota_set_check['volumes'], 10) + self.assertEqual(quota_set_check['gigabytes'], 1000) + self.assertEqual(quota_set_check['floating_ips'], 10) + self.assertEqual(quota_set_check['metadata_items'], 128) + self.assertEqual(quota_set_check['injected_files'], 5) + self.assertEqual(quota_set_check['injected_file_content_bytes'], 10240) + + def test_quotas_index_with_default_param(self): + req = webob.Request.blank('/v1.1/os-quotas?defaults=True') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + expected = {'quota_set_list': [{'quota_set': { + 'id': '__defaults__', + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240}}]} + + self.assertEqual(json.loads(res.body), expected) + + def test_quotas_index(self): + req = webob.Request.blank('/v1.1/os-quotas') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + self.assertEqual(json.loads(res.body), quota_set_list()) + + def test_quotas_show(self): + req = webob.Request.blank('/v1.1/os-quotas/1234') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + self.assertEqual(json.loads(res.body), quota_set('1234')) 
+ + def test_quotas_update(self): + updated_quota_set = {'quota_set': {'instances': 50, + 'cores': 50, 'ram': 51200, 'volumes': 10, + 'gigabytes': 1000, 'floating_ips': 10, + 'metadata_items': 128, 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + req = webob.Request.blank('/v1.1/os-quotas/update_me') + req.method = 'PUT' + req.body = json.dumps(updated_quota_set) + req.headers['Content-Type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app(fake_auth_context=\ + context.RequestContext('fake', 'fake', + is_admin=True))) + + self.assertEqual(json.loads(res.body), updated_quota_set) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 3e990a30b..0ae1a059f 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -91,6 +91,7 @@ class ExtensionControllerTest(test.TestCase): "Hosts", "Keypairs", "Multinic", + "Quotas", "SecurityGroups", "Volumes", ] -- cgit From b6c8985cb10b40572d23b7971aac6d0577ebfe82 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:01 -0700 Subject: another trunk merge --- nova/tests/api/openstack/contrib/test_quotas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index 6ab2faf4d..6a7a1d9fa 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -151,7 +151,7 @@ class QuotasTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context=\ + res = req.get_response(fakes.wsgi_app(fake_auth_context= context.RequestContext('fake', 'fake', is_admin=True))) -- cgit From e9cf4ff5c14b274b8a1d7aa39567768368851e81 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: merging test_extensions.py --- nova/tests/api/openstack/test_extensions.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 0ae1a059f..5d3208e10 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -111,7 +111,7 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(names, self.ext_list) # Make sure that at least Fox in Sox is correct. - (fox_ext,) = [ + (fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual(fox_ext, { 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', @@ -156,7 +156,7 @@ class ExtensionControllerTest(test.TestCase): self.assertEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. 
- (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual(fox_ext.get('name'), 'Fox In Socks') self.assertEqual(fox_ext.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') @@ -228,6 +228,7 @@ class ResourceExtensionTest(test.TestCase): class InvalidExtension(object): + def get_alias(self): return "THIRD" -- cgit From 02c5d589483abef3fb8ec65f983e5b43a9e41f71 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: removed index, and separated out defaults into its own action --- nova/api/openstack/contrib/quotas.py | 28 ++++++--------------- nova/tests/api/openstack/contrib/test_quotas.py | 33 +++++++++---------------- 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index 7dbafb79a..5f2b54d57 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -24,7 +24,7 @@ from nova.auth import manager as auth_manager from nova.api.openstack import extensions -class QuotasController(object): +class QuotaSetsController(object): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict""" @@ -43,22 +43,6 @@ class QuotasController(object): 'cores': quota_set['cores'], }} - def index(self, req): - # NOTE(jakedahn): If http param defaults is true, list system defaults. - if urlparse.parse_qs(req.environ['QUERY_STRING']).get('defaults', - False): - return {'quota_set_list': [self._format_quota_set('__defaults__', - quota._get_default_quotas())]} - else: - context = req.environ['nova.context'] - user = req.environ.get('user') - projects = auth_manager.AuthManager().get_projects(user=user) - - quota_set_list = [self._format_quota_set(project.name, - quota.get_project_quotas(context, project.name)) - for project in projects] - return {'quota_set_list': quota_set_list} - def show(self, req, id): context = req.environ['nova.context'] return self._format_quota_set(id, quota.get_project_quotas(context, @@ -80,6 +64,8 @@ class QuotasController(object): db.quota_create(context, project_id, key, value) return {'quota_set': quota.get_project_quotas(context, project_id)} + def defaults(self, req): + return self._format_quota_set('defaults', quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): @@ -87,13 +73,13 @@ class Quotas(extensions.ExtensionDescriptor): return "Quotas" def get_alias(self): - return "os-quotas" + return "os-quota-sets" def get_description(self): return "Quotas management support" def get_namespace(self): - return "http://docs.openstack.org/ext/quotas/api/v1.1" + return "http://docs.openstack.org/ext/quotas-sets/api/v1.1" def get_updated(self): return "2011-08-08T00:00:00+00:00" @@ -101,7 +87,9 @@ class Quotas(extensions.ExtensionDescriptor): def get_resources(self): resources = [] - res = extensions.ResourceExtension('os-quotas', QuotasController()) + res = extensions.ResourceExtension('os-quota-sets', + QuotaSetsController(), + member_actions={'defaults': 'GET'}) resources.append(res) return resources diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index 6a7a1d9fa..decc76b4e 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -24,7 +24,7 @@ from nova.auth import manager as auth_manager from nova.tests.api.openstack import fakes -from 
nova.api.openstack.contrib.quotas import QuotasController +from nova.api.openstack.contrib.quotas import QuotaSetsController def quota_set(id): @@ -55,11 +55,11 @@ def delete_user(name): auth_manager.AuthManager().delete_user(name) -class QuotasTest(test.TestCase): +class QuotaSetsTest(test.TestCase): def setUp(self): - super(QuotasTest, self).setUp() - self.controller = QuotasController() + super(QuotaSetsTest, self).setUp() + self.controller = QuotaSetsController() self.context = context.get_admin_context() create_admin_user('foo') @@ -86,7 +86,7 @@ class QuotasTest(test.TestCase): 'injected_file_content_bytes': 10240, } - quota_set = QuotasController()._format_quota_set('1234', raw_quota_set) + quota_set = QuotaSetsController()._format_quota_set('1234', raw_quota_set) quota_set_check = quota_set['quota_set'] self.assertEqual(quota_set_check['id'], '1234') @@ -100,15 +100,15 @@ class QuotasTest(test.TestCase): self.assertEqual(quota_set_check['injected_files'], 5) self.assertEqual(quota_set_check['injected_file_content_bytes'], 10240) - def test_quotas_index_with_default_param(self): - req = webob.Request.blank('/v1.1/os-quotas?defaults=True') + def test_quotas_defaults(self): + req = webob.Request.blank('/v1.1/os-quota-sets/defaults') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - expected = {'quota_set_list': [{'quota_set': { - 'id': '__defaults__', + expected = {'quota_set': { + 'id': 'defaults', 'instances': 10, 'cores': 20, 'ram': 51200, @@ -117,21 +117,12 @@ class QuotasTest(test.TestCase): 'floating_ips': 10, 'metadata_items': 128, 'injected_files': 5, - 'injected_file_content_bytes': 10240}}]} + 'injected_file_content_bytes': 10240}} self.assertEqual(json.loads(res.body), expected) - def test_quotas_index(self): - req = webob.Request.blank('/v1.1/os-quotas') - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app()) - - self.assertEqual(res.status_int, 200) - self.assertEqual(json.loads(res.body), quota_set_list()) - def test_quotas_show(self): - req = webob.Request.blank('/v1.1/os-quotas/1234') + req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) @@ -146,7 +137,7 @@ class QuotasTest(test.TestCase): 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240}} - req = webob.Request.blank('/v1.1/os-quotas/update_me') + req = webob.Request.blank('/v1.1/os-quota-sets/update_me') req.method = 'PUT' req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' -- cgit From 817b596bccd38f84f72e1ee73df3c3b35287c75c Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: refactoring tests to not use authmanager, and now returning 403 when non admin user tries to update quotas --- nova/api/openstack/contrib/quotas.py | 4 +- nova/tests/api/openstack/contrib/test_quotas.py | 113 ++++++++++++------------ 2 files changed, 58 insertions(+), 59 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index 5f2b54d57..f7e7b4105 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import webob import urlparse from nova import db @@ -54,7 +55,6 @@ class QuotaSetsController(object): resources = ['metadata_items', 'injected_file_content_bytes', 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', 'injected_files', 'cores'] - for key in body['quota_set'].keys(): if key in resources: value = int(body['quota_set'][key]) @@ -62,6 +62,8 @@ class QuotaSetsController(object): db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) + except exception.AdminRequired as e: + return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} def defaults(self, req): diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index decc76b4e..e2bd05428 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -39,39 +39,18 @@ def quota_set_list(): quota_set('update_me')]} -def create_project(project_name, manager_user): - auth_manager.AuthManager().create_project(project_name, manager_user) - - -def delete_project(project_name): - auth_manager.AuthManager().delete_project(project_name) - - -def create_admin_user(name): - auth_manager.AuthManager().create_user(name, admin=True) - - -def delete_user(name): - auth_manager.AuthManager().delete_user(name) - - class QuotaSetsTest(test.TestCase): def setUp(self): super(QuotaSetsTest, self).setUp() self.controller = QuotaSetsController() - self.context = context.get_admin_context() - - create_admin_user('foo') - create_project('1234', 'foo') - create_project('5678', 'foo') - create_project('update_me', 'foo') - - def tearDown(self): - delete_project('1234') - delete_project('5678') - delete_project('update_me') - delete_user('foo') + self.user_id = 'fake' + self.project_id = 'fake' + self.user_context = context.RequestContext(self.user_id, + self.project_id) + self.admin_context = context.RequestContext(self.user_id, + self.project_id, + is_admin=True) def test_format_quota_set(self): raw_quota_set = { @@ -83,22 +62,22 @@ class QuotaSetsTest(test.TestCase): 'floating_ips': 10, 'metadata_items': 128, 'injected_files': 5, - 'injected_file_content_bytes': 10240, - } - - quota_set = QuotaSetsController()._format_quota_set('1234', raw_quota_set) - quota_set_check = quota_set['quota_set'] - - self.assertEqual(quota_set_check['id'], '1234') - self.assertEqual(quota_set_check['instances'], 10) - self.assertEqual(quota_set_check['cores'], 20) - self.assertEqual(quota_set_check['ram'], 51200) - self.assertEqual(quota_set_check['volumes'], 10) - self.assertEqual(quota_set_check['gigabytes'], 1000) - self.assertEqual(quota_set_check['floating_ips'], 10) - self.assertEqual(quota_set_check['metadata_items'], 128) - self.assertEqual(quota_set_check['injected_files'], 5) - self.assertEqual(quota_set_check['injected_file_content_bytes'], 10240) + 'injected_file_content_bytes': 10240} + + quota_set = QuotaSetsController()._format_quota_set('1234', + raw_quota_set) + qs = quota_set['quota_set'] + + self.assertEqual(qs['id'], '1234') + self.assertEqual(qs['instances'], 10) + self.assertEqual(qs['cores'], 20) + self.assertEqual(qs['ram'], 51200) + self.assertEqual(qs['volumes'], 10) + self.assertEqual(qs['gigabytes'], 1000) + self.assertEqual(qs['floating_ips'], 10) + self.assertEqual(qs['metadata_items'], 128) + self.assertEqual(qs['injected_files'], 5) + self.assertEqual(qs['injected_file_content_bytes'], 10240) def 
test_quotas_defaults(self): req = webob.Request.blank('/v1.1/os-quota-sets/defaults') @@ -108,16 +87,16 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(res.status_int, 200) expected = {'quota_set': { - 'id': 'defaults', - 'instances': 10, - 'cores': 20, - 'ram': 51200, - 'volumes': 10, - 'gigabytes': 1000, - 'floating_ips': 10, - 'metadata_items': 128, - 'injected_files': 5, - 'injected_file_content_bytes': 10240}} + 'id': 'defaults', + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240}} self.assertEqual(json.loads(res.body), expected) @@ -125,12 +104,13 @@ class QuotaSetsTest(test.TestCase): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app()) + res = req.get_response(fakes.wsgi_app(fake_auth_context= + self.admin_context)) self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) - def test_quotas_update(self): + def test_quotas_update_as_admin(self): updated_quota_set = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, 'gigabytes': 1000, 'floating_ips': 10, @@ -143,7 +123,24 @@ class QuotaSetsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context= - context.RequestContext('fake', 'fake', - is_admin=True))) + self.admin_context)) self.assertEqual(json.loads(res.body), updated_quota_set) + + + def test_quotas_update_as_user(self): + updated_quota_set = {'quota_set': {'instances': 50, + 'cores': 50, 'ram': 51200, 'volumes': 10, + 'gigabytes': 1000, 'floating_ips': 10, + 'metadata_items': 128, 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + req = webob.Request.blank('/v1.1/os-quota-sets/update_me') + req.method = 'PUT' + req.body = json.dumps(updated_quota_set) + req.headers['Content-Type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app(fake_auth_context= + self.user_context)) + + self.assertEqual(res.status_int, 403) -- cgit From 903ae5a8274051aaf40a62c929117d7165729360 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing pep8 errors --- nova/api/openstack/contrib/quotas.py | 2 +- nova/tests/api/openstack/contrib/test_quotas.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index f7e7b4105..b5c6447c4 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -21,7 +21,6 @@ import urlparse from nova import db from nova import exception from nova import quota -from nova.auth import manager as auth_manager from nova.api.openstack import extensions @@ -69,6 +68,7 @@ class QuotaSetsController(object): def defaults(self, req): return self._format_quota_set('defaults', quota._get_default_quotas()) + class Quotas(extensions.ExtensionDescriptor): def get_name(self): diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index e2bd05428..b37edb9f5 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -127,7 +127,6 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(json.loads(res.body), updated_quota_set) - def test_quotas_update_as_user(self): updated_quota_set = {'quota_set': 
{'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, -- cgit From 8b6e551813ef964af38335fcca749ab9d0971200 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: cleaning up a few things from pyflakes --- nova/api/openstack/contrib/quotas.py | 3 +-- nova/tests/api/openstack/contrib/test_quotas.py | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index b5c6447c4..d021a4a4f 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -16,7 +16,6 @@ # under the License. import webob -import urlparse from nova import db from nova import exception @@ -61,7 +60,7 @@ class QuotaSetsController(object): db.quota_update(context, project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, project_id, key, value) - except exception.AdminRequired as e: + except exception.AdminRequired: return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index b37edb9f5..cb5fcb120 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -20,7 +20,6 @@ import webob from nova import context from nova import test -from nova.auth import manager as auth_manager from nova.tests.api.openstack import fakes -- cgit From a3a0782f65d85c873c2ec3fc8f94486225cb6f76 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing spacing issues --- nova/tests/api/openstack/contrib/test_quotas.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index cb5fcb120..f40a435aa 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -22,7 +22,6 @@ from nova import context from nova import test from nova.tests.api.openstack import fakes - from nova.api.openstack.contrib.quotas import QuotaSetsController @@ -57,9 +56,9 @@ class QuotaSetsTest(test.TestCase): 'cores': 20, 'ram': 51200, 'volumes': 10, - 'gigabytes': 1000, 'floating_ips': 10, 'metadata_items': 128, + 'gigabytes': 1000, 'injected_files': 5, 'injected_file_content_bytes': 10240} @@ -103,7 +102,7 @@ class QuotaSetsTest(test.TestCase): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context= + res = req.get_response(fakes.wsgi_app(fake_auth_context = self.admin_context)) self.assertEqual(res.status_int, 200) @@ -121,7 +120,7 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context= + res = req.get_response(fakes.wsgi_app(fake_auth_context = self.admin_context)) self.assertEqual(json.loads(res.body), updated_quota_set) @@ -138,7 +137,7 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context= + res = req.get_response(fakes.wsgi_app(fake_auth_context = self.user_context)) self.assertEqual(res.status_int, 403) -- cgit From bf269b3d799a431ad3fc68cdb039b826685c8760 Mon Sep 17 00:00:00 
2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing pep8 issues again --- nova/tests/api/openstack/contrib/test_quotas.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index f40a435aa..8f363aed6 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -102,8 +102,8 @@ class QuotaSetsTest(test.TestCase): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context = - self.admin_context)) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) @@ -120,8 +120,8 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context = - self.admin_context)) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) self.assertEqual(json.loads(res.body), updated_quota_set) @@ -137,7 +137,7 @@ class QuotaSetsTest(test.TestCase): req.body = json.dumps(updated_quota_set) req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context = - self.user_context)) + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) self.assertEqual(res.status_int, 403) -- cgit From 029261908ac5acd9950a4b027b7daec17c92854d Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: making get project quotas require context which has access to the project/tenant --- nova/db/sqlalchemy/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 57a4370d8..184ad60d5 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1959,6 +1959,7 @@ def quota_get(context, project_id, resource, session=None): @require_context def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) session = get_session() result = {'project_id': project_id} rows = session.query(models.Quota).\ -- cgit From 6a5b9831c4b32053996a99307b7303ca851bf508 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing up the show quotas tests, and extension --- nova/api/openstack/contrib/quotas.py | 8 ++++++-- nova/tests/api/openstack/contrib/test_quotas.py | 16 +++++++++++++--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index d021a4a4f..87046063a 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -44,8 +44,12 @@ class QuotaSetsController(object): def show(self, req, id): context = req.environ['nova.context'] - return self._format_quota_set(id, quota.get_project_quotas(context, - id)) + try: + db.sqlalchemy.api.authorize_project_context(context, id) + return self._format_quota_set(id, + quota.get_project_quotas(context, id)) + except exception.NotAuthorized: + return webob.Response(status_int=403) def update(self, req, id, body): context = req.environ['nova.context'] diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index
8f363aed6..d77ed40ed 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -78,14 +78,14 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(qs['injected_file_content_bytes'], 10240) def test_quotas_defaults(self): - req = webob.Request.blank('/v1.1/os-quota-sets/defaults') + req = webob.Request.blank('/v1.1/os-quota-sets/fake_tenant/defaults') req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) expected = {'quota_set': { - 'id': 'defaults', + 'id': 'fake_tenant', 'instances': 10, 'cores': 20, 'ram': 51200, @@ -98,7 +98,7 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(json.loads(res.body), expected) - def test_quotas_show(self): + def test_quotas_show_as_admin(self): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' req.headers['Content-Type'] = 'application/json' @@ -108,6 +108,16 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) + + def test_quotas_show_as_unauthorized_user(self): + req = webob.Request.blank('/v1.1/os-quota-sets/1234') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) + + self.assertEqual(res.status_int, 403) + def test_quotas_update_as_admin(self): updated_quota_set = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'volumes': 10, -- cgit From b85deda977ff46722a4461aca98f0378fd10ee1b Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: defaults now is referred to using a tenant --- nova/api/openstack/contrib/quotas.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py index 87046063a..459b71dfd 100644 --- a/nova/api/openstack/contrib/quotas.py +++ b/nova/api/openstack/contrib/quotas.py @@ -68,8 +68,8 @@ class QuotaSetsController(object): return webob.Response(status_int=403) return {'quota_set': quota.get_project_quotas(context, project_id)} - def defaults(self, req): - return self._format_quota_set('defaults', quota._get_default_quotas()) + def defaults(self, req, id): + return self._format_quota_set(id, quota._get_default_quotas()) class Quotas(extensions.ExtensionDescriptor): -- cgit From f3e64fea374df91a6ff78a891ff627edf635fdb2 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Tue, 16 Aug 2011 09:15:14 -0700 Subject: fixing pep8 issue --- nova/tests/api/openstack/contrib/test_quotas.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py index d77ed40ed..f6a25385f 100644 --- a/nova/tests/api/openstack/contrib/test_quotas.py +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -108,7 +108,6 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(json.loads(res.body), quota_set('1234')) - def test_quotas_show_as_unauthorized_user(self): req = webob.Request.blank('/v1.1/os-quota-sets/1234') req.method = 'GET' -- cgit From 71935201aed268e94ee9674e887d67b4b9f217a6 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 16 Aug 2011 13:44:03 -0400 Subject: Updated ViewBuilderV10 as per feedback. 
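This applies the same guard to the V10 builder that the earlier fix gave V11: probe the row with get() and only build the field when a value is actually set. After this change the V10 image builder reads roughly as follows (reconstructed from the hunk below, not new behavior):

    def _build_image(self, response, inst):
        if inst.get('image_ref', None):
            image_ref = inst['image_ref']
            if str(image_ref).startswith('http'):
                raise exception.ListingImageRefsNotSupported()
            response['imageId'] = int(image_ref)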
--- nova/api/openstack/views/servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 60fdf54be..edc328129 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -111,14 +111,14 @@ class ViewBuilderV10(ViewBuilder): response['uuid'] = inst['uuid'] def _build_image(self, response, inst): - if 'image_ref' in dict(inst): + if inst.get('image_ref', None): image_ref = inst['image_ref'] if str(image_ref).startswith('http'): raise exception.ListingImageRefsNotSupported() response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): - if 'instance_type' in dict(inst): + if inst.get('instance_type', None): response['flavorId'] = inst['instance_type']['flavorid'] def _build_addresses(self, response, inst): -- cgit From 935c43b414c1685163957590a6fb77fd8ddbac2f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 16 Aug 2011 13:36:11 -0500 Subject: Allow local_gb to be 0; PEP8 fixes. --- nova/compute/manager.py | 10 +++++++++- nova/virt/libvirt/connection.py | 5 +++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3299268f2..39f43a268 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -359,6 +359,13 @@ class ComputeManager(manager.SchedulerDependentManager): instance_type = self.db.instance_type_get(context, instance_type_id) allowed_size_gb = instance_type['local_gb'] + + if allowed_size_gb == 0: + # NOTE(jk0): Since the default local_gb of m1.tiny is 0, we will + # allow the check to proceed. We may want to look into changing the + # default size to 1GB. + return + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 LOG.debug(_("image_id=%(image_id)d, image_size_bytes=" @@ -1368,7 +1375,8 @@ class ComputeManager(manager.SchedulerDependentManager): # This nwfilter is necessary on the destination host. # In addition, this method is creating filtering rule # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info) + self.driver.ensure_filtering_rules_for_instance(instance_ref, + network_info) # Preparation for block migration if block_migration: diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2b17e244a..e8a657bac 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1538,8 +1538,9 @@ class LibvirtConnection(driver.ComputeDriver): # If any instances never launch at destination host, # basic-filtering must be set here. self.firewall_driver.setup_basic_filtering(instance_ref, network_info) - # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref, network_info) + # setting up nova-instance-instance-xx mainly. + self.firewall_driver.prepare_instance_filter(instance_ref, + network_info) # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) -- cgit From 4ee7e94ab89189c284348c8756da611192dfe5ec Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 16 Aug 2011 13:43:33 -0500 Subject: Updated note. 
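Comment wording only; the early return is unchanged. For context, the check the note belongs to caps image size by the flavor's local disk and short-circuits when the flavor advertises no local disk at all. Worked numbers (illustrative, not from the tree):

    allowed_size_gb = 20                               # e.g. m1.small
    allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
    # => 21474836480 bytes of allowed image size; with local_gb == 0
    # (the m1.tiny default) the cap would be 0 and every image would
    # fail the check, hence the early return.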
From 4ee7e94ab89189c284348c8756da611192dfe5ec Mon Sep 17 00:00:00 2001
From: Josh Kearney
Date: Tue, 16 Aug 2011 13:43:33 -0500
Subject: Updated note.
---
 nova/compute/manager.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 39f43a268..88d290908 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -361,9 +361,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         allowed_size_gb = instance_type['local_gb']

         if allowed_size_gb == 0:
-            # NOTE(jk0): Since the default local_gb of m1.tiny is 0, we will
-            # allow the check to proceed. We may want to look into changing the
-            # default size to 1GB.
+            # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
+            # need to handle potential situations where local_gb is 0. This is
+            # the default for m1.tiny.
             return

         allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
-- cgit
From dc2ccb95848c330eeb8e6fa55bf487c54e03a3c3 Mon Sep 17 00:00:00 2001
From: Josh Kearney
Date: Tue, 16 Aug 2011 13:45:13 -0500
Subject: Review feedback.
---
 nova/compute/manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 88d290908..66458fb36 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -360,10 +360,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                   instance_type_id)
         allowed_size_gb = instance_type['local_gb']

-        if allowed_size_gb == 0:
         # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
         # need to handle potential situations where local_gb is 0. This is
         # the default for m1.tiny.
+        if allowed_size_gb == 0:
             return
-- cgit
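The three local_gb commits above converge on one rule: a flavor whose local_gb
is 0 (the m1.tiny default) skips the image-size check entirely instead of
rejecting every image. A condensed sketch of the resulting logic, with names
taken from the diffs (the standalone function wrapper is illustrative, not the
actual method signature):

    def image_fits_flavor(image_size_bytes, allowed_size_gb):
        # local_gb == 0 means there is no secondary-drive cap to enforce.
        if allowed_size_gb == 0:
            return True
        allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
        return image_size_bytes <= allowed_size_bytes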
From 79f3b1512166a37790c5cb2863140d696c717455 Mon Sep 17 00:00:00 2001
From: Troy Toman
Date: Wed, 17 Aug 2011 02:41:17 -0500
Subject: Changed return code to 413 for metadata, personality and instance quota issues
---
 Authors                                          |  1 +
 nova/api/openstack/common.py                     |  3 ++-
 nova/api/openstack/create_instance_helper.py     | 13 ++++++++++---
 nova/api/openstack/faults.py                     |  2 +-
 nova/api/openstack/server_metadata.py            |  3 ++-
 nova/quota.py                                    |  2 +-
 nova/tests/api/openstack/test_image_metadata.py  |  4 ++--
 nova/tests/api/openstack/test_server_actions.py  |  4 ++--
 nova/tests/api/openstack/test_server_metadata.py |  4 ++--
 9 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/Authors b/Authors
index 02fe46c79..864679929 100644
--- a/Authors
+++ b/Authors
@@ -101,6 +101,7 @@ Stephanie Reese
 Thierry Carrez
 Todd Willey
 Trey Morris
+Troy Toman
 Tushar Patil
 Vasiliy Shlykov
 Vishvananda Ishaya

diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index b2a675653..d9eb832f2 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -241,7 +241,8 @@ def check_img_metadata_quota_limit(context, metadata):
     quota_metadata = quota.allowed_metadata_items(context, num_metadata)
     if quota_metadata < num_metadata:
         expl = _("Image metadata limit exceeded")
-        raise webob.exc.HTTPBadRequest(explanation=expl)
+        raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
+                                                  headers={'Retry-After': 0})


 class MetadataXMLDeserializer(wsgi.XMLDeserializer):

diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 4e1da549e..b4a08dac0 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -180,13 +180,20 @@ class CreateInstanceHelper(object):
         """
         if error.code == "OnsetFileLimitExceeded":
             expl = _("Personality file limit exceeded")
-            raise exc.HTTPBadRequest(explanation=expl)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         if error.code == "OnsetFilePathLimitExceeded":
             expl = _("Personality file path too long")
-            raise exc.HTTPBadRequest(explanation=expl)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         if error.code == "OnsetFileContentLimitExceeded":
             expl = _("Personality file content too long")
-            raise exc.HTTPBadRequest(explanation=expl)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
+        if error.code == "InstanceLimitExceeded":
+            expl = _("Instance quotas have been exceeded")
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         # if the original error is okay, just reraise it
         raise error

diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 1ab45d4f1..0ed6f1ff0 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -53,7 +53,7 @@ class Fault(webob.exc.HTTPException):
             fault_name: {
                 'code': code,
                 'message': self.wrapped_exc.explanation}}
-        if code == 413:
+        if code == 413 and self.wrapped_exc.headers['Retry-After']:
             retry = self.wrapped_exc.headers['Retry-After']
             fault_data[fault_name]['retryAfter'] = retry

diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index 2b235f79a..8ac3319c9 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -151,7 +151,8 @@ class Controller(object):
     def _handle_quota_error(self, error):
         """Reraise quota errors as api-specific http exceptions."""
         if error.code == "MetadataLimitExceeded":
-            raise exc.HTTPBadRequest(explanation=error.message)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         raise error

diff --git a/nova/quota.py b/nova/quota.py
index 58766e846..48e598659 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -164,5 +164,5 @@ def allowed_injected_file_path_bytes(context):


 class QuotaError(exception.ApiError):
-    """Quota Exceeeded."""
+    """Quota Exceeded."""
     pass

diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 56a0932e7..21743eeef 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -230,7 +230,7 @@ class ImageMetaDataTest(test.TestCase):
         req.body = json_string
         req.headers["content-type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
-        self.assertEqual(400, res.status_int)
+        self.assertEqual(413, res.status_int)

     def test_too_many_metadata_items_on_put(self):
         req = webob.Request.blank('/v1.1/images/3/metadata/blah')
@@ -238,4 +238,4 @@ class ImageMetaDataTest(test.TestCase):
         req.body = '{"meta": {"blah": "blah"}}'
         req.headers["content-type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
-        self.assertEqual(400, res.status_int)
+        self.assertEqual(413, res.status_int)

diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 687a19390..80a27e30f 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -392,7 +392,7 @@ class ServerActionsTest(test.TestCase):
         req.body = json.dumps(body)
         req.headers["content-type"] = "application/json"
         response = req.get_response(fakes.wsgi_app())
-        self.assertEqual(400, response.status_int)
+        self.assertEqual(413, response.status_int)

     def test_create_backup_no_name(self):
         """Name is required for backups"""
@@ -865,7 +865,7 @@ class ServerActionsTestV11(test.TestCase):
         req.body = json.dumps(body)
         req.headers["content-type"] = "application/json"
         response = req.get_response(fakes.wsgi_app())
-        self.assertEqual(400, response.status_int)
+        self.assertEqual(413, response.status_int)

     def test_create_image_no_name(self):
         body = {

diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index ec446f0f0..8512bd518 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -417,9 +417,9 @@ class ServerMetaDataTest(test.TestCase):
         req.body = json_string
         req.headers["content-type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
-        self.assertEqual(400, res.status_int)
+        self.assertEqual(413, res.status_int)

-    def test_to_many_metadata_items_on_update_item(self):
+    def test_too_many_metadata_items_on_update_item(self):
         self.stubs.Set(nova.db.api, 'instance_metadata_update',
                        return_create_instance_metadata_max)
         req = webob.Request.blank('/v1.1/servers/1/metadata/key1')
-- cgit
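With the 413 commit applied, every quota failure carries a Retry-After header,
and faults.py folds it into the serialized fault body. A test-style sketch of
what a client now sees, given a request req built as in the tests above (the
'overLimit' fault name is an assumption about how Fault maps 413, and webob
normalizes header values to strings):

    res = req.get_response(fakes.wsgi_app())
    assert res.status_int == 413
    assert res.headers['Retry-After'] == '0'

    # Response body, roughly:
    # {"overLimit": {"code": 413,
    #                "message": "Personality file limit exceeded",
    #                "retryAfter": 0}}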
From 228b185f1366df62da42b646ce98711de4195a5d Mon Sep 17 00:00:00 2001
From: Troy Toman
Date: Wed, 17 Aug 2011 03:03:25 -0500
Subject: Removed a change from faults.py that was not required.
---
 nova/api/openstack/faults.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 0ed6f1ff0..1ab45d4f1 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -53,7 +53,7 @@ class Fault(webob.exc.HTTPException):
             fault_name: {
                 'code': code,
                 'message': self.wrapped_exc.explanation}}
-        if code == 413 and self.wrapped_exc.headers['Retry-After']:
+        if code == 413:
             retry = self.wrapped_exc.headers['Retry-After']
             fault_data[fault_name]['retryAfter'] = retry
-- cgit
From ecc4e9ee389115e3793f94aaf53f8fbe59e7ac66 Mon Sep 17 00:00:00 2001
From: Ed Leafe
Date: Wed, 17 Aug 2011 19:58:22 +0000
Subject: Added the host 'enabled' status to the host_data returned by the plugin.
---
 plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index cd9694ce1..36c61f78d 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -258,6 +258,7 @@ def cleanup(dct):
 #        out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "")
 #        out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "")
 #        out["host_local-cache-sr"] = dct.get("local-cache-sr", "")
+    out["enabled"] = dct.get("enabled", "true") == "true"
     out["host_memory"] = omm = {}
     omm["total"] = safe_int(dct.get("memory-total", ""))
     omm["overhead"] = safe_int(dct.get("memory-overhead", ""))
-- cgit
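The xenhost change normalizes the string-valued "enabled" field from the host
record into a real boolean before it reaches callers, defaulting to enabled
when the key is missing. A short sketch of the idiom and a hypothetical
consumer (the scheduling check is illustrative, not code from the plugin):

    dct = {"enabled": "true"}                       # as found in the host record
    enabled = dct.get("enabled", "true") == "true"  # -> True, a real bool

    host_data = {"enabled": enabled}
    if not host_data["enabled"]:
        # A disabled host can be skipped when placing new instances.
        print("host is disabled")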