From fe97e74eea29c990038c0dd36012054d34d704ac Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Fri, 6 May 2011 15:57:01 +0100 Subject: Changes to allow a VM to boot from iso image. A blank HD is also attached with a size corresponding to the instance type. --- nova/virt/xenapi/vm_utils.py | 104 +++++++++++++++++++++++++++++++++++++++++-- nova/virt/xenapi/vmops.py | 17 ++++++- 2 files changed, 116 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d2045a557..60dddb263 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -74,12 +74,14 @@ class ImageType: 2 - raw disk image (local SR, NOT partitioned by plugin) 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for linux, HVM assumed for Windows) + 4 - ISO disk image (local SR, NOT partitioned by plugin) """ KERNEL_RAMDISK = 0 DISK = 1 DISK_RAW = 2 DISK_VHD = 3 + DISK_ISO = 4 class VMHelper(HelperBase): @@ -204,6 +206,30 @@ class VMHelper(HelperBase): ' VDI %(vdi_ref)s.') % locals()) return vbd_ref + @classmethod + def create_cd_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): + """Create a VBD record. Returns a Deferred that gives the new + VBD reference specific to CDRom devices.""" + vbd_rec = {} + vbd_rec['VM'] = vm_ref + vbd_rec['VDI'] = vdi_ref + vbd_rec['userdevice'] = str(userdevice) + vbd_rec['bootable'] = bootable + vbd_rec['mode'] = 'RO' + vbd_rec['type'] = 'CD' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + LOG.debug(_('Creating a CDROM-specific VBD for VM %(vm_ref)s,' + ' VDI %(vdi_ref)s ... ') % locals()) + vbd_ref = session.call_xenapi('VBD.create', vbd_rec) + LOG.debug(_('Created a CDROM-specific VBD %(vbd_ref)s ' + ' for VM %(vm_ref)s, VDI %(vdi_ref)s.') % locals()) + return vbd_ref + @classmethod def find_vbd_by_number(cls, session, vm_ref, number): """Get the VBD reference from the device number""" @@ -369,6 +395,23 @@ class VMHelper(HelperBase): task = session.async_call_plugin('glance', 'upload_vhd', kwargs) session.wait_for_task(task, instance.id) + @classmethod + def fetch_blank_disk(cls, session, instance_type_id): + # Size the blank harddrive to suit the machine type: + one_gig = 1024 * 1024 * 1024 + req_type = instance_types.get_instance_type(instance_type_id) + req_size = req_type['local_gb'] + + LOG.debug("Creating blank HD of size %(req_size)d gigs" + % locals()) + vdi_size = one_gig * req_size + + LOG.debug("ISO vm create: Looking for the SR") + sr_ref = safe_find_sr(session) + + vdi_ref = cls.create_vdi(session, sr_ref, 'blank HD', vdi_size, False) + return vdi_ref + @classmethod def fetch_image(cls, session, instance_id, image, user, project, image_type): @@ -437,7 +480,12 @@ class VMHelper(HelperBase): # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and # DISK restores - sr_ref = safe_find_sr(session) + sr_ref = None + if image_type == ImageType.DISK_ISO: + sr_ref = safe_find_iso_sr(session) + LOG.debug(_("ISO: Found sr possibly containing the ISO image")) + else: + sr_ref = safe_find_sr(session) client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = client.get_image(image) @@ -452,6 +500,8 @@ class VMHelper(HelperBase): name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, 
name_label, vdi_size, False) + LOG.debug(_("Loading image of size %(virtual_size)d. Full set of" + "properties is %(meta)s" % locals())) with_vdi_attached_here(session, vdi_ref, False, lambda dev: _stream_disk(dev, image_type, @@ -490,7 +540,8 @@ class VMHelper(HelperBase): pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK', ImageType.DISK: 'DISK', ImageType.DISK_RAW: 'DISK_RAW', - ImageType.DISK_VHD: 'DISK_VHD'} + ImageType.DISK_VHD: 'DISK_VHD', + ImageType.DISK_ISO: 'DISK_ISO'} disk_format = pretty_format[image_type] image_id = instance.image_id instance_id = instance.id @@ -503,7 +554,8 @@ class VMHelper(HelperBase): 'aki': ImageType.KERNEL_RAMDISK, 'ari': ImageType.KERNEL_RAMDISK, 'raw': ImageType.DISK_RAW, - 'vhd': ImageType.DISK_VHD} + 'vhd': ImageType.DISK_VHD, + 'iso': ImageType.DISK_ISO} client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta = client.get_image_meta(instance.image_id) disk_format = meta['disk_format'] @@ -579,9 +631,11 @@ class VMHelper(HelperBase): available 4. Glance (DISK): pv is assumed + + 5. Glance (ISO): use 'os_type', raise if not set """ if FLAGS.xenapi_image_service == 'glance': - # 2, 3, 4: Glance + # 2, 3, 4, 5: Glance return cls._determine_is_pv_glance( session, vdi_ref, disk_image_type, os_type) else: @@ -618,6 +672,8 @@ class VMHelper(HelperBase): available 4. Glance (DISK): pv is assumed + + 5. Glance (DISK_ISO): no pv is assumed """ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) @@ -633,6 +689,9 @@ class VMHelper(HelperBase): elif disk_image_type == ImageType.DISK: # 4. Disk is_pv = True + elif disk_image_type == ImageType.DISK_ISO: + # 5. ISO + is_pv = False else: raise exception.Error(_("Unknown image format %(disk_image_type)s") % locals()) @@ -877,6 +936,43 @@ def find_sr(session): return None +def safe_find_iso_sr(session): + """Same as find_iso_sr except raises a NotFound exception if SR cannot be + determined + """ + sr_ref = find_iso_sr(session) + if sr_ref is None: + raise exception.NotFound(_('Cannot find SR of content-type ISO')) + return sr_ref + + +def find_iso_sr(session): + """Return the storage repository to hold ISO images""" + host = session.get_xenapi_host() + sr_refs = session.get_xenapi().SR.get_all() + for sr_ref in sr_refs: + sr_rec = session.get_xenapi().SR.get_record(sr_ref) + + LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals()) + #TODO: use ['other_config']['il8n-key-iso-sr'] == 'local-storage' + # or something similar to identify the iso sr + if not (sr_rec['content_type'] == 'iso' and + sr_rec['name_label'] == 'Local ISOs'): + LOG.debug(_("ISO: No match")) + continue + LOG.debug(_("ISO: SR MATCHing our criteria")) + for pbd_ref in sr_rec['PBDs']: + LOG.debug(_("ISO: ISO, looking to see if it is host local")) + pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref) + pbd_rec_host = pbd_rec['host'] + LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s") % + locals()) + if pbd_rec_host == host: + LOG.debug(_("ISO: SR with local PBD")) + return sr_ref + return None + + def remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7c7aa8e98..a2ce38c7a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -162,7 +162,22 @@ class VMOps(object): vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, use_pv_kernel) - VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, + # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk + if 
disk_image_type == ImageType.DISK_ISO: + LOG.debug("detected ISO image type, going to create blank VM for " + "install") + # device 0 reserved for RW disk, so use '1' + cd_vdi_ref = vdi_ref + VMHelper.create_cd_vbd(session=self._session, vm_ref=vm_ref, + vdi_ref=cd_vdi_ref, userdevice=1, bootable=True) + + vdi_ref = VMHelper.fetch_blank_disk(session=self._session, + instance_type_id=instance.instance_type_id) + + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, + vdi_ref=vdi_ref, userdevice=0, bootable=False) + else: + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, userdevice=0, bootable=True) # TODO(tr3buchet) - check to make sure we have network info, otherwise -- cgit From 09bd503a9842857480bd4703d27335e83dd30571 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sat, 11 Jun 2011 19:48:48 +0900 Subject: block migration feature added --- nova/compute/manager.py | 55 ++++++++++++--- nova/db/api.py | 21 ------ nova/db/sqlalchemy/api.py | 39 ----------- nova/db/sqlalchemy/models.py | 16 ++--- nova/exception.py | 5 ++ nova/scheduler/driver.py | 120 +++++++++++++++++++++++++------- nova/scheduler/manager.py | 54 ++++++++------- nova/tests/scheduler/test_scheduler.py | 52 ++++++++++---- nova/tests/test_compute.py | 6 +- nova/tests/test_libvirt.py | 90 ++++++++++++++++++++++++ nova/virt/libvirt/connection.py | 122 +++++++++++++++++++++++++++------ 11 files changed, 416 insertions(+), 164 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 245958de7..f7604a253 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -921,11 +921,13 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.update_available_resource(context, self.host) - def pre_live_migration(self, context, instance_id, time=None): + def pre_live_migration(self, context, instance_id, time=None, + block_migration=False, **kwargs): """Preparations for live migration at dest host. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param block_migration: if true, prepare for block migration """ if not time: @@ -977,17 +979,24 @@ class ComputeManager(manager.SchedulerDependentManager): # onto destination host. self.driver.ensure_filtering_rules_for_instance(instance_ref) - def live_migration(self, context, instance_id, dest): + # Preparation for block migration + if block_migration: + self.driver.pre_block_migration(context, + instance_ref, + kwargs.get('disk')) + + def live_migration(self, context, instance_id, + dest, block_migration=False): """Executing live migration. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param dest: destination host + :param block_migration: if true, do block migration """ # Get instance for error handling. instance_ref = self.db.instance_get(context, instance_id) - i_name = instance_ref.name try: # Checking volume node is working correctly when any volumes @@ -998,13 +1007,20 @@ class ComputeManager(manager.SchedulerDependentManager): {"method": "check_for_export", "args": {'instance_id': instance_id}}) - # Asking dest host to preparing live migration. 
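            # (Illustrative aside -- the payload assembled below ends up
            # shaped roughly like this for a block migration, where the
            # instance id is an example value and "disk" is the JSON string
            # returned by driver.get_instance_disk_info():
            #
            #   {"method": "pre_live_migration",
            #    "args": {"instance_id": 42,
            #             "block_migration": True,
            #             "disk": '[{"path": "...", "type": "raw",'
            #                     ' "local_gb": 10}]'}}
            # )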
+ args = {} + args['instance_id'] = instance_id + if block_migration: + args['block_migration'] = block_migration + args['disk'] = \ + self.driver.get_instance_disk_info(context, instance_ref) + rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": "pre_live_migration", - "args": {'instance_id': instance_id}}) + "args": args}) except Exception: + i_name = instance_ref.name msg = _("Pre live migration for %(i_name)s failed at %(dest)s") LOG.error(msg % locals()) self.recover_live_migration(context, instance_ref) @@ -1015,9 +1031,11 @@ class ComputeManager(manager.SchedulerDependentManager): # nothing must be recovered in this version. self.driver.live_migration(context, instance_ref, dest, self.post_live_migration, - self.recover_live_migration) + self.recover_live_migration, + block_migration) - def post_live_migration(self, ctxt, instance_ref, dest): + def post_live_migration(self, ctxt, instance_ref, + dest, block_migration=False): """Post operations for live migration. This method is called from live_migration @@ -1068,6 +1086,10 @@ class ComputeManager(manager.SchedulerDependentManager): # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) + # No instance booting at source host, but instance dir + # must be deleted for preparing next block migration + if block_migration: + self.driver.destroy(instance_ref) LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.') % locals()) @@ -1075,14 +1097,20 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): + def recover_live_migration(self, ctxt, instance_ref, host=None, + dest=None, delete=True): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param host: DB column value is updated by this hostname. If none, the host instance currently running is selected. - + :param dest: + This method is called from live migration src host. + This param specifies destination host. + :param delete: + If true, ask destination host to remove instance dir, + since empty disk image was created for block migration """ if not host: host = instance_ref['host'] @@ -1101,6 +1129,15 @@ class ComputeManager(manager.SchedulerDependentManager): if dest: volume_api.remove_from_compute(ctxt, volume_id, dest) + # TODO: Block migration needs empty image at destination host + # before migration starts, so if any failure occurs, + # any empty images has to be deleted. but not sure adding below + # method is appropreate here. for now, admin has to delete manually. 
+ # rpc.call(ctxt, + # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + # {"method": "self.driver.destroy", + # "args": {'instance':instance_ref}) + def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" error_list = super(ComputeManager, self).periodic_tasks(context) diff --git a/nova/db/api.py b/nova/db/api.py index 4e0aa60a2..b4cc110b1 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -483,27 +483,6 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project.""" - return IMPL.instance_get_vcpu_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project.""" - return IMPL.instance_get_memory_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project.""" - return IMPL.instance_get_disk_sum_by_host_and_project(context, - hostname, - proj_id) - - def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 103668b94..00c61bfe3 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1040,45 +1040,6 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) -@require_context -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.vcpus)) - if not result: - return 0 - return result - - -@require_context -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.memory_mb)) - if not result: - return 0 - return result - - -@require_context -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.local_gb)) - if not result: - return 0 - return result - - @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 55efe6886..1e057516b 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -125,14 +125,14 @@ class ComputeNode(BASE, NovaBase): 'ComputeNode.service_id == Service.id,' 'ComputeNode.deleted == False)') - vcpus = Column(Integer, nullable=True) - memory_mb = Column(Integer, nullable=True) - local_gb = Column(Integer, nullable=True) - vcpus_used = Column(Integer, nullable=True) - memory_mb_used = Column(Integer, nullable=True) - local_gb_used = Column(Integer, nullable=True) - hypervisor_type = Column(Text, nullable=True) - hypervisor_version = Column(Integer, nullable=True) + vcpus = Column(Integer) + memory_mb = 
Column(Integer) + local_gb = Column(Integer) + vcpus_used = Column(Integer) + memory_mb_used = Column(Integer) + local_gb_used = Column(Integer) + hypervisor_type = Column(Text) + hypervisor_version = Column(Integer) # Note(masumotok): Expected Strings example: # diff --git a/nova/exception.py b/nova/exception.py index 69b3e0359..80e8293b6 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -211,6 +211,11 @@ class DestinationHypervisorTooOld(Invalid): "has been provided.") +class DestinatioinDiskExists(Invalid): + message = _("The supplied disk path (%(path)s) already exists, " + "it is expected not to exist.") + + class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 0b257c5d8..889baa567 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -77,7 +77,8 @@ class Scheduler(object): """Must override at least this method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) - def schedule_live_migration(self, context, instance_id, dest): + def schedule_live_migration(self, context, instance_id, dest, + block_migration=False): """Live migration scheduling method. :param context: @@ -88,7 +89,6 @@ class Scheduler(object): Then scheduler send request that host. """ - # Whether instance exists and is running. instance_ref = db.instance_get(context, instance_id) @@ -96,10 +96,12 @@ class Scheduler(object): self._live_migration_src_check(context, instance_ref) # Checking destination host. - self._live_migration_dest_check(context, instance_ref, dest) + self._live_migration_dest_check(context, instance_ref, + dest, block_migration) # Common checking. - self._live_migration_common_check(context, instance_ref, dest) + self._live_migration_common_check(context, instance_ref, + dest, block_migration) # Changing instance_state. db.instance_set_state(context, @@ -147,7 +149,8 @@ class Scheduler(object): if not self.service_is_up(services[0]): raise exception.ComputeServiceUnavailable(host=src) - def _live_migration_dest_check(self, context, instance_ref, dest): + def _live_migration_dest_check(self, context, instance_ref, dest, + block_migration): """Live migration check routine (for destination host). :param context: security context @@ -175,9 +178,11 @@ class Scheduler(object): # Checking dst host still has enough capacities. self.assert_compute_node_has_enough_resources(context, instance_ref, - dest) + dest, + block_migration) - def _live_migration_common_check(self, context, instance_ref, dest): + def _live_migration_common_check(self, context, instance_ref, dest, + block_migration): """Live migration common check routine. Below checkings are followed by @@ -186,11 +191,19 @@ class Scheduler(object): :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host + :param block_migration if True, check for block_migration. """ # Checking shared storage connectivity - self.mounted_on_same_shared_storage(context, instance_ref, dest) + # if block migration, instances_paths should not be on shared storage. + try: + self.mounted_on_same_shared_storage(context, instance_ref, dest) + if block_migration: + raise + except rpc.RemoteError: + if not block_migration: + raise # Checking dest exists. 
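        # (Aside on the shared-storage try/except above -- it encodes a
        # small truth table:
        #
        #   shared storage | block_migration | outcome
        #   ---------------+-----------------+--------------------------
        #   yes            | yes             | error raised
        #   yes            | no              | OK
        #   no             | yes             | OK, RemoteError swallowed
        #   no             | no              | error re-raised
        #
        # The bare "raise" in the first case is replaced with the more
        # explicit exception.InvalidSharedStorage later in this series.)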
dservice_refs = db.service_get_all_compute_by_host(context, dest) @@ -229,14 +242,24 @@ class Scheduler(object): "original host %(src)s.") % locals()) raise - def assert_compute_node_has_enough_resources(self, context, - instance_ref, dest): + def assert_compute_node_has_enough_resources(self, context, instance_ref, + dest, block_migration): """Checks if destination host has enough resource for live migration. - Currently, only memory checking has been done. - If storage migration(block migration, meaning live-migration - without any shared storage) will be available, local storage - checking is also necessary. + :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest: destination host + :param block_migration: if True, disk checking has been done + + """ + self.assert_compute_node_has_enough_memory(context, instance_ref, dest) + if not block_migration: + return + self.assert_compute_node_has_enough_disk(context, instance_ref, dest) + + def assert_compute_node_has_enough_memory(self, context, + instance_ref, dest): + """Checks if destination host has enough memory for live migration. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object @@ -244,22 +267,69 @@ class Scheduler(object): """ - # Getting instance information - ec2_id = instance_ref['hostname'] + # Getting total available memory and disk of host + avail = self._get_compute_info(context, dest, 'memory_mb') - # Getting host information - service_refs = db.service_get_all_compute_by_host(context, dest) - compute_node_ref = service_refs[0]['compute_node'][0] + # Getting total used memory and disk of host + # It should be sum of memories that are assigned as max value, + # because overcommiting is risky. + used = 0 + instance_refs = db.instance_get_all_by_host(context, dest) + used_list = [i['memory_mb'] for i in instance_refs] + if used_list: + used = reduce(lambda x, y: x + y, used_list) - mem_total = int(compute_node_ref['memory_mb']) - mem_used = int(compute_node_ref['memory_mb_used']) - mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] - if mem_avail <= mem_inst: - reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s " - "(host:%(mem_avail)s <= instance:%(mem_inst)s)") + avail = avail - used + if avail <= mem_inst: + ec2_id = instance_ref['hostname'] + reason = _("Unable to migrate %(ec2_id)s to %(dest)s: Lack of " + "disk(host:%(avail)s <= instance:%(mem_inst)s)") raise exception.MigrationError(reason=reason % locals()) + def assert_compute_node_has_enough_disk(self, context, + instance_ref, dest): + """Checks if destination host has enough disk for block migration. + + :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest: destination host + + """ + + # Getting total available memory and disk of host + avail = self._get_compute_info(context, dest, 'local_gb') + + # Getting total used memory and disk of host + # It should be sum of disks that are assigned as max value + # because overcommiting is risky. 
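        # (Worked example: with local_gb=100 on the destination and existing
        # instances occupying [20, 30] GB, used becomes 50 and
        # avail = 100 - 50 = 50; a migrating instance with local_gb=60 then
        # trips the "avail <= disk_inst" test below and MigrationError is
        # raised. The numbers are illustrative only.)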
+ used = 0 + instance_refs = db.instance_get_all_by_host(context, dest) + used_list = [i['local_gb'] for i in instance_refs] + if used_list: + used = reduce(lambda x, y: x + y, used_list) + + disk_inst = instance_ref['local_gb'] + avail = avail - used + if avail <= disk_inst: + ec2_id = instance_ref['hostname'] + reason = _("Unable to migrate %(ec2_id)s to %(dest)s: Lack of " + "disk(host:%(avail)s <= instance:%(disk_inst)s)") + raise exception.MigrationError(reason=reason % locals()) + + def _get_compute_info(self, context, host, key): + """get compute node's infomation specified by key + + :param context: security context + :param host: hostname(must be compute node) + :param key: column name of compute_nodes + :return: value specified by key + + """ + compute_node_ref = db.service_get_all_compute_by_host(context, host) + compute_node_ref = compute_node_ref[0]['compute_node'][0] + return compute_node_ref[key] + def mounted_on_same_shared_storage(self, context, instance_ref, dest): """Check if the src and dest host mount same shared storage. diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index bd40e73c0..1f663ca4b 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -97,7 +97,7 @@ class SchedulerManager(manager.Manager): # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin. # Based on bexar design summit discussion, # just put this here for bexar release. - def show_host_resources(self, context, host, *args): + def show_host_resources(self, context, host): """Shows the physical/usage resource given by hosts. :param context: security context @@ -105,43 +105,45 @@ class SchedulerManager(manager.Manager): :returns: example format is below. {'resource':D, 'usage':{proj_id1:D, proj_id2:D}} - D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048} + D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048 + 'vcpus_used': 12, 'memory_mb': 10240, + 'local_gb': 64} """ + # Getting compute node info and related instances info compute_ref = db.service_get_all_compute_by_host(context, host) compute_ref = compute_ref[0] - - # Getting physical resource information - compute_node_ref = compute_ref['compute_node'][0] - resource = {'vcpus': compute_node_ref['vcpus'], - 'memory_mb': compute_node_ref['memory_mb'], - 'local_gb': compute_node_ref['local_gb'], - 'vcpus_used': compute_node_ref['vcpus_used'], - 'memory_mb_used': compute_node_ref['memory_mb_used'], - 'local_gb_used': compute_node_ref['local_gb_used']} - - # Getting usage resource information - usage = {} instance_refs = db.instance_get_all_by_host(context, compute_ref['host']) + + # Getting total available/used resource + compute_ref = compute_ref['compute_node'][0] + resource = {'vcpus': compute_ref['vcpus'], + 'memory_mb': compute_ref['memory_mb'], + 'local_gb': compute_ref['local_gb'], + 'vcpus_used': compute_ref['vcpus_used'], + 'memory_mb_used': compute_ref['memory_mb_used'], + 'local_gb_used': compute_ref['local_gb_used']} + usage = dict() if not instance_refs: return {'resource': resource, 'usage': usage} + # Getting usage resource per project project_ids = [i['project_id'] for i in instance_refs] project_ids = list(set(project_ids)) for project_id in project_ids: - vcpus = db.instance_get_vcpu_sum_by_host_and_project(context, - host, - project_id) - mem = db.instance_get_memory_sum_by_host_and_project(context, - host, - project_id) - hdd = db.instance_get_disk_sum_by_host_and_project(context, - host, - project_id) - usage[project_id] = {'vcpus': int(vcpus), - 'memory_mb': int(mem), - 
'local_gb': int(hdd)} + vcpus = [i['vcpus'] for i in instance_refs \ + if i['project_id'] == project_id] + + mem = [i['memory_mb'] for i in instance_refs \ + if i['project_id'] == project_id] + + disk = [i['local_gb'] for i in instance_refs \ + if i['project_id'] == project_id] + + usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus), + 'memory_mb': reduce(lambda x, y: x + y, mem), + 'local_gb': reduce(lambda x, y: x + y, disk)} return {'resource': resource, 'usage': usage} diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 50b6b52c6..384f6fb00 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -652,10 +652,13 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_dest_check(nocare, nocare, + i_ref['host'], False) + driver_i._live_migration_common_check(nocare, nocare, + i_ref['host'], False) self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + kwargs = {'instance_id': instance_id, 'dest': i_ref['host'], + 'block_migration': False} rpc.cast(self.context, db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), {"method": 'live_migration', "args": kwargs}) @@ -663,7 +666,8 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.scheduler.live_migration(self.context, FLAGS.compute_topic, instance_id=instance_id, - dest=i_ref['host']) + dest=i_ref['host'], + block_migration=False) i_ref = db.instance_get(self.context, instance_id) self.assertTrue(i_ref['state_description'] == 'migrating') @@ -744,7 +748,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.ComputeServiceUnavailable, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -757,7 +761,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.UnableToMigrateToSelf, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -765,15 +769,33 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migration_dest_check_service_lack_memory(self): """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + memory_mb=12) i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) + s_ref = self._create_compute_service(host='somewhere') + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere', False) + + db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) + db.service_destroy(self.context, s_ref['id']) + + def test_block_migration_dest_check_service_lack_disk(self): + """Confirms exception raises when dest doesn't have enough 
disk.""" + instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + local_gb=70) + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere') self.assertRaises(exception.MigrationError, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') + self.context, i_ref, 'somewhere', True) db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) db.service_destroy(self.context, s_ref['id']) def test_live_migration_dest_check_service_works_correctly(self): @@ -785,7 +807,8 @@ class SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_dest_check(self.context, i_ref, - 'somewhere') + 'somewhere', + False) self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -820,7 +843,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.SourceHostUnavailable, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -844,7 +867,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -870,7 +893,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.DestinationHypervisorTooOld, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -902,7 +925,8 @@ class SimpleDriverTestCase(test.TestCase): try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, - dest) + dest, + False) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4ac2dbc4..1f48a6dce 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -545,7 +545,8 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.recover_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -622,7 +623,8 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.recover_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index b6b36745a..000b383f2 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -20,6 +20,7 @@ import os import re import shutil import sys +import tempfile from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -674,6 +675,95 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) 
db.instance_destroy(self.context, instance_ref['id']) + def test_pre_block_migration_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Replace instances_path since this testcase creates tmpfile + tmpdir = tempfile.mkdtemp() + store = FLAGS.instances_path + FLAGS.instances_path = tmpdir + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyjson = '[{"path": "%s/disk", "local_gb": 10, "type": "raw"}]' + + # Preparing mocks + # qemu-img should be mockd since test environment might not have + # large disk space. + self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw', + '%s/%s/disk' % (tmpdir, instance_ref.name), 10) + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + conn.pre_block_migration(self.context, instance_ref, + dummyjson % tmpdir) + + self.assertTrue(os.path.exists('%s/%s/libvirt.xml' % + (tmpdir, instance_ref.name))) + + shutil.rmtree(tmpdir) + db.instance_destroy(self.context, instance_ref['id']) + # Restore FLAGS.instances_path + FLAGS.instances_path = store + + def test_get_instance_disk_info_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Test data + instance_ref = db.instance_create(self.context, self.test_instance) + dummyxml = ("instance-0000000a" + "" + "" + "" + "" + "" + "" + "" + "") + + ret = ("image: /test/disk\nfile format: raw\n" + "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n") + + # Preparing mocks + vdmock = self.mox.CreateMock(libvirt.virDomain) + self.mox.StubOutWithMock(vdmock, "XMLDesc") + vdmock.XMLDesc(0).AndReturn(dummyxml) + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return vdmock + self.create_fake_libvirt_mock(lookupByName=fake_lookup) + + self.mox.StubOutWithMock(os.path, "getsize") + # based on above testdata, one is raw image, so getsize is mocked. + os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024) + # another is qcow image, so qemu-img should be mocked. 
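        # (For reference, how the driver parses the "ret" string mocked
        # here: from the line "virtual size: 20G (21474836480 bytes)" it
        # takes line.split('(')[1].split()[0] -> "21474836480", and
        # 21474836480 / 1024 / 1024 / 1024 -> 20, which is why the
        # assertions below expect local_gb == 20 for the qcow2 disk.)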
+ self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\ + AndReturn((ret, '')) + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + info = conn.get_instance_disk_info(self.context, instance_ref) + info = utils.loads(info) + + self.assertTrue(info[0]['type'] == 'raw' and + info[1]['type'] == 'qcow2' and + info[0]['path'] == '/test/disk' and + info[1]['path'] == '/test/disk.local' and + info[0]['local_gb'] == 10 and + info[1]['local_gb'] == 20) + + db.instance_destroy(self.context, instance_ref['id']) + def test_spawn_with_network_info(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index c491418ae..86e388ff7 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri', flags.DEFINE_string('live_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", 'Define live migration behavior.') +flags.DEFINE_string('block_migration_flag', + "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, " + "VIR_MIGRATE_NON_SHARED_DISK", + 'Define block migration behavior.') flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') flags.DEFINE_string('qemu_img', 'qemu-img', @@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver): If cow is True, it will make a CoW image instead of a copy. """ + if not os.path.exists(target): base_dir = os.path.join(FLAGS.instances_path, '_base') if not os.path.exists(base_dir): @@ -1458,7 +1463,7 @@ class LibvirtConnection(driver.ComputeDriver): time.sleep(1) def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): + post_method, recover_method, block_migration=False): """Spawning live_migration operation for distributing high-load. :params ctxt: security context @@ -1466,20 +1471,22 @@ class LibvirtConnection(driver.ComputeDriver): nova.db.sqlalchemy.models.Instance object instance object that is migrated. :params dest: destination host + :params block_migration: destination host :params post_method: post operation method. expected nova.compute.manager.post_live_migration. :params recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. + :params block_migration: if true, do block migration. """ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest, - post_method, recover_method) + post_method, recover_method, block_migration) - def _live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method): + def _live_migration(self, ctxt, instance_ref, dest, post_method, + recover_method, block_migration=False): """Do live migration. :params ctxt: security context @@ -1498,24 +1505,18 @@ class LibvirtConnection(driver.ComputeDriver): # Do live migration. 
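        # (Worked example, assuming libvirt's usual constant values of
        # VIR_MIGRATE_UNDEFINE_SOURCE = 16, VIR_MIGRATE_PEER2PEER = 2 and
        # VIR_MIGRATE_NON_SHARED_DISK = 64: block_migration_flag as defined
        # above reduces to 16 | 2 | 64 == 82, the flags value handed to
        # migrateToURI(). A later commit in this series swaps in
        # VIR_MIGRATE_NON_SHARED_INC (128), giving 146.)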
try: - flaglist = FLAGS.live_migration_flag.split(',') + if block_migration: + flaglist = FLAGS.block_migration_flag.split(',') + else: + flaglist = FLAGS.live_migration_flag.split(',') flagvals = [getattr(libvirt, x.strip()) for x in flaglist] logical_sum = reduce(lambda x, y: x | y, flagvals) - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(FLAGS.live_migration_uri % dest, - logical_sum, - None, - FLAGS.live_migration_bandwidth) + dom = self._conn.lookupByName(instance_ref.name) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) except Exception: recover_method(ctxt, instance_ref, dest=dest) @@ -1530,11 +1531,92 @@ class LibvirtConnection(driver.ComputeDriver): self.get_info(instance_ref.name)['state'] except exception.NotFound: timer.stop() - post_method(ctxt, instance_ref, dest) + post_method(ctxt, instance_ref, dest, block_migration) timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) + def pre_block_migration(self, ctxt, instance_ref, disk_info_json): + """Preparation block migration. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params disk_info_json: + json strings specified in get_instance_disk_info + + """ + disk_info = utils.loads(disk_info_json) + + # make instance directory + instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name']) + if os.path.exists(instance_dir): + raise exception.DestinatioinDiskExists(path=instance_dir) + os.mkdir(instance_dir) + + for disk in disk_info: + base = os.path.basename(disk['path']) + # Get image type and create empty disk image. + instance_disk = os.path.join(instance_dir, base) + utils.execute('sudo', 'qemu-img', 'create', '-f', disk['type'], + instance_disk, str(disk['local_gb'])+'G') + + # block migration does not migrate libvirt.xml, + # to avoid any confusion of admins, create it now. + xml = self.to_xml(instance_ref) + f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+') + f.write(xml) + f.close() + + def get_instance_disk_info(self, ctxt, instance_ref): + """Preparation block migration. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :return: + json strings with below format. + "[{'path':'disk', 'type':'raw', 'local_gb':10},...]" + + """ + disk_info = [] + + virt_dom = self._lookup_by_name(instance_ref.name) + xml = virt_dom.XMLDesc(0) + doc = libxml2.parseDoc(xml) + disk_nodes = doc.xpathEval('//devices/disk') + path_nodes = doc.xpathEval('//devices/disk/source') + driver_nodes = doc.xpathEval('//devices/disk/driver') + + for cnt, path_node in enumerate(path_nodes): + disk_type = disk_nodes[cnt].get_properties().getContent() + path = path_node.get_properties().getContent() + + if disk_type != 'file': + LOG.debug(_('skipping %(path)s since it looks like volume') % + locals()) + continue + + # xml is generated by kvm, so format is slightly different + # from libvirt.xml that nova generated. 
+ #disk_type = driver_nodes[cnt].get_properties().getContent() + disk_type = \ + driver_nodes[cnt].get_properties().get_next().getContent() + if disk_type == 'raw': + size = int(os.path.getsize(path)) / 1024 / 1024 / 1024 + else: + out, err = utils.execute('sudo', 'qemu-img', 'info', path) + size = [i.split('(')[1].split()[0] for i in out.split('\n') + if i.strip().find('virtual size') >= 0] + size = int(size[0]) / 1024 / 1024 / 1024 + + disk_info.append({'type': disk_type, 'path': path, + 'local_gb': size}) + + return utils.dumps(disk_info) + def unfilter_instance(self, instance_ref): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance_ref) -- cgit From c16e1af623ef8baa1098510a8a04adb06f2da81b Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 15 Jun 2011 01:35:54 +0900 Subject: added kernel/ramdisk migrate support --- nova/virt/libvirt/connection.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 86e388ff7..73ca6e0d9 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1555,12 +1555,28 @@ class LibvirtConnection(driver.ComputeDriver): raise exception.DestinatioinDiskExists(path=instance_dir) os.mkdir(instance_dir) - for disk in disk_info: - base = os.path.basename(disk['path']) + for info in disk_info: + base = os.path.basename(info['path']) # Get image type and create empty disk image. instance_disk = os.path.join(instance_dir, base) - utils.execute('sudo', 'qemu-img', 'create', '-f', disk['type'], - instance_disk, str(disk['local_gb'])+'G') + utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'], + instance_disk, str(info['local_gb']) + 'G') + + # if image has kernel and ramdisk, just download + # following normal way. + if instance_ref['kernel_id']: + user = manager.AuthManager().get_user(instance_ref['user_id']) + project = manager.AuthManager().get_project( + instance_ref['project_id']) + self._fetch_image(os.path.join(instance_dir, 'kernel'), + instance_ref['kernel_id'], + user, + project) + if instance_ref['ramdisk_id']: + self._fetch_image(os.path.join(instance_dir, 'ramdisk'), + instance_ref['ramdisk_id'], + user, + project) # block migration does not migrate libvirt.xml, # to avoid any confusion of admins, create it now. -- cgit From b03b3145a18f8f4717fdc55ab50dc714516d2c54 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 08:32:34 +0900 Subject: fix comments at nova.virt.libvirt.connection --- nova/virt/libvirt/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 313b19194..8fe8148d5 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1617,9 +1617,10 @@ class LibvirtConnection(driver.ComputeDriver): locals()) continue - # xml is generated by kvm, so format is slightly different - # from libvirt.xml that nova generated. - #disk_type = driver_nodes[cnt].get_properties().getContent() + # In case of libvirt.xml, disk type can be obtained + # by the below statement. + # -> disk_type = driver_nodes[cnt].get_properties().getContent() + # but this xml is generated by kvm, format is slightly different. 
disk_type = \ driver_nodes[cnt].get_properties().get_next().getContent() if disk_type == 'raw': -- cgit From 9b52343f792d83647978c7edbfe700258e3ddae2 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 08:51:25 +0900 Subject: fix pep8 check --- nova/compute/manager.py | 2 +- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f7604a253..4410ff27e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1133,7 +1133,7 @@ class ComputeManager(manager.SchedulerDependentManager): # before migration starts, so if any failure occurs, # any empty images has to be deleted. but not sure adding below # method is appropreate here. for now, admin has to delete manually. - # rpc.call(ctxt, + # rpc.cast(ctxt, # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), # {"method": "self.driver.destroy", # "args": {'instance':instance_ref}) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 8fe8148d5..d0c52c763 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1617,7 +1617,7 @@ class LibvirtConnection(driver.ComputeDriver): locals()) continue - # In case of libvirt.xml, disk type can be obtained + # In case of libvirt.xml, disk type can be obtained # by the below statement. # -> disk_type = driver_nodes[cnt].get_properties().getContent() # but this xml is generated by kvm, format is slightly different. -- cgit From a6d527646184889863de5ab1082695a29f70988a Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 20 Jun 2011 09:43:34 +0900 Subject: nova.virt.libvirt.connection._live_migration is changed --- nova/compute/manager.py | 20 +++++++++----------- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 402d5829e..f32dfe6ab 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1220,8 +1220,7 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None, - dest=None, delete=True): + def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context @@ -1231,9 +1230,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param dest: This method is called from live migration src host. This param specifies destination host. - :param delete: - If true, ask destination host to remove instance dir, - since empty disk image was created for block migration """ if not host: host = instance_ref['host'] @@ -1254,12 +1250,14 @@ class ComputeManager(manager.SchedulerDependentManager): # TODO: Block migration needs empty image at destination host # before migration starts, so if any failure occurs, - # any empty images has to be deleted. but not sure adding below - # method is appropreate here. for now, admin has to delete manually. - # rpc.cast(ctxt, - # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), - # {"method": "self.driver.destroy", - # "args": {'instance':instance_ref}) + # any empty images has to be deleted. 
+ # In current version argument dest != None means this method is + # called for error recovering + #if dest: + # rpc.cast(ctxt, + # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + # {"method": "self.driver.destroy", + # "args": {'instance':instance_ref}) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index b0517e32a..2ddaa5971 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1573,7 +1573,7 @@ class LibvirtConnection(driver.ComputeDriver): self.get_info(instance_ref.name)['state'] except exception.NotFound: timer.stop() - post_method(ctxt, instance_ref, dest, block_migration) + post_method(ctxt, instance_ref, dest) timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) -- cgit From c184fa5d03f3f8d7faaff7b583404874de409aa6 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 21 Jun 2011 20:51:07 +0900 Subject: fixed reviewer's comment. 1. adding dest-instance-dir deleting operation to nova.compute.manager, 2. fix invalid raise statement --- nova/compute/manager.py | 36 +++++++++++++++++++++++------------- nova/exception.py | 4 ++++ nova/scheduler/driver.py | 3 ++- nova/tests/test_compute.py | 17 +++++++++++++---- nova/tests/test_libvirt.py | 6 +++--- nova/virt/libvirt/connection.py | 14 ++++++++++++-- 6 files changed, 57 insertions(+), 23 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f32dfe6ab..eac543251 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1045,7 +1045,7 @@ class ComputeManager(manager.SchedulerDependentManager): return self.driver.update_available_resource(context, self.host) def pre_live_migration(self, context, instance_id, time=None, - block_migration=False, **kwargs): + block_migration=False, disk=None): """Preparations for live migration at dest host. :param context: security context @@ -1106,7 +1106,7 @@ class ComputeManager(manager.SchedulerDependentManager): if block_migration: self.driver.pre_block_migration(context, instance_ref, - kwargs.get('disk')) + disk) def live_migration(self, context, instance_id, dest, block_migration=False): @@ -1130,17 +1130,18 @@ class ComputeManager(manager.SchedulerDependentManager): {"method": "check_for_export", "args": {'instance_id': instance_id}}) - args = {} - args['instance_id'] = instance_id if block_migration: - args['block_migration'] = block_migration - args['disk'] = \ - self.driver.get_instance_disk_info(context, instance_ref) + disk = self.driver.get_instance_disk_info(context, + instance_ref) + else: + disk = None rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": "pre_live_migration", - "args": args}) + "args": {'instance_id': instance_id, + 'block_migration': block_migration, + 'disk': disk}}) except Exception: i_name = instance_ref.name @@ -1253,11 +1254,20 @@ class ComputeManager(manager.SchedulerDependentManager): # any empty images has to be deleted. 
# In current version argument dest != None means this method is # called for error recovering - #if dest: - # rpc.cast(ctxt, - # self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), - # {"method": "self.driver.destroy", - # "args": {'instance':instance_ref}) + if dest: + rpc.cast(ctxt, + self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest), + {"method": "cleanup", + "args": {'instance_id': instance_ref['id']}}) + + def cleanup(self, ctxt, instance_id): + """ Cleaning up image directory that is created pre_live_migration. + + :param ctxt: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + """ + instances_ref = self.db.instance_get(ctxt, instance_id) + self.driver.cleanup(instance_ref) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/exception.py b/nova/exception.py index 689a797fd..4ebf0e169 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -588,6 +588,10 @@ class InstanceExists(Duplicate): message = _("Instance %(name)s already exists.") +class InvalidSharedStorage(NovaException): + message = _("%(path)s is on shared storage: %(reason)s") + + class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 889baa567..55762cf85 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -200,7 +200,8 @@ class Scheduler(object): try: self.mounted_on_same_shared_storage(context, instance_ref, dest) if block_migration: - raise + reason = "Block migration can not be used with shared storage." + raise exception.InvalidSharedStorage(reason=reason, path=dest) except rpc.RemoteError: if not block_migration: raise diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 840961771..e9f5eb0e8 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -556,7 +556,10 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) + self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, @@ -582,7 +585,9 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, @@ -609,7 +614,9 @@ class ComputeTestCase(test.TestCase): AndReturn(topic) self.mox.StubOutWithMock(rpc, 'call') rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, @@ -634,7 +641,9 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 
'block_migration': False, + 'disk': None}}) self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 79a2b84b7..2e57dae08 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -734,7 +734,7 @@ class LibvirtConnTestCase(test.TestCase): # large disk space. self.mox.StubOutWithMock(utils, "execute") utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw', - '%s/%s/disk' % (tmpdir, instance_ref.name), 10) + '%s/%s/disk' % (tmpdir, instance_ref.name), '10G') self.mox.ReplayAll() conn = connection.LibvirtConnection(False) @@ -759,10 +759,10 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = db.instance_create(self.context, self.test_instance) dummyxml = ("instance-0000000a" "" - "" + "" "" "" - "" + "" "" "" "") diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2ddaa5971..8d23df45f 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -120,7 +120,7 @@ flags.DEFINE_string('live_migration_flag', 'Define live migration behavior.') flags.DEFINE_string('block_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, " - "VIR_MIGRATE_NON_SHARED_DISK", + "VIR_MIGRATE_NON_SHARED_INC", 'Define block migration behavior.') flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') @@ -295,7 +295,10 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(justinsb): We remove the domain definition. We probably # would do better to keep it if cleanup=False (e.g. volumes?) # (e.g. #2 - not losing machines on failure) - virt_dom.undefine() + # NOTE(masumotok): Migrated instances does not have domain + # definitions. + if instance.name in self._conn.listDefinedDomains(): + virt_dom.undefine() except libvirt.libvirtError as e: errcode = e.get_error_code() LOG.warning(_("Error from libvirt during undefine of " @@ -335,6 +338,13 @@ class LibvirtConnection(driver.ComputeDriver): if os.path.exists(target): shutil.rmtree(target) + def cleanup(self, instance): + """ Cleaning up image directory that is created pre_live_migration. + + :param instance: nova.db.sqlalchemy.models.Instance + """ + self._cleanup(instance) + @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._lookup_by_name(instance_name) -- cgit From e01848ed64c4523bb9e375da07e962b5ea1ea6ee Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 21 Jun 2011 21:10:08 +0900 Subject: erase unnecessary TODO: statement --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index eac543251..16dbda47d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1249,7 +1249,7 @@ class ComputeManager(manager.SchedulerDependentManager): if dest: volume_api.remove_from_compute(ctxt, volume_id, dest) - # TODO: Block migration needs empty image at destination host + # Block migration needs empty image at destination host # before migration starts, so if any failure occurs, # any empty images has to be deleted. # In current version argument dest != None means this method is -- cgit From 0a3b50d7b754af26b68f81617cab9aa588484362 Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Mon, 27 Jun 2011 16:03:14 +0100 Subject: Pulled changes, passed the unit tests. 
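For reference, the device numbering implied by the vmops.py hunk below
(an inference from the code, not stated in the original message):

    userdevice 0 -> root read/write disk (create_vbd)
    userdevice 1 -> reserved for rescue
    userdevice 2 -> bootable ISO, when booting from an image of
                    ImageType.DISK_ISO (create_cd_vbd)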
--- .../migrate_repo/versions/027_add_provider_firewall_rules.py | 3 +-- nova/virt/xenapi/vmops.py | 5 +---- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py index 5aa30f7a8..cb3c73170 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py @@ -58,8 +58,7 @@ provider_fw_rules = Table('provider_fw_rules', meta, Column('to_port', Integer()), Column('cidr', String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) + unicode_error=None, _warn_on_bytestring=False))) def upgrade(migrate_engine): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5cf99b9ac..eef354aec 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -196,7 +196,7 @@ class VMOps(object): kernel, ramdisk, use_pv_kernel) # device 0 reserved for RW disk - userdevice = 0; + userdevice = 0 # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk if disk_image_type == ImageType.DISK_ISO: @@ -214,15 +214,12 @@ class VMOps(object): userdevice = userdevice + 2 VMHelper.create_cd_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=cd_vdi_ref, userdevice=userdevice, bootable=True) - - else: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=True) # userdevice 1 is reserved for rescue userdevice = userdevice + 1 - # Attach any other disks for vdi in vdis[1:]: # vdi['vdi_type'] is either 'os' or 'swap', but we don't -- cgit From 32b06d654922e08a53c6a4fc49fd2ad40e7a5d20 Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Tue, 28 Jun 2011 12:32:23 +0100 Subject: Revise key used to identify the SR used to store ISO images streamed from Glance. --- nova/virt/xenapi/vm_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 0d08e459c..ede8114c4 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -984,12 +984,14 @@ def find_iso_sr(session): sr_rec = session.get_xenapi().SR.get_record(sr_ref) LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals()) - #TODO: use ['other_config']['il8n-key-iso-sr'] == 'local-storage' - # or something similar to identify the iso sr if not (sr_rec['content_type'] == 'iso' and - sr_rec['name_label'] == 'Local ISOs'): - LOG.debug(_("ISO: No match")) + 'i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage-iso'): + LOG.debug(_("ISO: SR with 'content_type' of 'iso' and" + " 'other-config' key 'i18n-key' of value" + " 'local-storage-iso'")) continue + LOG.debug(_("ISO: SR MATCHing our criteria")) for pbd_ref in sr_rec['PBDs']: LOG.debug(_("ISO: ISO, looking to see if it is host local")) -- cgit From 8878f6433cebcac963ed8789200f38a5ac4dfddd Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Tue, 28 Jun 2011 12:33:12 +0100 Subject: Add fake SR with ISO content type. 
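The fake SR added in the diff below is shaped to satisfy the lookup that find_iso_sr performs against a live session. A hedged sketch of that matching rule, written against a plain record dict; is_local_iso_sr is an illustrative name, not a helper in the tree.

# Sketch of the SR-matching predicate from find_iso_sr(), applied to a plain
# dict shaped like an SR.get_record() result. Illustrative only.


def is_local_iso_sr(sr_rec):
    """True if the record looks like the host-local ISO library."""
    return (sr_rec.get('content_type') == 'iso' and
            sr_rec.get('other_config', {}).get('i18n-key') ==
            'local-storage-iso')


# The fake SR created below is built to match this predicate:
assert is_local_iso_sr({'content_type': 'iso',
                        'other_config': {'i18n-key': 'local-storage-iso'}})
assert not is_local_iso_sr({'content_type': 'iso', 'other_config': {}})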
--- nova/virt/xenapi/fake.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'nova') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index d5ac39473..1aa642e4e 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -194,6 +194,7 @@ def create_local_pifs(): Do this one per host.""" for host_ref in _db_content['host'].keys(): _create_local_pif(host_ref) + _create_local_sr_iso(host_ref) def create_local_srs(): @@ -222,6 +223,25 @@ def _create_local_sr(host_ref): return sr_ref +def _create_local_sr_iso(host_ref): + sr_ref = _create_object( + 'SR', + {'name_label': 'Local storage ISO', + 'type': 'lvm', + 'content_type': 'iso', + 'shared': False, + 'physical_size': str(1 << 30), + 'physical_utilisation': str(0), + 'virtual_allocation': str(0), + 'other_config': { + 'i18n-original-value-name_label': 'Local storage ISO', + 'i18n-key': 'local-storage-iso'}, + 'VDIs': []}) + pbd_ref = create_pbd('', host_ref, sr_ref, True) + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + return sr_ref + + def _create_local_pif(host_ref): pif_ref = _create_object('PIF', {'name-label': 'Fake PIF', -- cgit From 6ca1845582334d69474f5f9d661177e77cd769fe Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Tue, 28 Jun 2011 12:34:17 +0100 Subject: Add test for spawn from an ISO. --- nova/tests/glance/stubs.py | 6 ++++++ nova/tests/test_xenapi.py | 6 ++++++ 2 files changed, 12 insertions(+) (limited to 'nova') diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 1e0b90d82..37fb7a9d7 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -32,6 +32,7 @@ class FakeGlance(object): IMAGE_RAMDISK = 3 IMAGE_RAW = 4 IMAGE_VHD = 5 + IMAGE_ISO = 6 IMAGE_FIXTURES = { IMAGE_MACHINE: { @@ -58,6 +59,11 @@ class FakeGlance(object): 'image_meta': {'name': 'fakevhd', 'size': 0, 'disk_format': 'vhd', 'container_format': 'ovf'}, + 'image_data': StringIO.StringIO('')}, + IMAGE_ISO: { + 'image_meta': {'name': 'fakeiso', 'size': 0, + 'disk_format': 'iso', + 'container_format': 'bare'}, 'image_data': StringIO.StringIO('')}} def __init__(self, host, port=None, use_ssl=False): diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index d9a514745..b8f53b735 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -446,6 +446,12 @@ class XenAPIVMTestCase(test.TestCase): os_type="windows", architecture="i386") self.check_vm_params_for_windows() + def test_spawn_iso_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None, + os_type="windows", architecture="i386") + self.check_vm_params_for_windows() + def test_spawn_glance(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, -- cgit From 3794d8889ed933fc776d7541ef25e2c9583a6cf6 Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Wed, 29 Jun 2011 16:17:33 +0100 Subject: clean up logging for iso SR search --- nova/virt/xenapi/vm_utils.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index ede8114c4..4a0e97e2e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -984,12 +984,14 @@ def find_iso_sr(session): sr_rec = session.get_xenapi().SR.get_record(sr_ref) LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals()) - if not (sr_rec['content_type'] == 'iso' and - 'i18n-key' in sr_rec['other_config'] and - 
sr_rec['other_config']['i18n-key'] == 'local-storage-iso'): - LOG.debug(_("ISO: SR with 'content_type' of 'iso' and" - " 'other-config' key 'i18n-key' of value" - " 'local-storage-iso'")) + if not sr_rec['content_type'] == 'iso': + LOG.debug(_("ISO: not iso content")) + continue + if not 'i18n-key' in sr_rec['other_config']: + LOG.debug(_("ISO: iso content_type, no 'i18n-key' key")) + continue + if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso': + LOG.debug(_("ISO: iso content_type, i18n-key value not 'local-storage-iso'")) continue LOG.debug(_("ISO: SR MATCHing our criteria")) -- cgit From 848fd99f378976f99fa883fec85f30c4e9f46bca Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Wed, 29 Jun 2011 17:38:02 +0100 Subject: pep8 fix --- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4a0e97e2e..5f6387acc 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -991,7 +991,8 @@ def find_iso_sr(session): LOG.debug(_("ISO: iso content_type, no 'i18n-key' key")) continue if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso': - LOG.debug(_("ISO: iso content_type, i18n-key value not 'local-storage-iso'")) + LOG.debug(_("ISO: iso content_type, i18n-key value not " + "'local-storage-iso'")) continue LOG.debug(_("ISO: SR MATCHing our criteria")) -- cgit From 4001ee488420589e345dc42001e6cab9c68a5e12 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Mon, 11 Jul 2011 16:31:31 +0900 Subject: fix comments --- nova/compute/manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7d89dde80..1fbbe0065 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1341,10 +1341,9 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, block_migration=False): """Post operations for live migration . - :param ctxt: security context + :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param block_migration: block_migration - :param xml: libvirt.xml """ instance_ref = self.db.instance_get(context, instance_id) -- cgit From c3cdcc1eb0c9fd37f49701d976c7ceae8df44caf Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 22 Jul 2011 22:41:29 +0200 Subject: This is me being all cocky, thinking I'll make it use ipsets... 
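Alongside the ipset experiment, the api.py hunk below reworks trigger_security_group_members_refresh to take a list of group ids and walk from granting rules to their parent groups, then to member instances, and finally to hosts. That fan-out reads more clearly as a standalone sketch; db is a stand-in object with the same method names as nova's db API, and records are treated as plain dicts.

# Sketch of the rules -> parent groups -> instances -> hosts walk performed
# by trigger_security_group_members_refresh. `db` is a stand-in for nova.db.


def hosts_to_refresh(db, context, group_ids):
    """Return the compute hosts that must re-sync their firewall rules."""
    # Rules that grant access *to* any of the changed groups...
    rules = []
    for group_id in group_ids:
        rules.extend(db.security_group_rule_get_by_security_group_grantee(
            context, group_id))
    # ...the security groups those rules belong to, deduplicated by id...
    groups = {}
    for rule in rules:
        parent_id = rule['parent_group_id']
        groups[parent_id] = db.security_group_get(context, parent_id)
    # ...every member instance, and finally the hosts they live on.
    hosts = set()
    for group in groups.values():
        for instance in group['instances']:
            hosts.add(instance['host'])
    return hosts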
--- nova/compute/api.py | 18 ++++++++------- nova/db/sqlalchemy/models.py | 6 +++++ nova/network/linux_net.py | 30 +++++++++++++++++++++++++ nova/network/manager.py | 24 +++++++++++++++++++- nova/tests/test_iptables_network.py | 39 ++++++++++++++++++++++++++++++-- nova/virt/libvirt/firewall.py | 44 +++++++++++++++++++++++++++---------- 6 files changed, 139 insertions(+), 22 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 432658bbb..65a594d2c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -305,10 +305,6 @@ class API(base.Base): updates['hostname'] = self.hostname_factory(instance) instance = self.update(context, instance_id, **updates) - - for group_id in security_groups: - self.trigger_security_group_members_refresh(elevated, group_id) - return instance def _ask_scheduler_to_create_instance(self, context, base_options, @@ -464,19 +460,22 @@ class API(base.Base): {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) - def trigger_security_group_members_refresh(self, context, group_id): + def trigger_security_group_members_refresh(self, context, group_ids): """Called when a security group gains a new or loses a member. Sends an update request to each compute node for whom this is relevant. """ - # First, we get the security group rules that reference this group as + # First, we get the security group rules that reference these groups as # the grantee.. - security_group_rules = \ + security_group_rules = set() + for group_id in group_ids: + security_group_rules.update( self.db.security_group_rule_get_by_security_group_grantee( context, - group_id) + group_id)) + LOG.info('rules: %r', security_group_rules) # ..then we distill the security groups to which they belong.. security_groups = set() for rule in security_group_rules: @@ -485,12 +484,14 @@ class API(base.Base): rule['parent_group_id']) security_groups.add(security_group) + LOG.info('security_groups: %r', security_groups) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: instances.add(instance) + LOG.info('instances: %r', instances) # ...then we find the hosts where they live... hosts = set() for instance in instances: @@ -500,6 +501,7 @@ class API(base.Base): # ...and finally we tell these nodes to refresh their view of this # particular security group. for host in hosts: + LOG.info('host: %r', host) rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "refresh_security_group_members", diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index d29d3d6f1..023821dfb 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -491,6 +491,12 @@ class SecurityGroupIngressRule(BASE, NovaBase): # Note: This is not the parent SecurityGroup. It's SecurityGroup we're # granting access for. 
group_id = Column(Integer, ForeignKey('security_groups.id')) + grantee_group = relationship("SecurityGroup", + foreign_keys=group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == False)') + class ProviderFirewallRule(BASE, NovaBase): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 283a5aca1..0e021a40f 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -96,6 +96,33 @@ class IptablesRule(object): chain = self.chain return '-A %s %s' % (chain, self.rule) +class IpSet(object): + """A class for handling large collections of IPs efficiently""" + + def __init__(self, name, execute=None): + self.name = name + self._ips = set() + if not execute: + self.execute = _execute + else: + self.execute = execute + + def __contains__(self, addr): + return addr in self._ips + + def _set_name(self): + return '%s-%s' % (binary_name, self.name) + + def add_ip(self, addr): + self._ips.add(addr) + self.execute('ipset', '-A', self._set_name(), addr) + + def remove_ip(self, addr): + self._ips.remove(addr) + self.execute('ipset', '-D', self._set_name(), addr) + + def iptables_source_match(self): + return ['-m set --match-set %s src' % (self._set_name(),)] class IptablesTable(object): """An iptables table.""" @@ -281,6 +308,9 @@ class IptablesManager(object): self.ipv4['nat'].add_chain('floating-snat') self.ipv4['nat'].add_rule('snat', '-j $floating-snat') + def ipset_supported(self): + return False + @utils.synchronized('iptables', external=True) def apply(self): """Apply the current in-memory set of iptables rules. diff --git a/nova/network/manager.py b/nova/network/manager.py index 824e8d24d..928cb09f6 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -63,6 +63,7 @@ from nova import quota from nova import utils from nova import rpc from nova.network import api as network_api +from nova.compute import api as compute_api import random @@ -297,6 +298,7 @@ class NetworkManager(manager.SchedulerDependentManager): network_driver = FLAGS.network_driver self.driver = utils.import_object(network_driver) self.network_api = network_api.API() + self.compute_api = compute_api.API() super(NetworkManager, self).__init__(service_name='network', *args, **kwargs) @@ -350,6 +352,15 @@ class NetworkManager(manager.SchedulerDependentManager): # return so worker will only grab 1 (to help scale flatter) return self.set_network_host(context, network['id']) + def _do_trigger_security_group_members_refresh_for_instance(self, + context, + instance_id): + instance_ref = db.instance_get(context, instance_id) + groups = instance_ref.security_groups + group_ids = [group.id for group in groups] + self.compute_api.trigger_security_group_members_refresh(context, + group_ids) + def _get_networks_for_instance(self, context, instance_id, project_id): """Determine & return which networks an instance should connect to.""" # TODO(tr3buchet) maybe this needs to be updated in the future if @@ -511,6 +522,9 @@ class NetworkManager(manager.SchedulerDependentManager): address = self.db.fixed_ip_associate_pool(context.elevated(), network['id'], instance_id) + self._do_trigger_security_group_members_refresh_for_instance( + context, + instance_id) vif = self.db.virtual_interface_get_by_instance_and_network(context, instance_id, network['id']) @@ -524,6 +538,12 @@ class NetworkManager(manager.SchedulerDependentManager): self.db.fixed_ip_update(context, address, {'allocated': False, 'virtual_interface_id': None}) + 
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) + instance_ref = fixed_ip_ref['instance'] + instance_id = instance_ref['id'] + self._do_trigger_security_group_members_refresh_for_instance( + context, + instance_id) def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is leased.""" @@ -825,7 +845,9 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): address = self.db.fixed_ip_associate_pool(context, network['id'], instance_id) - + self._do_trigger_security_group_members_refresh_for_instance( + context, + instance_id) vif = self.db.virtual_interface_get_by_instance_and_network(context, instance_id, network['id']) diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py index 918034269..d0a8c052c 100644 --- a/nova/tests/test_iptables_network.py +++ b/nova/tests/test_iptables_network.py @@ -17,11 +17,46 @@ # under the License. """Unit Tests for network code.""" -import os - from nova import test from nova.network import linux_net +class IpSetTestCase(test.TestCase): + def test_add(self): + """Adding an address""" + ipset = linux_net.IpSet('somename') + + ipset.add_ip('1.2.3.4') + self.assertTrue('1.2.3.4' in ipset) + + + def test_add_remove(self): + """Adding and then removing an address""" + + self.verify_cmd_call_count = 0 + def verify_cmd(*args): + self.assertEquals(args, self.expected_cmd) + self.verify_cmd_call_count += 1 + + self.expected_cmd = ('ipset', '-A', 'run_tests.py-somename', '1.2.3.4') + ipset = linux_net.IpSet('somename',execute=verify_cmd) + ipset.add_ip('1.2.3.4') + self.assertTrue('1.2.3.4' in ipset) + + self.expected_cmd = ('ipset', '-D', 'run_tests.py-somename', '1.2.3.4') + ipset.remove_ip('1.2.3.4') + self.assertTrue('1.2.3.4' not in ipset) + self.assertEquals(self.verify_cmd_call_count, 2) + + + def test_two_adds_one_remove(self): + """Adding the same address twice works. Removing it once removes it entirely.""" + ipset = linux_net.IpSet('somename') + + ipset.add_ip('1.2.3.4') + ipset.add_ip('1.2.3.4') + ipset.remove_ip('1.2.3.4') + self.assertTrue('1.2.3.4' not in ipset) + class IptablesManagerTestCase(test.TestCase): sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 379197398..aa36e4184 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -663,11 +663,10 @@ class IptablesFirewallDriver(FirewallDriver): LOG.debug(_('Adding security group rule: %r'), rule) if not rule.cidr: - # Eventually, a mechanism to grant access for security - # groups will turn up here. It'll use ipsets. 
- continue + version = 4 + else: + version = netutils.get_ip_version(rule.cidr) - version = netutils.get_ip_version(rule.cidr) if version == 4: fw_rules = ipv4_rules else: @@ -677,16 +676,16 @@ class IptablesFirewallDriver(FirewallDriver): if version == 6 and rule.protocol == 'icmp': protocol = 'icmpv6' - args = ['-p', protocol, '-s', rule.cidr] + args = ['-j ACCEPT', '-p', protocol] - if rule.protocol in ['udp', 'tcp']: + if protocol in ['udp', 'tcp']: if rule.from_port == rule.to_port: args += ['--dport', '%s' % (rule.from_port,)] else: args += ['-m', 'multiport', '--dports', '%s:%s' % (rule.from_port, rule.to_port)] - elif rule.protocol == 'icmp': + elif protocol == 'icmp': icmp_type = rule.from_port icmp_code = rule.to_port @@ -705,9 +704,30 @@ class IptablesFirewallDriver(FirewallDriver): args += ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg] - args += ['-j ACCEPT'] - fw_rules += [' '.join(args)] - + if rule.cidr: + LOG.info('Using cidr %r', rule.cidr) + args += ['-s', rule.cidr] + fw_rules += [' '.join(args)] + else: + LOG.info('Not using cidr %r', rule.cidr) + if self.iptables.ipset_supported(): + LOG.info('ipset supported %r', rule.cidr) + ipset = linux_net.IpSet('%s' % rule.group_id) + args += ipset.iptables_source_match() + fw_rules += [' '.join(args)] + else: + LOG.info('ipset unsupported %r', rule.cidr) + LOG.info('rule.grantee_group.instances: %r', rule.grantee_group.instances) + for instance in rule.grantee_group.instances: + LOG.info('instance: %r', instance) + ips = db.instance_get_fixed_addresses(ctxt, + instance['id']) + LOG.info('ips: %r', ips) + for ip in ips: + subrule = args + ['-s %s' % ip] + fw_rules += [' '.join(subrule)] + + LOG.info('Using fw_rules: %r', fw_rules) ipv4_rules += ['-j $sg-fallback'] ipv6_rules += ['-j $sg-fallback'] @@ -718,7 +738,9 @@ class IptablesFirewallDriver(FirewallDriver): return self.nwfilter.instance_filter_exists(instance) def refresh_security_group_members(self, security_group): - pass + if not self.iptables.ipset_supported(): + self.do_refresh_security_group_rules(security_group) + self.iptables.apply() def refresh_security_group_rules(self, security_group, network_info=None): self.do_refresh_security_group_rules(security_group, network_info) -- cgit From 00fcb54769fdbe8828d7bd52a6636ffc5ad6c862 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 22 Jul 2011 22:49:16 +0200 Subject: ...and this is me snapping back into reality removing all trace of ipsets. Go me. 
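What survives this revert is the plain expansion introduced above: each grantee-group rule becomes one '-s <ip>' ACCEPT rule per member's fixed address. A minimal sketch of that string building, under the assumption that member_ips replaces the db.instance_get_fixed_addresses() lookups; no iptables is invoked.

# Sketch of the per-member rule expansion kept after backing out ipsets: one
# iptables source match per fixed IP in the grantee group. Argument order
# mirrors the '-j ACCEPT'-first style used in the diff.


def expand_group_rule(protocol, from_port, to_port, member_ips):
    args = ['-j ACCEPT', '-p', protocol]
    if protocol in ('udp', 'tcp'):
        if from_port == to_port:
            args += ['--dport', str(from_port)]
        else:
            args += ['-m', 'multiport',
                     '--dports', '%s:%s' % (from_port, to_port)]
    return [' '.join(args + ['-s %s' % ip]) for ip in member_ips]


assert expand_group_rule('tcp', 80, 81, ['10.11.12.14']) == \
    ['-j ACCEPT -p tcp -m multiport --dports 80:81 -s 10.11.12.14']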
--- nova/network/linux_net.py | 30 ---------------------------- nova/tests/test_iptables_network.py | 39 ++----------------------------------- nova/virt/libvirt/firewall.py | 30 ++++++++++------------------ 3 files changed, 12 insertions(+), 87 deletions(-) (limited to 'nova') diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 0e021a40f..283a5aca1 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -96,33 +96,6 @@ class IptablesRule(object): chain = self.chain return '-A %s %s' % (chain, self.rule) -class IpSet(object): - """A class for handling large collections of IPs efficiently""" - - def __init__(self, name, execute=None): - self.name = name - self._ips = set() - if not execute: - self.execute = _execute - else: - self.execute = execute - - def __contains__(self, addr): - return addr in self._ips - - def _set_name(self): - return '%s-%s' % (binary_name, self.name) - - def add_ip(self, addr): - self._ips.add(addr) - self.execute('ipset', '-A', self._set_name(), addr) - - def remove_ip(self, addr): - self._ips.remove(addr) - self.execute('ipset', '-D', self._set_name(), addr) - - def iptables_source_match(self): - return ['-m set --match-set %s src' % (self._set_name(),)] class IptablesTable(object): """An iptables table.""" @@ -308,9 +281,6 @@ class IptablesManager(object): self.ipv4['nat'].add_chain('floating-snat') self.ipv4['nat'].add_rule('snat', '-j $floating-snat') - def ipset_supported(self): - return False - @utils.synchronized('iptables', external=True) def apply(self): """Apply the current in-memory set of iptables rules. diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py index d0a8c052c..918034269 100644 --- a/nova/tests/test_iptables_network.py +++ b/nova/tests/test_iptables_network.py @@ -17,46 +17,11 @@ # under the License. """Unit Tests for network code.""" +import os + from nova import test from nova.network import linux_net -class IpSetTestCase(test.TestCase): - def test_add(self): - """Adding an address""" - ipset = linux_net.IpSet('somename') - - ipset.add_ip('1.2.3.4') - self.assertTrue('1.2.3.4' in ipset) - - - def test_add_remove(self): - """Adding and then removing an address""" - - self.verify_cmd_call_count = 0 - def verify_cmd(*args): - self.assertEquals(args, self.expected_cmd) - self.verify_cmd_call_count += 1 - - self.expected_cmd = ('ipset', '-A', 'run_tests.py-somename', '1.2.3.4') - ipset = linux_net.IpSet('somename',execute=verify_cmd) - ipset.add_ip('1.2.3.4') - self.assertTrue('1.2.3.4' in ipset) - - self.expected_cmd = ('ipset', '-D', 'run_tests.py-somename', '1.2.3.4') - ipset.remove_ip('1.2.3.4') - self.assertTrue('1.2.3.4' not in ipset) - self.assertEquals(self.verify_cmd_call_count, 2) - - - def test_two_adds_one_remove(self): - """Adding the same address twice works. 
Removing it once removes it entirely.""" - ipset = linux_net.IpSet('somename') - - ipset.add_ip('1.2.3.4') - ipset.add_ip('1.2.3.4') - ipset.remove_ip('1.2.3.4') - self.assertTrue('1.2.3.4' not in ipset) - class IptablesManagerTestCase(test.TestCase): sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index aa36e4184..4d615058b 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -709,23 +709,14 @@ class IptablesFirewallDriver(FirewallDriver): args += ['-s', rule.cidr] fw_rules += [' '.join(args)] else: - LOG.info('Not using cidr %r', rule.cidr) - if self.iptables.ipset_supported(): - LOG.info('ipset supported %r', rule.cidr) - ipset = linux_net.IpSet('%s' % rule.group_id) - args += ipset.iptables_source_match() - fw_rules += [' '.join(args)] - else: - LOG.info('ipset unsupported %r', rule.cidr) - LOG.info('rule.grantee_group.instances: %r', rule.grantee_group.instances) - for instance in rule.grantee_group.instances: - LOG.info('instance: %r', instance) - ips = db.instance_get_fixed_addresses(ctxt, - instance['id']) - LOG.info('ips: %r', ips) - for ip in ips: - subrule = args + ['-s %s' % ip] - fw_rules += [' '.join(subrule)] + for instance in rule.grantee_group.instances: + LOG.info('instance: %r', instance) + ips = db.instance_get_fixed_addresses(ctxt, + instance['id']) + LOG.info('ips: %r', ips) + for ip in ips: + subrule = args + ['-s %s' % ip] + fw_rules += [' '.join(subrule)] LOG.info('Using fw_rules: %r', fw_rules) ipv4_rules += ['-j $sg-fallback'] @@ -738,9 +729,8 @@ class IptablesFirewallDriver(FirewallDriver): return self.nwfilter.instance_filter_exists(instance) def refresh_security_group_members(self, security_group): - if not self.iptables.ipset_supported(): - self.do_refresh_security_group_rules(security_group) - self.iptables.apply() + self.do_refresh_security_group_rules(security_group) + self.iptables.apply() def refresh_security_group_rules(self, security_group, network_info=None): self.do_refresh_security_group_rules(security_group, network_info) -- cgit From 5961aa33f01db7503beeab4fabafb8e0d9ef6a3e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 24 Jul 2011 06:29:43 -0700 Subject: Adjust and re-enable relevant unit tests. 
--- nova/tests/test_libvirt.py | 50 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 6 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index f99e1713d..8eec7aada 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -891,7 +891,6 @@ class IptablesFirewallTestCase(test.TestCase): 'project_id': 'fake', 'instance_type_id': 1}) - @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_static_filters(self): instance_ref = self._create_instance_ref() ip = '10.11.12.13' @@ -907,17 +906,41 @@ class IptablesFirewallTestCase(test.TestCase): fixed_ip = {'address': ip, 'network_id': network_ref['id'], 'virtual_interface_id': vif_ref['id']} + + src_instance_ref = self._create_instance_ref() + src_instance_ip = '10.11.12.14' + src_instance_vif = {'address': '56:12:12:12:12:13', + 'network_id': network_ref['id'], + 'instance_id': src_instance_ref['id']} + src_instance_vif_ref = db.virtual_interface_create(self.context, + src_instance_vif) + src_instance_fixed_ip = {'address': src_instance_ip, + 'network_id': network_ref['id'], + 'virtual_interface_id': + src_instance_vif_ref['id']} + admin_ctxt = context.get_admin_context() db.fixed_ip_create(admin_ctxt, fixed_ip) db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, 'instance_id': instance_ref['id']}) + db.fixed_ip_create(admin_ctxt, src_instance_fixed_ip) + db.fixed_ip_update(admin_ctxt, src_instance_ip, + {'allocated': True, + 'instance_id': src_instance_ref['id']}) + secgroup = db.security_group_create(admin_ctxt, {'user_id': 'fake', 'project_id': 'fake', 'name': 'testgroup', 'description': 'test group'}) + src_secgroup = db.security_group_create(admin_ctxt, + {'user_id': 'fake', + 'project_id': 'fake', + 'name': 'testsourcegroup', + 'description': 'src group'}) + db.security_group_rule_create(admin_ctxt, {'parent_group_id': secgroup['id'], 'protocol': 'icmp', @@ -939,9 +962,19 @@ class IptablesFirewallTestCase(test.TestCase): 'to_port': 81, 'cidr': '192.168.10.0/24'}) + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'group_id': src_secgroup['id']}) + db.instance_add_security_group(admin_ctxt, instance_ref['id'], secgroup['id']) + db.instance_add_security_group(admin_ctxt, src_instance_ref['id'], + src_secgroup['id']) instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id']) # self.fw.add_instance(instance_ref) def fake_iptables_execute(*cmd, **kwargs): @@ -994,17 +1027,22 @@ class IptablesFirewallTestCase(test.TestCase): self.assertTrue(security_group_chain, "The security group chain wasn't added") - regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT') + regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP acceptance rule wasn't added") - regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp ' - '--icmp-type 8 -j ACCEPT') + regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8' + ' -s 192.168.11.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP Echo Request acceptance rule wasn't added") - regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport ' - '--dports 80:81 -j ACCEPT') + regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport ' + '--dports 80:81 -s %s' % (src_instance_ip,)) + 
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, + "TCP port 80/81 acceptance rule wasn't added") + + regex = re.compile('-A .* -j ACCEPT -p tcp ' + '-m multiport --dports 80:81 -s 192.168.10.0/24') self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "TCP port 80/81 acceptance rule wasn't added") db.instance_destroy(admin_ctxt, instance_ref['id']) -- cgit From a943c01dd56169270e1986ce62ae99f16ee4abe3 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 24 Jul 2011 06:30:59 -0700 Subject: Make IP allocation test work again. --- nova/tests/test_network.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index b09021e13..9e021feea 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -216,7 +216,11 @@ class VlanNetworkTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') + self.mox.StubOutWithMock(db, 'instance_get') + db.instance_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn({ 'security_groups': + [ { 'id': 0 } ] }) db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.1') -- cgit From 6ddc49298f87fc20c6daff994495d745dc82b6e3 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 24 Jul 2011 06:33:57 -0700 Subject: Use subscript rather than attribute. --- nova/virt/libvirt/firewall.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 4d615058b..27056255c 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -709,14 +709,15 @@ class IptablesFirewallDriver(FirewallDriver): args += ['-s', rule.cidr] fw_rules += [' '.join(args)] else: - for instance in rule.grantee_group.instances: - LOG.info('instance: %r', instance) - ips = db.instance_get_fixed_addresses(ctxt, - instance['id']) - LOG.info('ips: %r', ips) - for ip in ips: - subrule = args + ['-s %s' % ip] - fw_rules += [' '.join(subrule)] + if rule['grantee_group']: + for instance in rule['grantee_group']['instances']: + LOG.info('instance: %r', instance) + ips = db.instance_get_fixed_addresses(ctxt, + instance['id']) + LOG.info('ips: %r', ips) + for ip in ips: + subrule = args + ['-s %s' % ip] + fw_rules += [' '.join(subrule)] LOG.info('Using fw_rules: %r', fw_rules) ipv4_rules += ['-j $sg-fallback'] -- cgit From a88f7ab3b7469af70c74ed5962abf867e62d768f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 24 Jul 2011 06:35:38 -0700 Subject: Use admin context when fetching instances. 
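The manager.py hunk below drops the caller-supplied context and elevates inside the helper instead, since the fixed-ip paths can run under a context with no rights on the instance row. A small sketch of the pattern, with ctxt_module and db standing in for nova.context and nova's db API so the snippet stays self-contained.

# Sketch of the elevate-before-lookup pattern adopted below. Both arguments
# are stand-ins; only the call sequence is the point.


def member_group_ids(ctxt_module, db, instance_id):
    """Security group ids for an instance, read with admin rights."""
    admin_context = ctxt_module.get_admin_context()
    instance_ref = db.instance_get(admin_context, instance_id)
    return [group['id'] for group in instance_ref['security_groups']]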
--- nova/network/manager.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 928cb09f6..ac37bb885 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -353,12 +353,12 @@ class NetworkManager(manager.SchedulerDependentManager): return self.set_network_host(context, network['id']) def _do_trigger_security_group_members_refresh_for_instance(self, - context, instance_id): - instance_ref = db.instance_get(context, instance_id) - groups = instance_ref.security_groups - group_ids = [group.id for group in groups] - self.compute_api.trigger_security_group_members_refresh(context, + admin_context = context.get_admin_context() + instance_ref = self.db.instance_get(admin_context, instance_id) + groups = instance_ref['security_groups'] + group_ids = [group['id'] for group in groups] + self.compute_api.trigger_security_group_members_refresh(admin_context, group_ids) def _get_networks_for_instance(self, context, instance_id, project_id): @@ -523,7 +523,6 @@ class NetworkManager(manager.SchedulerDependentManager): network['id'], instance_id) self._do_trigger_security_group_members_refresh_for_instance( - context, instance_id) vif = self.db.virtual_interface_get_by_instance_and_network(context, instance_id, @@ -542,7 +541,6 @@ class NetworkManager(manager.SchedulerDependentManager): instance_ref = fixed_ip_ref['instance'] instance_id = instance_ref['id'] self._do_trigger_security_group_members_refresh_for_instance( - context, instance_id) def lease_fixed_ip(self, context, address): @@ -846,7 +844,6 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): network['id'], instance_id) self._do_trigger_security_group_members_refresh_for_instance( - context, instance_id) vif = self.db.virtual_interface_get_by_instance_and_network(context, instance_id, -- cgit From 3841a5515807b42e2e74e3119f76cdb2ef0f5575 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 26 Jul 2011 11:04:53 -0700 Subject: Remove debugging code. --- nova/compute/api.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'nova') diff --git a/nova/compute/api.py b/nova/compute/api.py index 65a594d2c..ad8886f23 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -475,7 +475,6 @@ class API(base.Base): context, group_id)) - LOG.info('rules: %r', security_group_rules) # ..then we distill the security groups to which they belong.. security_groups = set() for rule in security_group_rules: @@ -484,14 +483,12 @@ class API(base.Base): rule['parent_group_id']) security_groups.add(security_group) - LOG.info('security_groups: %r', security_groups) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: instances.add(instance) - LOG.info('instances: %r', instances) # ...then we find the hosts where they live... hosts = set() for instance in instances: @@ -501,7 +498,6 @@ class API(base.Base): # ...and finally we tell these nodes to refresh their view of this # particular security group. 
for host in hosts: - LOG.info('host: %r', host) rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "refresh_security_group_members", -- cgit From 00171c3f50d333a1771efc048b064e1fd73614b0 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 26 Jul 2011 14:10:26 -0700 Subject: pep8 --- nova/db/sqlalchemy/models.py | 3 +-- nova/tests/test_network.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 4ecf80c8f..9f1967d69 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -494,14 +494,13 @@ class SecurityGroupIngressRule(BASE, NovaBase): # Note: This is not the parent SecurityGroup. It's SecurityGroup we're # granting access for. group_id = Column(Integer, ForeignKey('security_groups.id')) - grantee_group = relationship("SecurityGroup", + grantee_group = relationship("SecurityGroup", foreign_keys=group_id, primaryjoin='and_(' 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' 'SecurityGroupIngressRule.deleted == False)') - class ProviderFirewallRule(BASE, NovaBase): """Represents a rule in a security group.""" __tablename__ = 'provider_fw_rules' diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index e6cd5858a..4119953f2 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -215,8 +215,8 @@ class VlanNetworkTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'instance_get') db.instance_get(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn({ 'security_groups': - [ { 'id': 0 } ] }) + mox.IgnoreArg()).AndReturn({'security_groups': + [{'id': 0}]}) db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.1') -- cgit From 151ecddc0227ff9e779712532971fac8a2c79c7b Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Wed, 27 Jul 2011 15:40:13 +0100 Subject: Address merge review concerns. --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/vmops.py | 28 +++++++++++++++++----------- 2 files changed, 18 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d0ffb13da..5d6fe0825 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -520,7 +520,7 @@ class VMHelper(HelperBase): # DISK restores LOG.debug(_("Fetching image %(image)s") % locals()) LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type)) - sr_ref = None + if image_type == ImageType.DISK_ISO: sr_ref = safe_find_iso_sr(session) LOG.debug(_("ISO: Found sr possibly containing the ISO image")) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1830ce18d..7ac923933 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -238,6 +238,21 @@ class VMOps(object): raise vm_create_error + # Add disks to VM + self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref, + vdis) + + # Alter the image before VM start for, e.g. 
network injection + if FLAGS.xenapi_inject_image: + VMHelper.preconfigure_instance(self._session, instance, + first_vdi_ref, network_info) + + self.create_vifs(vm_ref, instance, network_info) + self.inject_network_info(instance, network_info, vm_ref) + return vm_ref + + def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref, + vdis): # device 0 reserved for RW disk userdevice = 0 @@ -253,7 +268,7 @@ class VMOps(object): VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=False) - # device 1 reserved for rescue disk so use '2', we've used '0' + # device 1 reserved for rescue disk and we've used '0' userdevice = 2 VMHelper.create_cd_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=cd_vdi_ref, userdevice=userdevice, bootable=True) @@ -264,7 +279,7 @@ class VMOps(object): VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=True) # set user device to next free value - # userdevice 1 is reserved for rescue, we've used '0' + # userdevice 1 is reserved for rescue and we've used '0' userdevice = 2 # Attach any other disks @@ -278,15 +293,6 @@ class VMOps(object): bootable=False) userdevice += 1 - # Alter the image before VM start for, e.g. network injection - if FLAGS.xenapi_inject_image: - VMHelper.preconfigure_instance(self._session, instance, - first_vdi_ref, network_info) - - self.create_vifs(vm_ref, instance, network_info) - self.inject_network_info(instance, network_info, vm_ref) - return vm_ref - def _spawn(self, instance, vm_ref): """Spawn a new instance.""" LOG.debug(_('Starting VM %s...'), vm_ref) -- cgit From 79283cbb13d91e3c25e42af765f9da627813a6d8 Mon Sep 17 00:00:00 2001 From: Kei masumoto Date: Fri, 29 Jul 2011 20:03:23 +0900 Subject: merged trunk and fixed post_live_migration_at_destination to get nw_info --- nova/compute/manager.py | 4 +++- nova/virt/libvirt/connection.py | 7 +++++-- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e80394fbd..8aee456fc 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1389,7 +1389,7 @@ class ComputeManager(manager.SchedulerDependentManager): # No instance booting at source host, but instance dir # must be deleted for preparing next block migration if block_migration: - self.driver.destroy(instance_ref) + self.driver.destroy(instance_ref, network_info) LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.') % locals()) @@ -1409,8 +1409,10 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) LOG.info(_('Post operation of migration started for %s.') % instance_ref.name) + network_info = self._get_instance_nw_info(context, instance_ref) self.driver.post_live_migration_at_destination(context, instance_ref, + network_info, block_migration) def rollback_live_migration(self, context, instance_ref, diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 0eab0b109..31f7acb4d 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1620,7 +1620,9 @@ class LibvirtConnection(driver.ComputeDriver): user, project) - def post_live_migration_at_destination(self, ctxt, instance_ref, + def post_live_migration_at_destination(self, ctxt, + instance_ref, + network_info, block_migration): """Post operation of live migration at destination host.
@@ -1628,6 +1630,7 @@ class LibvirtConnection(driver.ComputeDriver): :params instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. + :params network_info: instance network information :params block_migration: if true, post operation of block_migration. """ # Define migrated instance, otherwise, suspend/destroy does not work. @@ -1639,7 +1642,7 @@ class LibvirtConnection(driver.ComputeDriver): # In case of block migration, destination does not have # libvirt.xml if not os.path.isfile(xml_path): - xml = self.to_xml(instance_ref) + xml = self.to_xml(instance_ref, network_info=network_info) f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+') f.write(xml) f.close() -- cgit From 8c54cfdabad3b3c102bae05283dd8484da38c557 Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Mon, 1 Aug 2011 09:30:13 +0100 Subject: Remove copy/paste error. --- nova/virt/xenapi/vmops.py | 9 --------- 1 file changed, 9 deletions(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index f2258bde3..5c0b3020f 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -293,15 +293,6 @@ class VMOps(object): bootable=False) userdevice += 1 - # Alter the image before VM start for, e.g. network injection - if FLAGS.flat_injected: - VMHelper.preconfigure_instance(self._session, instance, - first_vdi_ref, network_info) - - self.create_vifs(vm_ref, instance, network_info) - self.inject_network_info(instance, network_info, vm_ref) - return vm_ref - def _spawn(self, instance, vm_ref): """Spawn a new instance.""" LOG.debug(_('Starting VM %s...'), vm_ref) -- cgit From e72fafbf76ed456039426a96dd65d2c148dffa29 Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 1 Aug 2011 12:37:12 -0700 Subject: add a function to make network-creation validation smarter: - detects if the cidr is already in use - when specifying a supernet to be split into smaller subnets via num_networks && network_size, ensures none of the returned subnets is in use by an existing subnet of the same size and range, nor by a smaller one within the same range - detects whether splitting a supernet into num_networks subnets of network_size will fit - detects if the specified supernet/cidr conflicts with an existing network cidr that is a larger supernet already encompassing it.
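Since those bullets compress a lot of behaviour, here is a hedged sketch of the core subnet arithmetic with netaddr, as the diff below uses it. This is not the full validation; the real _validate_cidrs also walks supernets of the request and promotes smaller in-use subnets before comparing, while split_cidr (an illustrative name) only shows the splitting and the same-size in-use check.

# Minimal sketch of the subnet splitting behind _validate_cidrs, using
# netaddr as the diff does. Supernet conflicts and smaller in-use subnets
# are handled by the real function, not here.
import math

import netaddr


def split_cidr(cidr, num_networks, network_size, used_cidrs=()):
    prefix = 32 - int(math.log(network_size, 2))
    supernet = netaddr.IPNetwork(cidr)
    if network_size * num_networks > supernet.size:
        raise ValueError("network_size * num_networks exceeds cidr size")
    used = set(netaddr.IPNetwork(c) for c in used_cidrs)
    subnet = netaddr.IPNetwork('%s/%s' % (supernet.ip, prefix))
    chosen = []
    while len(chosen) < num_networks:
        if subnet not in used:
            chosen.append(subnet)
        subnet = subnet.next()  # step to the adjacent same-size block
    return chosen


# Splitting around an in-use /24, as the tests below expect:
print(split_cidr('192.168.0.0/16', 4, 256, used_cidrs=['192.168.2.0/24']))
# -> 192.168.0.0/24, 192.168.1.0/24, 192.168.3.0/24, 192.168.4.0/24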
" --- nova/network/manager.py | 48 +++++++++++++++++++++ nova/tests/test_network.py | 102 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 4a3791d8a..fafb75c9e 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -609,6 +609,54 @@ class NetworkManager(manager.SchedulerDependentManager): network_ref = self.db.fixed_ip_get_network(context, address) self._setup_network(context, network_ref) + def _validate_cidrs(self, context, cidr, num_networks, network_size): + significant_bits = 32 - int(math.log(network_size, 2)) + req_net = netaddr.IPNetwork(cidr) + req_net_ip = str(req_net.ip) + req_size = network_size * num_networks + if req_size > req_net.size: + raise ValueError(_("network_size * num_networks exceeds cidr size")) + adjusted_cidr = netaddr.IPNetwork(req_net_ip+'/'+str(significant_bits)) + all_req_nets = [adjusted_cidr] + try: + used_nets = self.db.network_get_all(context) + except exception.NoNetworksFound: + used_nets = [] + used_cidrs = [netaddr.IPNetwork(net['cidr']) for net in used_nets] + if adjusted_cidr in used_cidrs: + raise ValueError(_("cidr already in use")) + for adjusted_cidr_supernet in adjusted_cidr.supernet(): + if adjusted_cidr_supernet in used_cidrs: + raise ValueError(_("requested cidr (%s) conflicts with existing supernet (%s)" % (str(adjusted_cidr), str(adjusted_cidr_supernet)))) + # split supernet into subnets + if num_networks >= 2: + next_cidr = adjusted_cidr + for used_cidr in used_cidrs: + # watch for smaller subnets conflicting + if used_cidr.size < next_cidr.size: + for ucsupernet in used_cidr.supernet(): + if ucsupernet.size == next_cidr.size: + used_cidrs.append(ucsupernet) + for index in range(1, num_networks): + while True: + next_cidr = next_cidr.next() + if next_cidr in used_cidrs: + continue + else: + all_req_nets.append(next_cidr) + break + all_req_nets = list(set(all_req_nets)) + if not used_nets: + return all_req_nets + # after splitting ensure there were enough to satisfy the num_networks + if len(all_req_nets) < num_networks: + raise ValueError(_("Not enough subnets avail to satisfy requested num_networks")) + # if one of the split subnets were already defined, remove from create list + for req_cidr in all_req_nets: + if req_cidr in used_cidrs: + all_req_nets.remove(req_cidr) + return all_req_nets + def create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 28f50d328..8a851cf76 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import context from nova import db from nova import exception from nova import flags @@ -249,6 +250,15 @@ class CommonNetworkTestCase(test.TestCase): return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), dict(address='10.0.0.2')] + def network_get_by_cidr(self, context, cidr): + raise exception.NetworkNotFoundForCidr() + + def network_create_safe(self, context, net): + return {'foo': 'bar'} + + def network_get_all(self, context): + raise exception.NoNetworksFound() + def __init__(self): self.db = self.FakeDB() self.deallocate_called = None @@ -267,3 +277,95 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, manager.remove_fixed_ip_from_instance, None, 99, 'bad input') + + def test__validate_cidrs(self): + manager = self.FakeNetworkManager() + nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256) + self.assertEqual(1, len(nets)) + cidrs = [str(net) for net in nets] + self.assertTrue('192.168.0.0/24' in cidrs) + + def test__validate_cidrs_split_exact_in_half(self): + manager = self.FakeNetworkManager() + nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128) + self.assertEqual(2, len(nets)) + cidrs = [str(net) for net in nets] + self.assertTrue('192.168.0.0/25' in cidrs) + self.assertTrue('192.168.0.128/25' in cidrs) + + def test__validate_cidrs_split_cidr_in_use_middle_of_range(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/24'}]) + self.mox.ReplayAll() + nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + self.assertEqual(4, len(nets)) + cidrs = [str(net) for net in nets] + exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', '192.168.4.0'] + for exp_cidr in exp_cidrs: + self.assertTrue(exp_cidr+'/24' in cidrs) + self.assertFalse('192.168.2.0/24' in cidrs) + + def test__validate_cidrs_split_cidr_smaller_subnet_in_use_middle_of_range(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/25'}]) + self.mox.ReplayAll() + nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + self.assertEqual(4, len(nets)) + cidrs = [str(net) for net in nets] + exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', '192.168.4.0'] + for exp_cidr in exp_cidrs: + self.assertTrue(exp_cidr+'/24' in cidrs) + self.assertFalse('192.168.2.0/24' in cidrs) + + def test__validate_cidrs_one_in_use(self): + manager = self.FakeNetworkManager() + args = [None, '192.168.0.0/24', 2, 256] + # ValueError: network_size * num_networks exceeds cidr size + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test__validate_cidrs_already_used(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.0.0/24'}]) + self.mox.ReplayAll() + # ValueError: cidr already in use + args = [None, '192.168.0.0/24', 1, 256] + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test__validate_cidrs_too_many(self): + manager = self.FakeNetworkManager() + args = [None, '192.168.0.0/24', 200, 256] + # ValueError: Not enough subnets avail to satisfy requested num_networks + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def 
test__validate_cidrs_split_partial(self): + manager = self.FakeNetworkManager() + nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) + returned_cidrs = [str(net) for net in nets] + print returned_cidrs + self.assertTrue('192.168.0.0/24' in returned_cidrs) + self.assertTrue('192.168.1.0/24' in returned_cidrs) + + def test__validate_cidrs_conflict_existing_supernet(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.0.0/8'}]) + self.mox.ReplayAll() + args = [None, '192.168.0.0/24', 1, 256] + # ValueError: requested cidr (192.168.0.0/24) conflicts with existing supernet (192.0.0.0/8) + self.assertRaises(ValueError, manager._validate_cidrs, *args) +# +# def test_create_networks_cidr_already_used(self): +# mockany = self.mox.CreateMockAnything() +# manager = self.FakeNetworkManager() +# self.mox.StubOutWithMock(manager.db, 'network_get_by_cidr') +# manager.db.network_get_by_cidr(mox.IgnoreArg(), '192.168.0.0/24').AndReturn(mockany) +# self.mox.ReplayAll() +# args = [None, 'foo', '192.168.0.0/24', None, 1, 256, 'fd00::/48', None, None, None] +# self.assertRaises(ValueError, manager.create_networks, *args) -- cgit From 951114be20065044e7f12e37188eb30e859ff2cb Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 1 Aug 2011 12:47:41 -0700 Subject: removed redundant logic --- nova/network/manager.py | 6 ------ 1 file changed, 6 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index fafb75c9e..f6c7e35e0 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -646,15 +646,9 @@ class NetworkManager(manager.SchedulerDependentManager): all_req_nets.append(next_cidr) break all_req_nets = list(set(all_req_nets)) - if not used_nets: - return all_req_nets # after splitting ensure there were enough to satisfy the num_networks if len(all_req_nets) < num_networks: raise ValueError(_("Not enough subnets avail to satisfy requested num_networks")) - # if one of the split subnets were already defined, remove from create list - for req_cidr in all_req_nets: - if req_cidr in used_cidrs: - all_req_nets.remove(req_cidr) return all_req_nets def create_networks(self, context, label, cidr, multi_host, num_networks, -- cgit From ab4bfcf6c458ab6bf6ead126a91413b92aa543b8 Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 1 Aug 2011 16:27:17 -0700 Subject: pep8 fixes --- nova/network/manager.py | 12 ++++++++---- nova/tests/test_network.py | 36 +++++++++++++++++++++++------------- 2 files changed, 31 insertions(+), 17 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index f6c7e35e0..9d0f080ba 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -615,8 +615,10 @@ class NetworkManager(manager.SchedulerDependentManager): req_net_ip = str(req_net.ip) req_size = network_size * num_networks if req_size > req_net.size: - raise ValueError(_("network_size * num_networks exceeds cidr size")) - adjusted_cidr = netaddr.IPNetwork(req_net_ip+'/'+str(significant_bits)) + msg = "network_size * num_networks exceeds cidr size" + raise ValueError(_(msg)) + adjusted_cidr_str = req_net_ip + '/' + str(significant_bits) + adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str) all_req_nets = [adjusted_cidr] try: used_nets = self.db.network_get_all(context) @@ -627,7 +629,8 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(_("cidr already in 
use")) for adjusted_cidr_supernet in adjusted_cidr.supernet(): if adjusted_cidr_supernet in used_cidrs: - raise ValueError(_("requested cidr (%s) conflicts with existing supernet (%s)" % (str(adjusted_cidr), str(adjusted_cidr_supernet)))) + msg = "requested cidr (%s) conflicts with existing supernet" + raise ValueError(_(msg % str(adjusted_cidr))) # split supernet into subnets if num_networks >= 2: next_cidr = adjusted_cidr @@ -648,7 +651,8 @@ class NetworkManager(manager.SchedulerDependentManager): all_req_nets = list(set(all_req_nets)) # after splitting ensure there were enough to satisfy the num_networks if len(all_req_nets) < num_networks: - raise ValueError(_("Not enough subnets avail to satisfy requested num_networks")) + msg = "Not enough subnets avail to satisfy requested num_networks" + raise ValueError(_(msg)) return all_req_nets def create_networks(self, context, label, cidr, multi_host, num_networks, diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 8a851cf76..1f5305b1d 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -297,28 +297,32 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/24'}]) + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.0/24'}]) self.mox.ReplayAll() nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) self.assertEqual(4, len(nets)) cidrs = [str(net) for net in nets] - exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', '192.168.4.0'] + exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', + '192.168.4.0'] for exp_cidr in exp_cidrs: - self.assertTrue(exp_cidr+'/24' in cidrs) + self.assertTrue(exp_cidr + '/24' in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) - def test__validate_cidrs_split_cidr_smaller_subnet_in_use_middle_of_range(self): + def test__validate_cidrs_split_cidr_smaller_subnet_in_use(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/25'}]) + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.0/25'}]) self.mox.ReplayAll() nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) self.assertEqual(4, len(nets)) cidrs = [str(net) for net in nets] - exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', '192.168.4.0'] + exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', + '192.168.4.0'] for exp_cidr in exp_cidrs: - self.assertTrue(exp_cidr+'/24' in cidrs) + self.assertTrue(exp_cidr + '/24' in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) def test__validate_cidrs_one_in_use(self): @@ -331,7 +335,8 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.0.0/24'}]) + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.0.0/24'}]) self.mox.ReplayAll() # ValueError: cidr already in use args = [None, '192.168.0.0/24', 1, 256] @@ -340,7 +345,8 @@ class CommonNetworkTestCase(test.TestCase): def test__validate_cidrs_too_many(self): manager = self.FakeNetworkManager() args = [None, '192.168.0.0/24', 200, 256] - # ValueError: Not enough subnets avail to satisfy requested num_networks + # 
ValueError: Not enough subnets avail to satisfy requested + # num_networks self.assertRaises(ValueError, manager._validate_cidrs, *args) def test__validate_cidrs_split_partial(self): @@ -355,17 +361,21 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.0.0/8'}]) + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.0.0/8'}]) self.mox.ReplayAll() args = [None, '192.168.0.0/24', 1, 256] - # ValueError: requested cidr (192.168.0.0/24) conflicts with existing supernet (192.0.0.0/8) + # ValueError: requested cidr (192.168.0.0/24) conflicts + # with existing supernet self.assertRaises(ValueError, manager._validate_cidrs, *args) # # def test_create_networks_cidr_already_used(self): # mockany = self.mox.CreateMockAnything() # manager = self.FakeNetworkManager() # self.mox.StubOutWithMock(manager.db, 'network_get_by_cidr') -# manager.db.network_get_by_cidr(mox.IgnoreArg(), '192.168.0.0/24').AndReturn(mockany) +# manager.db.network_get_by_cidr(mox.IgnoreArg(), '192.168.0.0/24')\ +# .AndReturn(mockany) # self.mox.ReplayAll() -# args = [None, 'foo', '192.168.0.0/24', None, 1, 256, 'fd00::/48', None, None, None] +# args = [None, 'foo', '192.168.0.0/24', None, 1, 256, +# 'fd00::/48', None, None, None] # self.assertRaises(ValueError, manager.create_networks, *args) -- cgit From bcfd8f5e1e0c3b53a2ad4a5bb533d94dcf5ef18c Mon Sep 17 00:00:00 2001 From: John Tran Date: Mon, 1 Aug 2011 21:34:43 -0700 Subject: added some tests for network create & moved the ipv6 logic back into the function --- nova/network/manager.py | 43 ++++++++++++++++++------------------------- nova/tests/test_network.py | 41 ++++++++++++++++++++++++++++------------- 2 files changed, 46 insertions(+), 38 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 9d0f080ba..8b86bc036 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -659,71 +659,64 @@ class NetworkManager(manager.SchedulerDependentManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" - fixed_net = netaddr.IPNetwork(cidr) + req_cidrs = self._validate_cidrs(context, cidr, num_networks, network_size) + if FLAGS.use_ipv6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) significant_bits_v6 = 64 network_size_v6 = 1 << 64 - for index in range(num_networks): - start = index * network_size - significant_bits = 32 - int(math.log(network_size, 2)) - cidr = '%s/%s' % (fixed_net[start], significant_bits) - project_net = netaddr.IPNetwork(cidr) + count = 0 + for req_cidr in req_cidrs: + count = count + 1 net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface net['dns1'] = dns1 net['dns2'] = dns2 - net['cidr'] = cidr - net['multi_host'] = multi_host - net['netmask'] = str(project_net.netmask) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast) - net['dhcp_start'] = str(project_net[2]) + net['cidr'] = str(req_cidr) + net['netmask'] = str(req_cidr.netmask) + net['gateway'] = str(req_cidr[1]) + net['broadcast'] = str(req_cidr.broadcast) + net['dhcp_start'] = str(req_cidr[2]) if num_networks > 1: - net['label'] = '%s_%d' % (label, index) + net['label'] = '%s_%d' % (label, count) else: net['label'] = label if FLAGS.use_ipv6: - start_v6 = index * network_size_v6 + start_v6 = count * 
network_size_v6 cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 - project_net_v6 = netaddr.IPNetwork(cidr_v6) - if gateway_v6: # use a pre-defined gateway if one is provided net['gateway_v6'] = str(gateway_v6) else: net['gateway_v6'] = str(project_net_v6[1]) - net['netmask_v6'] = str(project_net_v6._prefixlen) if kwargs.get('vpn', False): # this bit here is for vlan-manager del net['dns1'] del net['dns2'] - vlan = kwargs['vlan_start'] + index - net['vpn_private_address'] = str(project_net[2]) - net['dhcp_start'] = str(project_net[3]) + vlan = kwargs['vlan_start'] + count + net['vpn_private_address'] = str(req_cidr[2]) + net['dhcp_start'] = str(req_cidr[3]) net['vlan'] = vlan net['bridge'] = 'br%s' % vlan # NOTE(vish): This makes ports unique accross the cloud, a more # robust solution would be to make them uniq per ip - net['vpn_public_port'] = kwargs['vpn_start'] + index + net['vpn_public_port'] = kwargs['vpn_start'] + count - # None if network with cidr or cidr_v6 already exists network = self.db.network_create_safe(context, net) - if network: self._create_fixed_ips(context, network['id']) else: - raise ValueError(_('Network with cidr %s already exists') % - cidr) + msg = "Error creating cidr %s, see log" + raise ValueError(_(msg) % str(cidr)) @property def _bottom_reserved_ips(self): # pylint: disable=R0201 diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 1f5305b1d..424b5f098 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -361,21 +361,36 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - manager.db.network_get_all(ctxt).AndReturn([{'id': 1, - 'cidr': '192.168.0.0/8'}]) + fakecidr= [{'id': 1, 'cidr': '192.168.0.0/8'}] + manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() args = [None, '192.168.0.0/24', 1, 256] # ValueError: requested cidr (192.168.0.0/24) conflicts # with existing supernet self.assertRaises(ValueError, manager._validate_cidrs, *args) -# -# def test_create_networks_cidr_already_used(self): -# mockany = self.mox.CreateMockAnything() -# manager = self.FakeNetworkManager() -# self.mox.StubOutWithMock(manager.db, 'network_get_by_cidr') -# manager.db.network_get_by_cidr(mox.IgnoreArg(), '192.168.0.0/24')\ -# .AndReturn(mockany) -# self.mox.ReplayAll() -# args = [None, 'foo', '192.168.0.0/24', None, 1, 256, -# 'fd00::/48', None, None, None] -# self.assertRaises(ValueError, manager.create_networks, *args) + + def test_create_networks(self): + cidr = '192.168.0.0/24' + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_create_safe') + ignore = mox.IgnoreArg() + fakecidr= {'id': 1, 'cidr': cidr} + manager.db.network_create_safe(ignore, ignore).AndReturn(fakecidr) + self.mox.StubOutWithMock(manager, '_create_fixed_ips') + manager._create_fixed_ips(ignore, ignore).AndReturn('foo') + self.mox.ReplayAll() + args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, + None] + # making it to the end, without err, is enough validation + self.assertEqual(manager.create_networks(*args), None) + + def test_create_networks_cidr_already_used(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + fakecidr= [{'id': 1, 'cidr': '192.168.0.0/24'}] + manager.db.network_get_all(ctxt).AndReturn(fakecidr) + self.mox.ReplayAll() + args = [None, 'foo', 
'192.168.0.0/24', None, 1, 256, + 'fd00::/48', None, None, None] + self.assertRaises(ValueError, manager.create_networks, *args) -- cgit From 51f0cbf9221b461eb92beae2497e871bf2a7f45f Mon Sep 17 00:00:00 2001 From: John Tran Date: Tue, 2 Aug 2011 10:06:22 -0700 Subject: refactored tests --- nova/network/manager.py | 13 ++++++------- nova/tests/test_network.py | 27 ++++++++++++++++++--------- 2 files changed, 24 insertions(+), 16 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 8b86bc036..b21667def 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -666,9 +666,8 @@ class NetworkManager(manager.SchedulerDependentManager): significant_bits_v6 = 64 network_size_v6 = 1 << 64 - count = 0 - for req_cidr in req_cidrs: - count = count + 1 + for index in range(len(req_cidrs)): + req_cidr = req_cidrs[index] net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface @@ -680,12 +679,12 @@ class NetworkManager(manager.SchedulerDependentManager): net['broadcast'] = str(req_cidr.broadcast) net['dhcp_start'] = str(req_cidr[2]) if num_networks > 1: - net['label'] = '%s_%d' % (label, count) + net['label'] = '%s_%d' % (label, index) else: net['label'] = label if FLAGS.use_ipv6: - start_v6 = count * network_size_v6 + start_v6 = index * network_size_v6 cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 @@ -701,7 +700,7 @@ class NetworkManager(manager.SchedulerDependentManager): # this bit here is for vlan-manager del net['dns1'] del net['dns2'] - vlan = kwargs['vlan_start'] + count + vlan = kwargs['vlan_start'] + index net['vpn_private_address'] = str(req_cidr[2]) net['dhcp_start'] = str(req_cidr[3]) net['vlan'] = vlan @@ -709,7 +708,7 @@ class NetworkManager(manager.SchedulerDependentManager): # NOTE(vish): This makes ports unique accross the cloud, a more # robust solution would be to make them uniq per ip - net['vpn_public_port'] = kwargs['vpn_start'] + count + net['vpn_public_port'] = kwargs['vpn_start'] + index network = self.db.network_create_safe(context, net) if network: diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 424b5f098..9c808c6db 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -254,7 +254,9 @@ class CommonNetworkTestCase(test.TestCase): raise exception.NetworkNotFoundForCidr() def network_create_safe(self, context, net): - return {'foo': 'bar'} + fakenet = {} + fakenet['id'] = 999 + return fakenet def network_get_all(self, context): raise exception.NoNetworksFound() @@ -266,6 +268,9 @@ class CommonNetworkTestCase(test.TestCase): def deallocate_fixed_ip(self, context, address): self.deallocate_called = address + def fake_create_fixed_ips(self, context, network_id): + return None + def test_remove_fixed_ip_from_instance(self): manager = self.FakeNetworkManager() manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1') @@ -372,16 +377,11 @@ class CommonNetworkTestCase(test.TestCase): def test_create_networks(self): cidr = '192.168.0.0/24' manager = self.FakeNetworkManager() - self.mox.StubOutWithMock(manager.db, 'network_create_safe') - ignore = mox.IgnoreArg() - fakecidr= {'id': 1, 'cidr': cidr} - manager.db.network_create_safe(ignore, ignore).AndReturn(fakecidr) - self.mox.StubOutWithMock(manager, '_create_fixed_ips') - manager._create_fixed_ips(ignore, ignore).AndReturn('foo') - self.mox.ReplayAll() + self.stubs.Set(manager, '_create_fixed_ips', + self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 1, 
256, 'fd00::/48', None, None, None] - # making it to the end, without err, is enough validation + result = manager.create_networks(*args) - self.assertEqual(manager.create_networks(*args), None) + self.assertEqual(result, None) def test_create_networks_cidr_already_used(self): @@ -394,3 +394,12 @@ class CommonNetworkTestCase(test.TestCase): args = [None, 'foo', '192.168.0.0/24', None, 1, 256, 'fd00::/48', None, None, None] self.assertRaises(ValueError, manager.create_networks, *args) + + def test_create_networks_many(self): + cidr = '192.168.0.0/16' + manager = self.FakeNetworkManager() + self.stubs.Set(manager, '_create_fixed_ips', + self.fake_create_fixed_ips) + args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, + None] + self.assertEqual(manager.create_networks(*args), None) -- cgit From 0c19e26cddb50bf6808670d550d71ab435df37c5 Mon Sep 17 00:00:00 2001 From: Donal Lafferty Date: Thu, 4 Aug 2011 18:48:31 +0100 Subject: Fix comments. --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a723c5e22..666b84b76 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -644,7 +644,7 @@ class VMHelper(HelperBase): # 3. Disk is_pv = True elif disk_image_type == ImageType.DISK_ISO: - # 5. ISO + # 4. ISO is_pv = False else: raise exception.Error(_("Unknown image format %(disk_image_type)s") -- cgit From 8c7b71f65e54d67615e52927591e12a43b8b3991 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 4 Aug 2011 16:05:08 -0700 Subject: re-integrated my changes after merging trunk. fixed some pep8 issues. sorting the list of cidrs to create, so that it will create x.x.0.0 with a lower 'id' than x.x.1.0 (as an example); the unsorted order was causing the libvirtd test to fail --- nova/network/manager.py | 54 +++++++++++++++++++++++++++++++++++++++++----- nova/tests/test_network.py | 6 +++--- 2 files changed, 52 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 8fc6a295f..3c29417d7 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -614,6 +614,52 @@ class NetworkManager(manager.SchedulerDependentManager): network_ref = self.db.fixed_ip_get_network(context, address) self._setup_network(context, network_ref) + def _validate_cidrs(self, context, cidr, num_networks, network_size): + significant_bits = 32 - int(math.log(network_size, 2)) + req_net = netaddr.IPNetwork(cidr) + req_net_ip = str(req_net.ip) + req_size = network_size * num_networks + if req_size > req_net.size: + msg = "network_size * num_networks exceeds cidr size" + raise ValueError(_(msg)) + adjusted_cidr_str = req_net_ip + '/' + str(significant_bits) + adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str) + all_req_nets = [adjusted_cidr] + try: + used_nets = self.db.network_get_all(context) + except exception.NoNetworksFound: + used_nets = [] + used_cidrs = [netaddr.IPNetwork(net['cidr']) for net in used_nets] + if adjusted_cidr in used_cidrs: + raise ValueError(_("cidr already in use")) + for adjusted_cidr_supernet in adjusted_cidr.supernet(): + if adjusted_cidr_supernet in used_cidrs: + msg = "requested cidr (%s) conflicts with existing supernet" + raise ValueError(_(msg % str(adjusted_cidr))) + # split supernet into subnets + if num_networks >= 2: + next_cidr = adjusted_cidr + for used_cidr in used_cidrs: + # watch for smaller subnets conflicting + if used_cidr.size < next_cidr.size: + for ucsupernet in used_cidr.supernet(): + if ucsupernet.size 
== next_cidr.size: + used_cidrs.append(ucsupernet) + for index in range(1, num_networks): + while True: + next_cidr = next_cidr.next() + if next_cidr in used_cidrs: + continue + else: + all_req_nets.append(next_cidr) + break + all_req_nets = sorted(list(set(all_req_nets))) + # after splitting ensure there were enough to satisfy the num_networks + if len(all_req_nets) < num_networks: + msg = "Not enough subnets avail to satisfy requested num_networks" + raise ValueError(_(msg)) + return all_req_nets + def create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): @@ -624,8 +670,8 @@ class NetworkManager(manager.SchedulerDependentManager): network_size_v6 = 1 << 64 if cidr: - fixed_net = netaddr.IPNetwork(cidr) - significant_bits = 32 - int(math.log(network_size, 2)) + req_cidrs = self._validate_cidrs(context, cidr, num_networks, + network_size) for index in range(num_networks): net = {} @@ -635,9 +681,7 @@ class NetworkManager(manager.SchedulerDependentManager): net['dns2'] = dns2 if cidr: - start = index * network_size - project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start], - significant_bits)) + project_net = req_cidrs[index] net['cidr'] = str(project_net) net['multi_host'] = multi_host net['netmask'] = str(project_net.netmask) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 9c808c6db..ceb12e890 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -269,7 +269,7 @@ class CommonNetworkTestCase(test.TestCase): self.deallocate_called = address def fake_create_fixed_ips(self, context, network_id): - return None + return None def test_remove_fixed_ip_from_instance(self): manager = self.FakeNetworkManager() @@ -366,7 +366,7 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - fakecidr= [{'id': 1, 'cidr': '192.168.0.0/8'}] + fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() args = [None, '192.168.0.0/24', 1, 256] @@ -388,7 +388,7 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() - fakecidr= [{'id': 1, 'cidr': '192.168.0.0/24'}] + fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() args = [None, 'foo', '192.168.0.0/24', None, 1, 256, -- cgit From f58d441b55e143de35aefd039b80e0b27dad9ce2 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 4 Aug 2011 16:27:55 -0700 Subject: removed unnecessary context from test I had left there from prior --- nova/tests/test_network.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index ceb12e890..1ff3f0c01 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from nova import context from nova import db from nova import exception from nova import flags -- cgit From 89ec28c70d7795d427ecd4242cb1856eabdca104 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 4 Aug 2011 18:01:07 -0700 Subject: fixed bug: wasn't detecting smaller subnet conflict properly; added test for it --- nova/network/manager.py | 35 ++++++++++++++++++++++++----------- nova/tests/test_network.py | 30 +++++++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 12 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 3c29417d7..873fcadf5 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -624,7 +624,6 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(_(msg)) adjusted_cidr_str = req_net_ip + '/' + str(significant_bits) adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str) - all_req_nets = [adjusted_cidr] try: used_nets = self.db.network_get_all(context) except exception.NoNetworksFound: @@ -636,22 +635,36 @@ class NetworkManager(manager.SchedulerDependentManager): if adjusted_cidr_supernet in used_cidrs: msg = "requested cidr (%s) conflicts with existing supernet" raise ValueError(_(msg % str(adjusted_cidr))) - # split supernet into subnets - if num_networks >= 2: + # watch for smaller subnets conflicting + used_supernets = [] + for used_cidr in used_cidrs: + if not used_cidr: + continue + if used_cidr.size < network_size: + for ucsupernet in used_cidr.supernet(): + if ucsupernet.size == network_size: + used_supernets.append(ucsupernet) + all_req_nets = [] + if num_networks == 1: + if adjusted_cidr in used_supernets: + msg = "requested cidr (%s) conflicts with existing smaller cidr" + raise ValueError(_(msg % str(adjusted_cidr))) + else: + all_req_nets.append(adjusted_cidr) + elif num_networks >= 2: + # split supernet into subnets next_cidr = adjusted_cidr - for used_cidr in used_cidrs: - # watch for smaller subnets conflicting - if used_cidr.size < next_cidr.size: - for ucsupernet in used_cidr.supernet(): - if ucsupernet.size == next_cidr.size: - used_cidrs.append(ucsupernet) - for index in range(1, num_networks): + for index in range(num_networks): while True: - next_cidr = next_cidr.next() if next_cidr in used_cidrs: + next_cidr = next_cidr.next() + continue + elif next_cidr in used_supernets: + next_cidr = next_cidr.next() + continue else: all_req_nets.append(next_cidr) + next_cidr = next_cidr.next() break all_req_nets = sorted(list(set(all_req_nets))) # after splitting ensure there were enough to satisfy the num_networks diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 1ff3f0c01..e3a677c97 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -313,7 +313,19 @@ class CommonNetworkTestCase(test.TestCase): self.assertTrue(exp_cidr + '/24' in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) - def test__validate_cidrs_split_cidr_smaller_subnet_in_use(self): + def test__validate_cidrs_smaller_subnet_in_use(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.9/25'}]) + self.mox.ReplayAll() + # ValueError: requested cidr (192.168.2.0/24) conflicts with + # existing smaller cidr + args = [None, '192.168.2.0/24', 1, 256] + self.assertRaises(ValueError, manager._validate_cidrs, *args) + + def test__validate_cidrs_split_smaller_cidr_in_use(self): manager = self.FakeNetworkManager() 
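# Aside: every test in this series drives the db stub through mox's
# record/replay/verify cycle, exactly as in the self.mox calls just below.
# A minimal standalone sketch of that cycle (FakeDb is an illustrative
# stand-in, not a nova class; the mox calls are the stock pymox API):
#
#     import mox
#
#     class FakeDb(object):
#         def network_get_all(self, context):
#             raise NotImplementedError()  # swapped out by the stub below
#
#     m = mox.Mox()
#     db = FakeDb()
#     m.StubOutWithMock(db, 'network_get_all')
#     # record phase: declare the expected call and its canned return value
#     db.network_get_all(mox.IgnoreArg()).AndReturn(
#         [{'id': 1, 'cidr': '192.168.2.0/24'}])
#     m.ReplayAll()
#     # replay phase: the code under test makes exactly the recorded call
#     assert db.network_get_all(None) == [{'id': 1, 'cidr': '192.168.2.0/24'}]
#     m.VerifyAll()
#     m.UnsetStubs()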
self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -329,6 +341,22 @@ class CommonNetworkTestCase(test.TestCase): self.assertTrue(exp_cidr + '/24' in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) + def test__validate_cidrs_split_smaller_cidr_in_use2(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + manager.db.network_get_all(ctxt).AndReturn([{'id': 1, + 'cidr': '192.168.2.9/29'}]) + self.mox.ReplayAll() + nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) + self.assertEqual(3, len(nets)) + cidrs = [str(net) for net in nets] + print cidrs + exp_cidrs = ['192.168.2.32', '192.168.2.64', '192.168.2.96'] + for exp_cidr in exp_cidrs: + self.assertTrue(exp_cidr + '/27' in cidrs) + self.assertFalse('192.168.2.0/27' in cidrs) + def test__validate_cidrs_one_in_use(self): manager = self.FakeNetworkManager() args = [None, '192.168.0.0/24', 2, 256] -- cgit From 38eb72be5f15731ba34a7dc0f8a28aa0fb63ea90 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 4 Aug 2011 18:37:36 -0700 Subject: fixed bug: when logic searched for the next avail cidr it would return cidrs that were out of range of the original requested cidr block; added test for it --- nova/network/manager.py | 4 ++++ nova/tests/test_network.py | 17 +++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 873fcadf5..0bb107268 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -655,6 +655,10 @@ class NetworkManager(manager.SchedulerDependentManager): # split supernet into subnets next_cidr = adjusted_cidr for index in range(num_networks): + if next_cidr.first > req_net.last: + msg = "Not enough subnets avail to satisfy requested num_net" \ + "works - some subnets in requested range already in use" + raise ValueError(_(msg)) while True: if next_cidr in used_cidrs: next_cidr = next_cidr.next() diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index e3a677c97..b4c5a7584 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -351,12 +351,26 @@ class CommonNetworkTestCase(test.TestCase): nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) self.assertEqual(3, len(nets)) cidrs = [str(net) for net in nets] - print cidrs exp_cidrs = ['192.168.2.32', '192.168.2.64', '192.168.2.96'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr + '/27' in cidrs) self.assertFalse('192.168.2.0/27' in cidrs) + def test__validate_cidrs_split_all_in_use(self): + manager = self.FakeNetworkManager() + self.mox.StubOutWithMock(manager.db, 'network_get_all') + ctxt = mox.IgnoreArg() + in_use = [{'id': 1, 'cidr': '192.168.2.9/29'}, + {'id': 2, 'cidr': '192.168.2.64/26'}, + {'id': 3, 'cidr': '192.168.2.128/26'} + ] + manager.db.network_get_all(ctxt).AndReturn(in_use) + self.mox.ReplayAll() + args = [None, '192.168.2.0/24', 3, 64] + # ValueError: Not enough subnets avail to satisfy requested num_networks + # - some subnets in requested range already in use + self.assertRaises(ValueError, manager._validate_cidrs, *args) + def test__validate_cidrs_one_in_use(self): manager = self.FakeNetworkManager() args = [None, '192.168.0.0/24', 2, 256] @@ -385,7 +399,6 @@ class CommonNetworkTestCase(test.TestCase): manager = self.FakeNetworkManager() nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) returned_cidrs = [str(net) for net in nets] - print returned_cidrs self.assertTrue('192.168.0.0/24' 
in returned_cidrs) self.assertTrue('192.168.1.0/24' in returned_cidrs) -- cgit From b557b6366b21a0d3795369785037ee29c8cef377 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 4 Aug 2011 18:52:15 -0700 Subject: fix pep8 issues --- nova/network/manager.py | 14 +++++++++----- nova/tests/test_network.py | 5 +++-- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 0bb107268..1b92effb2 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -647,7 +647,8 @@ class NetworkManager(manager.SchedulerDependentManager): all_req_nets = [] if num_networks == 1: if adjusted_cidr in used_supernets: - msg = "requested cidr (%s) conflicts with existing smaller cidr" + msg = "requested cidr (%s) conflicts with existing smaller" \ + " cidr" raise ValueError(_(msg % str(adjusted_cidr))) else: all_req_nets.append(adjusted_cidr) @@ -656,8 +657,9 @@ class NetworkManager(manager.SchedulerDependentManager): next_cidr = adjusted_cidr for index in range(num_networks): if next_cidr.first > req_net.last: - msg = "Not enough subnets avail to satisfy requested num_net" \ - "works - some subnets in requested range already in use" + msg = "Not enough subnets avail to satisfy requested " \ + "num_net works - some subnets in requested range" \ + " already in use" raise ValueError(_(msg)) while True: if next_cidr in used_cidrs: @@ -671,9 +673,11 @@ class NetworkManager(manager.SchedulerDependentManager): next_cidr = next_cidr.next() break all_req_nets = sorted(list(set(all_req_nets))) - # after splitting ensure there were enough to satisfy the num_networks + # after splitting ensure there were enough to satisfy the + # num_networks if len(all_req_nets) < num_networks: - msg = "Not enough subnets avail to satisfy requested num_networks" + msg = "Not enough subnets avail to satisfy requested " \ + "num_networks" raise ValueError(_(msg)) return all_req_nets diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index b4c5a7584..8c84ea938 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -367,8 +367,9 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn(in_use) self.mox.ReplayAll() args = [None, '192.168.2.0/24', 3, 64] - # ValueError: Not enough subnets avail to satisfy requested num_networks - # - some subnets in requested range already in use + # ValueError: Not enough subnets avail to satisfy requested num_ + # networks - some subnets in requested range already + # in use self.assertRaises(ValueError, manager._validate_cidrs, *args) def test__validate_cidrs_one_in_use(self): -- cgit From b7167b21d615f8617d588a1656aa341fd226ded9 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 4 Aug 2011 20:13:23 -0700 Subject: removed redundant logic --- nova/network/manager.py | 6 ------ 1 file changed, 6 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 1b92effb2..3d915423f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -673,12 +673,6 @@ class NetworkManager(manager.SchedulerDependentManager): next_cidr = next_cidr.next() break all_req_nets = sorted(list(set(all_req_nets))) - # after splitting ensure there were enough to satisfy the - # num_networks - if len(all_req_nets) < num_networks: - msg = "Not enough subnets avail to satisfy requested " \ - "num_networks" - raise ValueError(_(msg)) return all_req_nets def create_networks(self, context, label, cidr, multi_host, num_networks, -- 
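Aside: the _validate_cidrs() revisions above lean on a small set of netaddr operations: IPNetwork.size, supernet(), next(), and equality-based membership tests. A minimal sketch of how those behave, assuming the 2011-era netaddr API this series targets:

    import netaddr

    net = netaddr.IPNetwork('192.168.0.0/24')
    assert net.size == 256                  # addresses covered by the cidr
    # supernet() lists every enclosing network, which is why an existing
    # 192.0.0.0/8 trips the supernet-conflict check:
    assert netaddr.IPNetwork('192.0.0.0/8') in net.supernet()
    # next() steps to the adjacent network of the same prefix length; the
    # splitting loop walks candidates this way, skipping used cidrs:
    assert str(net.next()) == '192.168.1.0/24'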
cgit From 2935bebd718e770d0f2c9d1ab5dca76cc7d5f76a Mon Sep 17 00:00:00 2001 From: John Tran Date: Fri, 5 Aug 2011 09:50:11 -0700 Subject: fix typo --- nova/tests/test_network.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 40cc96147..1f27643e5 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -360,8 +360,7 @@ class CommonNetworkTestCase(test.TestCase): ctxt = mox.IgnoreArg() in_use = [{'id': 1, 'cidr': '192.168.2.9/29'}, {'id': 2, 'cidr': '192.168.2.64/26'}, - {'id': 3, 'cidr': '192.168.2.128/26'} - ] + {'id': 3, 'cidr': '192.168.2.128/26'}] manager.db.network_get_all(ctxt).AndReturn(in_use) self.mox.ReplayAll() args = [None, '192.168.2.0/24', 3, 64] -- cgit From 3812d22b7a6f5d74418a7a99dc69c68a5b9f9046 Mon Sep 17 00:00:00 2001 From: John Tran Date: Fri, 5 Aug 2011 11:36:00 -0700 Subject: fixed per peer review --- nova/network/manager.py | 27 ++++++++++++--------------- nova/tests/test_network.py | 40 ++++++++++++++++++++-------------------- 2 files changed, 32 insertions(+), 35 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 3d915423f..5677df670 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -620,8 +620,8 @@ class NetworkManager(manager.SchedulerDependentManager): req_net_ip = str(req_net.ip) req_size = network_size * num_networks if req_size > req_net.size: - msg = "network_size * num_networks exceeds cidr size" - raise ValueError(_(msg)) + msg = _("network_size * num_networks exceeds cidr size") + raise ValueError(msg) adjusted_cidr_str = req_net_ip + '/' + str(significant_bits) adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str) try: @@ -633,8 +633,8 @@ class NetworkManager(manager.SchedulerDependentManager): raise ValueError(_("cidr already in use")) for adjusted_cidr_supernet in adjusted_cidr.supernet(): if adjusted_cidr_supernet in used_cidrs: - msg = "requested cidr (%s) conflicts with existing supernet" - raise ValueError(_(msg % str(adjusted_cidr))) + msg = _("requested cidr (%s) conflicts with existing supernet") + raise ValueError(msg % str(adjusted_cidr)) # watch for smaller subnets conflicting used_supernets = [] for used_cidr in used_cidrs: @@ -647,9 +647,9 @@ class NetworkManager(manager.SchedulerDependentManager): all_req_nets = [] if num_networks == 1: if adjusted_cidr in used_supernets: - msg = "requested cidr (%s) conflicts with existing smaller" \ - " cidr" - raise ValueError(_(msg % str(adjusted_cidr))) + msg = _("requested cidr (%s) conflicts with existing smaller" \ + " cidr") + raise ValueError(msg % str(adjusted_cidr)) else: all_req_nets.append(adjusted_cidr) elif num_networks >= 2: @@ -657,17 +657,14 @@ class NetworkManager(manager.SchedulerDependentManager): next_cidr = adjusted_cidr for index in range(num_networks): if next_cidr.first > req_net.last: - msg = "Not enough subnets avail to satisfy requested " \ + msg = ("Not enough subnets avail to satisfy requested " \ "num_net works - some subnets in requested range" \ - " already in use" - raise ValueError(_(msg)) + " already in use") + raise ValueError(msg) while True: - if next_cidr in used_cidrs: + used_values = used_cidrs + used_supernets + if next_cidr in used_values: next_cidr = next_cidr.next() - continue - elif next_cidr in used_supernets: - next_cidr = next_cidr.next() - continue else: all_req_nets.append(next_cidr) next_cidr = next_cidr.next() diff --git a/nova/tests/test_network.py 
b/nova/tests/test_network.py index 1f27643e5..547a7a1fa 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -280,14 +280,14 @@ class CommonNetworkTestCase(test.TestCase): manager.remove_fixed_ip_from_instance, None, 99, 'bad input') - def test__validate_cidrs(self): + def test_validate_cidrs(self): manager = self.FakeNetworkManager() nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256) self.assertEqual(1, len(nets)) cidrs = [str(net) for net in nets] self.assertTrue('192.168.0.0/24' in cidrs) - def test__validate_cidrs_split_exact_in_half(self): + def test_validate_cidrs_split_exact_in_half(self): manager = self.FakeNetworkManager() nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128) self.assertEqual(2, len(nets)) @@ -295,7 +295,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertTrue('192.168.0.0/25' in cidrs) self.assertTrue('192.168.0.128/25' in cidrs) - def test__validate_cidrs_split_cidr_in_use_middle_of_range(self): + def test_validate_cidrs_split_cidr_in_use_middle_of_range(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -305,13 +305,13 @@ class CommonNetworkTestCase(test.TestCase): nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) self.assertEqual(4, len(nets)) cidrs = [str(net) for net in nets] - exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', - '192.168.4.0'] + exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', + '192.168.4.0/24'] for exp_cidr in exp_cidrs: - self.assertTrue(exp_cidr + '/24' in cidrs) + self.assertTrue(exp_cidr in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) - def test__validate_cidrs_smaller_subnet_in_use(self): + def test_validate_cidrs_smaller_subnet_in_use(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -323,7 +323,7 @@ class CommonNetworkTestCase(test.TestCase): args = [None, '192.168.2.0/24', 1, 256] self.assertRaises(ValueError, manager._validate_cidrs, *args) - def test__validate_cidrs_split_smaller_cidr_in_use(self): + def test_validate_cidrs_split_smaller_cidr_in_use(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -333,13 +333,13 @@ class CommonNetworkTestCase(test.TestCase): nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) self.assertEqual(4, len(nets)) cidrs = [str(net) for net in nets] - exp_cidrs = ['192.168.0.0', '192.168.1.0', '192.168.3.0', - '192.168.4.0'] + exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', + '192.168.4.0/24'] for exp_cidr in exp_cidrs: - self.assertTrue(exp_cidr + '/24' in cidrs) + self.assertTrue(exp_cidr in cidrs) self.assertFalse('192.168.2.0/24' in cidrs) - def test__validate_cidrs_split_smaller_cidr_in_use2(self): + def test_validate_cidrs_split_smaller_cidr_in_use2(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -349,12 +349,12 @@ class CommonNetworkTestCase(test.TestCase): nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) self.assertEqual(3, len(nets)) cidrs = [str(net) for net in nets] - exp_cidrs = ['192.168.2.32', '192.168.2.64', '192.168.2.96'] + exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: - self.assertTrue(exp_cidr + '/27' in cidrs) + self.assertTrue(exp_cidr in cidrs) self.assertFalse('192.168.2.0/27' in cidrs) - def 
test__validate_cidrs_split_all_in_use(self): + def test_validate_cidrs_split_all_in_use(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -369,13 +369,13 @@ class CommonNetworkTestCase(test.TestCase): # in use self.assertRaises(ValueError, manager._validate_cidrs, *args) - def test__validate_cidrs_one_in_use(self): + def test_validate_cidrs_one_in_use(self): manager = self.FakeNetworkManager() args = [None, '192.168.0.0/24', 2, 256] # ValueError: network_size * num_networks exceeds cidr size self.assertRaises(ValueError, manager._validate_cidrs, *args) - def test__validate_cidrs_already_used(self): + def test_validate_cidrs_already_used(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() @@ -386,21 +386,21 @@ class CommonNetworkTestCase(test.TestCase): args = [None, '192.168.0.0/24', 1, 256] self.assertRaises(ValueError, manager._validate_cidrs, *args) - def test__validate_cidrs_too_many(self): + def test_validate_cidrs_too_many(self): manager = self.FakeNetworkManager() args = [None, '192.168.0.0/24', 200, 256] # ValueError: Not enough subnets avail to satisfy requested # num_networks self.assertRaises(ValueError, manager._validate_cidrs, *args) - def test__validate_cidrs_split_partial(self): + def test_validate_cidrs_split_partial(self): manager = self.FakeNetworkManager() nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) returned_cidrs = [str(net) for net in nets] self.assertTrue('192.168.0.0/24' in returned_cidrs) self.assertTrue('192.168.1.0/24' in returned_cidrs) - def test__validate_cidrs_conflict_existing_supernet(self): + def test_validate_cidrs_conflict_existing_supernet(self): manager = self.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() -- cgit From 353fa4871069cf0b926f09aa00496002f65584cb Mon Sep 17 00:00:00 2001 From: John Tran Date: Fri, 5 Aug 2011 11:48:06 -0700 Subject: fixed per peer review --- nova/network/manager.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/network/manager.py b/nova/network/manager.py index 5677df670..402049d44 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -647,8 +647,8 @@ class NetworkManager(manager.SchedulerDependentManager): all_req_nets = [] if num_networks == 1: if adjusted_cidr in used_supernets: - msg = _("requested cidr (%s) conflicts with existing smaller" \ - " cidr") + msg = _("requested cidr (%s) conflicts with existing smaller" + " cidr") raise ValueError(msg % str(adjusted_cidr)) else: all_req_nets.append(adjusted_cidr) @@ -657,9 +657,9 @@ class NetworkManager(manager.SchedulerDependentManager): next_cidr = adjusted_cidr for index in range(num_networks): if next_cidr.first > req_net.last: - msg = ("Not enough subnets avail to satisfy requested " \ - "num_net works - some subnets in requested range" \ - " already in use") + msg = _("Not enough subnets avail to satisfy requested " + "num_networks - some subnets in requested range" + " already in use") raise ValueError(msg) while True: used_values = used_cidrs + used_supernets -- cgit From 6548ce754984f2eb5e72612392a8a3392c2a21a2 Mon Sep 17 00:00:00 2001 From: John Tran Date: Tue, 9 Aug 2011 18:43:18 -0700 Subject: fix so that the exception shows up in euca2ools instead of UnknownError --- nova/api/ec2/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova') diff --git 
a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 8b6e47cfb..ffd5382bf 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -358,6 +358,10 @@ class Executor(wsgi.Application): LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) + except exception.InvalidPortRange as ex:$ + LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex),$ + context=context)$ + return self._error(req, context, type(ex).__name__, unicode(ex))$ except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), -- cgit From 7b72972cbc9fbd267160d8d3282e1d0ec888de98 Mon Sep 17 00:00:00 2001 From: John Tran Date: Wed, 10 Aug 2011 16:19:21 -0700 Subject: removed typos, end of line chars --- nova/api/ec2/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'nova') diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index ffd5382bf..96df97393 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -358,10 +358,10 @@ class Executor(wsgi.Application): LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) - except exception.InvalidPortRange as ex:$ - LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex),$ - context=context)$ - return self._error(req, context, type(ex).__name__, unicode(ex))$ + except exception.InvalidPortRange as ex: + LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex), + context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), -- cgit From 0a543d4f8ff31733c32cbd9063e461ca41a0b076 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 10 Aug 2011 21:27:40 -0400 Subject: Changed bad server actions requests to raise an HTTP 400 --- nova/api/openstack/servers.py | 10 +++++++--- nova/tests/api/openstack/test_extensions.py | 2 +- nova/tests/api/openstack/test_server_actions.py | 20 +++++++++++++++++++- 3 files changed, 27 insertions(+), 5 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 736fdf6ce..c7d17a5bc 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -210,11 +210,15 @@ class Controller(object): } self.actions.update(admin_actions) - for key in self.actions.keys(): - if key in body: + for key in body.keys(): + if key in self.actions: return self.actions[key](body, req, id) + else: + msg = _('There is no such server action: %s' % key) + raise exc.HTTPBadRequest(explanation=msg) - raise exc.HTTPNotImplemented() + msg = _('Invalid request body') + raise exc.HTTPBadRequest(explanation=msg) def _action_create_backup(self, input_dict, req, instance_id): """Backup a server instance. 
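Aside: the servers.py hunk above swaps the catch-all HTTPNotImplemented for explicit 400 responses, on the reasoning that an unknown action key or an empty body is a malformed request rather than a missing server feature. A condensed, illustrative sketch of the resulting dispatch pattern (MiniController and _action_reboot are invented names, not nova code; webob.exc.HTTPBadRequest is the real webob API):

    import webob.exc as exc

    class MiniController(object):
        def __init__(self):
            # map action-body keys to handler methods
            self.actions = {'reboot': self._action_reboot}

        def _action_reboot(self, body, req, instance_id):
            return 'rebooting instance %s' % instance_id

        def action(self, req, instance_id, body):
            for key in body:
                if key in self.actions:
                    return self.actions[key](body, req, instance_id)
                else:
                    # unknown action: a client error, hence 400
                    msg = "There is no such server action: %s" % (key,)
                    raise exc.HTTPBadRequest(explanation=msg)
            # empty body: also a client error, hence 400
            raise exc.HTTPBadRequest(explanation="Invalid request body")

Note the subtlety the new tests pin down: the first unrecognized key raises immediately, even if a later key in the body would have matched.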
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 8b7e11a5b..12eee3367 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -280,7 +280,7 @@ class ActionExtensionTest(test.TestCase): def test_invalid_action_body(self): body = dict(blah=dict(name="test")) # Doesn't exist response = self._send_server_action_request("/servers/1/action", body) - self.assertEqual(501, response.status_int) + self.assertEqual(400, response.status_int) def test_invalid_action(self): body = dict(blah=dict(name="test")) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 717e11c00..687a19390 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -352,7 +352,7 @@ class ServerActionsTest(test.TestCase): req.body = json.dumps(body) req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(501, response.status_int) + self.assertEqual(400, response.status_int) def test_create_backup_with_metadata(self): self.flags(allow_admin_api=True) @@ -487,6 +487,24 @@ class ServerActionsTestV11(test.TestCase): def tearDown(self): self.stubs.UnsetAll() + def test_server_bad_body(self): + body = {} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_unknown_action(self): + body = {'sockTheFox': {'fakekey': '1234'}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_server_change_password(self): mock_method = MockSetAdminPassword() self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) -- cgit From 8517d9563191b635669032e8364d8fa64876b977 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 11 Aug 2011 10:53:40 -0400 Subject: Fixed per HACKING --- nova/api/openstack/servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c7d17a5bc..c3bb23adb 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -214,10 +214,10 @@ class Controller(object): if key in self.actions: return self.actions[key](body, req, id) else: - msg = _('There is no such server action: %s' % key) + msg = _("There is no such server action: %s") % (key,)) raise exc.HTTPBadRequest(explanation=msg) - msg = _('Invalid request body') + msg = _("Invalid request body") raise exc.HTTPBadRequest(explanation=msg) def _action_create_backup(self, input_dict, req, instance_id): -- cgit From 7ae64a0b7e1db7e46d183bfa8a2fe1be5d47f1cc Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 11 Aug 2011 11:57:16 -0400 Subject: removed extra paren --- nova/api/openstack/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c3bb23adb..4c56539a4 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -214,7 +214,7 @@ class Controller(object): if key in self.actions: return self.actions[key](body, req, id) else: - msg = _("There is no 
such server action: %s") % (key,)) + msg = _("There is no such server action: %s") % (key,) raise exc.HTTPBadRequest(explanation=msg) msg = _("Invalid request body") -- cgit From 24869338aad2dfd36db9d466820325d1a3ed1adb Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Thu, 11 Aug 2011 18:01:37 +0000 Subject: Make PUT /servers/ follow the API specs and return a 200 status --- nova/api/openstack/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 736fdf6ce..0f8e8e461 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -185,7 +185,7 @@ class Controller(object): except exception.NotFound: raise exc.HTTPNotFound() - return exc.HTTPNoContent() + return webob.Response() # 200 response def _parse_update(self, context, id, inst_dict, update_dict): pass -- cgit From 2ccec88a5a5c85ce7776b4b70d490189d63d3098 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 11 Aug 2011 11:15:14 -0700 Subject: Added availability zone support to the Create Server API --- nova/api/openstack/create_instance_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 1425521a9..4e1da549e 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -122,6 +122,7 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) zone_blob = server_dict.get('blob') + availability_zone = server_dict.get('availability_zone') name = server_dict['name'] self._validate_server_name(name) name = name.strip() @@ -161,7 +162,8 @@ class CreateInstanceHelper(object): zone_blob=zone_blob, reservation_id=reservation_id, min_count=min_count, - max_count=max_count)) + max_count=max_count, + availability_zone=availability_zone)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: -- cgit From 5dd39df596f7038cffde5079822ae4b747b92b72 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 11 Aug 2011 14:20:18 -0400 Subject: minor cleanup --- nova/api/openstack/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4c56539a4..c22a5a2a6 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -210,7 +210,7 @@ class Controller(object): } self.actions.update(admin_actions) - for key in body.keys(): + for key in body: if key in self.actions: return self.actions[key](body, req, id) else: -- cgit From 3bfaf0a0720fc8713fb77fddd8f1b2dffa0eabfc Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Thu, 11 Aug 2011 18:28:15 +0000 Subject: v1.0 and v1.1 API differs for PUT, so split them out Update tests to match API --- nova/api/openstack/servers.py | 72 ++++++++++++++++++++------------ nova/tests/api/openstack/test_servers.py | 4 +- 2 files changed, 48 insertions(+), 28 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 0f8e8e461..90b6e684b 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -161,32 +161,6 @@ class Controller(object): server['server']['adminPass'] = extra_values['password'] return server - @scheduler_api.redirect_handler - def update(self, req, id, body): - """ Updates the server name or password """ - if 
len(req.body) == 0: - raise exc.HTTPUnprocessableEntity() - - if not body: - raise exc.HTTPUnprocessableEntity() - - ctxt = req.environ['nova.context'] - update_dict = {} - - if 'name' in body['server']: - name = body['server']['name'] - self.helper._validate_server_name(name) - update_dict['display_name'] = name.strip() - - self._parse_update(ctxt, id, body, update_dict) - - try: - self.compute_api.update(ctxt, id, **update_dict) - except exception.NotFound: - raise exc.HTTPNotFound() - - return webob.Response() # 200 response - def _parse_update(self, context, id, inst_dict, update_dict): pass @@ -545,6 +519,29 @@ class Controller(object): class ControllerV10(Controller): """v1.0 OpenStack API controller""" + @scheduler_api.redirect_handler + def update(self, req, id, body): + """ Updates the server name or password """ + if len(req.body) == 0 or not body: + raise exc.HTTPUnprocessableEntity() + + ctxt = req.environ['nova.context'] + update_dict = {} + + if 'name' in body['server']: + name = body['server']['name'] + self.helper._validate_server_name(name) + update_dict['display_name'] = name.strip() + + self._parse_update(ctxt, id, body, update_dict) + + try: + self.compute_api.update(ctxt, id, **update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + return exc.HTTPNoContent() + @scheduler_api.redirect_handler def delete(self, req, id): """ Destroys a server """ @@ -614,6 +611,29 @@ class ControllerV10(Controller): class ControllerV11(Controller): """v1.1 OpenStack API controller""" + @scheduler_api.redirect_handler + def update(self, req, id, body): + """ Updates the server name or password """ + if len(req.body) == 0 or not body: + raise exc.HTTPUnprocessableEntity() + + ctxt = req.environ['nova.context'] + update_dict = {} + + if 'name' in body['server']: + name = body['server']['name'] + self.helper._validate_server_name(name) + update_dict['display_name'] = name.strip() + + self._parse_update(ctxt, id, body, update_dict) + + try: + self.compute_api.update(ctxt, id, **update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + # v1.1 API returns 200, which differs from v1.0 + @scheduler_api.redirect_handler def delete(self, req, id): """ Destroys a server """ diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b6342ae2f..b732d2972 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1869,7 +1869,7 @@ class ServersTest(test.TestCase): req.content_type = 'application/json' req.body = json.dumps({'server': {'name': 'new-name'}}) res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 204) + self.assertEqual(res.status_int, 200) self.assertEqual(res.body, '') def test_update_server_adminPass_ignored_v1_1(self): @@ -1889,7 +1889,7 @@ class ServersTest(test.TestCase): req.content_type = "application/json" req.body = self.body res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 204) + self.assertEqual(res.status_int, 200) self.assertEqual(res.body, '') def test_create_backup_schedules(self): -- cgit From 684c41e7a4aa5fdab78f2e1aac1d309c3bb16412 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 13:18:30 -0700 Subject: lp824780: fixed typo in update_service_capabilities --- nova/scheduler/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index c8b16b622..294de62e4 100644 --- 
a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -71,8 +71,8 @@ class SchedulerManager(manager.Manager): def update_service_capabilities(self, context=None, service_name=None, host=None, capabilities=None): """Process a capability update from a service node.""" - if not capability: - capability = {} + if not capabilities: + capabilities = {} self.zone_manager.update_service_capabilities(service_name, host, capabilities) -- cgit From 26d96b80fdc07d8bb9453112cd33ee12143c6f46 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Thu, 11 Aug 2011 20:48:16 +0000 Subject: v1.1 API also requires the server be returned in the body --- nova/api/openstack/servers.py | 83 +++++++++++++------------------- nova/tests/api/openstack/test_servers.py | 6 ++- 2 files changed, 37 insertions(+), 52 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 90b6e684b..f19befd6f 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -161,8 +161,32 @@ class Controller(object): server['server']['adminPass'] = extra_values['password'] return server - def _parse_update(self, context, id, inst_dict, update_dict): - pass + @scheduler_api.redirect_handler + def update(self, req, id, body): + """ Updates the server name or password """ + if len(req.body) == 0: + raise exc.HTTPUnprocessableEntity() + + if not body: + raise exc.HTTPUnprocessableEntity() + + ctxt = req.environ['nova.context'] + update_dict = {} + + if 'name' in body['server']: + name = body['server']['name'] + self.helper._validate_server_name(name) + update_dict['display_name'] = name.strip() + + try: + self.compute_api.update(ctxt, id, **update_dict) + except exception.NotFound: + raise exc.HTTPNotFound() + + return self._update(ctxt, req, id, body) + + def _update(self, context, req, id, inst_dict): + return exc.HTTPNotImplemented() @scheduler_api.redirect_handler def action(self, req, id, body): @@ -519,29 +543,6 @@ class Controller(object): class ControllerV10(Controller): """v1.0 OpenStack API controller""" - @scheduler_api.redirect_handler - def update(self, req, id, body): - """ Updates the server name or password """ - if len(req.body) == 0 or not body: - raise exc.HTTPUnprocessableEntity() - - ctxt = req.environ['nova.context'] - update_dict = {} - - if 'name' in body['server']: - name = body['server']['name'] - self.helper._validate_server_name(name) - update_dict['display_name'] = name.strip() - - self._parse_update(ctxt, id, body, update_dict) - - try: - self.compute_api.update(ctxt, id, **update_dict) - except exception.NotFound: - raise exc.HTTPNotFound() - - return exc.HTTPNoContent() - @scheduler_api.redirect_handler def delete(self, req, id): """ Destroys a server """ @@ -565,10 +566,11 @@ class ControllerV10(Controller): def _limit_items(self, items, req): return common.limited(items, req) - def _parse_update(self, context, server_id, inst_dict, update_dict): + def _update(self, context, req, id, inst_dict): if 'adminPass' in inst_dict['server']: - self.compute_api.set_admin_password(context, server_id, + self.compute_api.set_admin_password(context, id, inst_dict['server']['adminPass']) + return exc.HTTPNoContent() def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ @@ -611,29 +613,6 @@ class ControllerV10(Controller): class ControllerV11(Controller): """v1.1 OpenStack API controller""" - @scheduler_api.redirect_handler - def update(self, req, id, body): - """ Updates the server name or 
password """ - if len(req.body) == 0 or not body: - raise exc.HTTPUnprocessableEntity() - - ctxt = req.environ['nova.context'] - update_dict = {} - - if 'name' in body['server']: - name = body['server']['name'] - self.helper._validate_server_name(name) - update_dict['display_name'] = name.strip() - - self._parse_update(ctxt, id, body, update_dict) - - try: - self.compute_api.update(ctxt, id, **update_dict) - except exception.NotFound: - raise exc.HTTPNotFound() - - # v1.1 API returns 200, which differs from v1.0 - @scheduler_api.redirect_handler def delete(self, req, id): """ Destroys a server """ @@ -713,6 +692,10 @@ class ControllerV11(Controller): LOG.info(msg) raise exc.HTTPBadRequest(explanation=msg) + def _update(self, context, req, id, inst_dict): + instance = self.compute_api.routing_get(context, id) + return self._build_view(req, instance, is_detail=True) + def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ try: diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b732d2972..457ce5aec 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1870,7 +1870,8 @@ class ServersTest(test.TestCase): req.body = json.dumps({'server': {'name': 'new-name'}}) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - self.assertEqual(res.body, '') + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') @@ -1890,7 +1891,8 @@ class ServersTest(test.TestCase): req.body = self.body res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - self.assertEqual(res.body, '') + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) def test_create_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedule') -- cgit From 500bd3218906e2575467467700f80f5b31e0a87e Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 11 Aug 2011 16:26:26 -0700 Subject: allow scheduling topics to multiple drivers --- nova/scheduler/manager.py | 5 ++- nova/scheduler/multi.py | 73 ++++++++++++++++++++++++++++++++++ nova/tests/scheduler/test_scheduler.py | 24 ++++++++++- 3 files changed, 98 insertions(+), 4 deletions(-) create mode 100644 nova/scheduler/multi.py (limited to 'nova') diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index c8b16b622..13c0bf22a 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -34,12 +34,13 @@ from nova.scheduler import zone_manager LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', - 'nova.scheduler.chance.ChanceScheduler', - 'Driver to use for the scheduler') + 'nova.scheduler.multi.MultiScheduler', + 'Default driver to use for the scheduler') class SchedulerManager(manager.Manager): """Chooses a host to run instances on.""" + def __init__(self, scheduler_driver=None, *args, **kwargs): self.zone_manager = zone_manager.ZoneManager() if not scheduler_driver: diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py new file mode 100644 index 000000000..b1578033c --- /dev/null +++ b/nova/scheduler/multi.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. 
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Scheduler that allows routing some calls to one driver and others to another.
+"""
+
+from nova import flags
+from nova import utils
+from nova.scheduler import driver
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('compute_scheduler_driver',
+                    'nova.scheduler.chance.ChanceScheduler',
+                    'Driver to use for scheduling compute calls')
+flags.DEFINE_string('volume_scheduler_driver',
+                    'nova.scheduler.chance.ChanceScheduler',
+                    'Driver to use for scheduling volume calls')
+
+
+# A mapping of methods to topics so we can figure out which driver to use.
+_METHOD_MAP = {'run_instance': 'compute',
+               'start_instance': 'compute',
+               'create_volume': 'volume'}
+
+
+class MultiScheduler(driver.Scheduler):
+    """A scheduler that holds multiple sub-schedulers.
+
+    This exists to allow flag-driven composability of schedulers, allowing
+    third parties to integrate custom schedulers more easily.
+
+    """
+
+    def __init__(self):
+        super(MultiScheduler, self).__init__()
+        compute_driver = utils.import_object(FLAGS.compute_scheduler_driver)
+        volume_driver = utils.import_object(FLAGS.volume_scheduler_driver)
+
+        self.drivers = {'compute': compute_driver,
+                        'volume': volume_driver}
+
+    def __getattr__(self, key):
+        if not key.startswith('schedule_'):
+            raise AttributeError(key)
+        method = key[len('schedule_'):]
+        if method not in _METHOD_MAP:
+            raise AttributeError(key)
+        return getattr(self.drivers[_METHOD_MAP[method]], key)
+
+    def set_zone_manager(self, zone_manager):
+        for k, v in self.drivers.iteritems():
+            v.set_zone_manager(zone_manager)
+
+    def schedule(self, context, topic, *_args, **_kwargs):
+        return self.drivers[topic].schedule(context, topic, *_args, **_kwargs)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 7a26fd1bb..d70a6779f 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -36,8 +36,9 @@ from nova import test
 from nova import rpc
 from nova import utils
 from nova.scheduler import api
-from nova.scheduler import manager
 from nova.scheduler import driver
+from nova.scheduler import manager
+from nova.scheduler import multi
 from nova.compute import power_state
@@ -391,7 +392,7 @@ class SimpleDriverTestCase(test.TestCase):
         compute1.kill()
         compute2.kill()
-    def test_wont_sechedule_if_specified_host_is_down_no_queue(self):
+    def test_wont_schedule_if_specified_host_is_down_no_queue(self):
         compute1 = service.Service('host1',
                                    'nova-compute',
                                    'compute',
@@ -903,6 +904,25 @@ class SimpleDriverTestCase(test.TestCase):
         db.service_destroy(self.context, s_ref2['id'])
+class MultiDriverTestCase(SimpleDriverTestCase):
+    """Test case for multi driver."""
+
+    def setUp(self):
+        super(MultiDriverTestCase, self).setUp()
+        self.flags(connection_type='fake',
+                   stub_network=True,
+                   max_cores=4,
+                   max_gigabytes=4,
network_manager='nova.network.manager.FlatManager', + volume_driver='nova.volume.driver.FakeISCSIDriver', + compute_scheduler_driver=('nova.scheduler.simple' + '.SimpleScheduler'), + volume_scheduler_driver=('nova.scheduler.simple' + '.SimpleScheduler'), + scheduler_driver='nova.scheduler.multi.MultiScheduler') + self.scheduler = manager.SchedulerManager() + + class FakeZone(object): def __init__(self, id, api_url, username, password): self.id = id -- cgit From 258e169a60d3551e789022ec23d6ae040c1f981e Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Fri, 12 Aug 2011 20:18:47 +0000 Subject: Stub out instance_get as well so we can show the results of the name change --- nova/api/openstack/servers.py | 2 +- nova/tests/api/openstack/test_servers.py | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f19befd6f..42e46a94a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -163,7 +163,7 @@ class Controller(object): @scheduler_api.redirect_handler def update(self, req, id, body): - """ Updates the server name or password """ + """Update server name then pass on to version-specific controller""" if len(req.body) == 0: raise exc.HTTPUnprocessableEntity() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index d43a40070..1a4288ae7 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -134,8 +134,8 @@ def return_security_group(context, instance_id, security_group_id): pass -def instance_update(context, instance_id, kwargs): - return stub_instance(instance_id) +def instance_update(context, instance_id, values): + return stub_instance(instance_id, name=values.get('display_name')) def instance_addresses(context, instance_id): @@ -145,7 +145,7 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None): + flavor_id="1", interfaces=None, name=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -161,7 +161,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, host = str(host) # ReservationID isn't sent back, hack it in there. 
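An aside on the MultiScheduler introduced a few hunks above: the routing hinges on __getattr__ only being consulted for attributes that normal lookup cannot find, so schedule_* calls fall through to the per-topic sub-driver. A standalone sketch of the same delegation pattern; the fake driver classes, method map, and return values here are illustrative, not nova code:

    # Illustrative stand-ins for the per-topic schedulers.
    class FakeComputeScheduler(object):
        def schedule_run_instance(self, context, *args, **kwargs):
            return 'host-picked-by-compute-scheduler'

    class FakeVolumeScheduler(object):
        def schedule_create_volume(self, context, *args, **kwargs):
            return 'host-picked-by-volume-scheduler'

    METHOD_MAP = {'run_instance': 'compute', 'create_volume': 'volume'}

    class Multi(object):
        def __init__(self):
            self.drivers = {'compute': FakeComputeScheduler(),
                            'volume': FakeVolumeScheduler()}

        def __getattr__(self, key):
            # Only schedule_* lookups are delegated; anything else is a
            # genuine missing attribute.
            if not key.startswith('schedule_'):
                raise AttributeError(key)
            method = key[len('schedule_'):]
            if method not in METHOD_MAP:
                raise AttributeError(key)
            return getattr(self.drivers[METHOD_MAP[method]], key)

    multi = Multi()
    assert multi.schedule_run_instance(None) == 'host-picked-by-compute-scheduler'
    assert multi.schedule_create_volume(None) == 'host-picked-by-volume-scheduler'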
- server_name = "server%s" % id + server_name = name or "server%s" % id if reservation_id != "": server_name = "reservation_%s" % (reservation_id, ) @@ -1880,14 +1880,17 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 400) def test_update_server_name_v1_1(self): + self.stubs.Set(nova.db.api, 'instance_get', + return_server_with_attributes(name='server_test')) req = webob.Request.blank('/v1.1/servers/1') req.method = 'PUT' req.content_type = 'application/json' - req.body = json.dumps({'server': {'name': 'new-name'}}) + req.body = json.dumps({'server': {'name': 'server_test'}}) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server_test') def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') @@ -1898,8 +1901,9 @@ class ServersTest(test.TestCase): self.assertEqual(params, filtered_dict) return filtered_dict - self.stubs.Set(nova.db.api, 'instance_update', - server_update) + self.stubs.Set(nova.db.api, 'instance_update', server_update) + self.stubs.Set(nova.db.api, 'instance_get', + return_server_with_attributes(name='server_test')) req = webob.Request.blank('/v1.1/servers/1') req.method = 'PUT' @@ -1909,6 +1913,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['name'], 'server_test') def test_create_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedule') -- cgit From e7858fabb433a0ee587a9444f749381bf36d5d92 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Fri, 12 Aug 2011 23:58:13 +0000 Subject: Added durable option for nova rabbit queues added queueu delete script for admin/debug purposes --- nova/flags.py | 1 + nova/rpc/amqp.py | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'nova') diff --git a/nova/flags.py b/nova/flags.py index eb6366ed9..db3906374 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -305,6 +305,7 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') +DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], 'list of APIs to enable by default') DEFINE_string('ec2_host', '$my_ip', 'ip of api server') diff --git a/nova/rpc/amqp.py b/nova/rpc/amqp.py index 61555795a..fe429b266 100644 --- a/nova/rpc/amqp.py +++ b/nova/rpc/amqp.py @@ -257,7 +257,7 @@ class TopicAdapterConsumer(AdapterConsumer): self.queue = topic self.routing_key = topic self.exchange = FLAGS.control_exchange - self.durable = False + self.durable = FLAGS.rabbit_durable_queues super(TopicAdapterConsumer, self).__init__(connection=connection, topic=topic, proxy=proxy) @@ -345,7 +345,7 @@ class TopicPublisher(Publisher): def __init__(self, connection=None, topic='broadcast'): self.routing_key = topic self.exchange = FLAGS.control_exchange - self.durable = False + self.durable = FLAGS.rabbit_durable_queues super(TopicPublisher, self).__init__(connection=connection) @@ -373,6 +373,7 @@ class DirectConsumer(Consumer): self.queue = msg_id self.routing_key = msg_id self.exchange = msg_id + 
self.durable = False self.auto_delete = True self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -386,6 +387,7 @@ class DirectPublisher(Publisher): def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id self.exchange = msg_id + self.durable = False self.auto_delete = True super(DirectPublisher, self).__init__(connection=connection) @@ -573,7 +575,7 @@ def send_message(topic, message, wait=True): publisher = messaging.Publisher(connection=Connection.instance(), exchange=FLAGS.control_exchange, - durable=False, + durable=FLAGS.rabbit_durable_queues, exchange_type='topic', routing_key=topic) publisher.send(message) -- cgit From a1e60edee71d2cb24739f2f44ba13fbf28e72c95 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:03:11 -0700 Subject: get rid of network_info hack and pass it everywhere --- nova/virt/libvirt/connection.py | 22 ++++++-------- nova/virt/libvirt/firewall.py | 64 +++++++++++++++------------------------ nova/virt/libvirt/netutils.py | 67 ----------------------------------------- 3 files changed, 33 insertions(+), 120 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 6d043577a..bc317d660 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -471,10 +471,10 @@ class LibvirtConnection(driver.ComputeDriver): # in the guest OS. But, in case of KVM, shutdown() does not work... self.destroy(instance, network_info, cleanup=False) self.plug_vifs(instance, network_info) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" @@ -531,7 +531,7 @@ class LibvirtConnection(driver.ComputeDriver): """ self.destroy(instance, network_info, cleanup=False) - xml = self.to_xml(instance, rescue=True) + xml = self.to_xml(instance, network_info, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} @@ -574,9 +574,9 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, context, instance, - network_info=None, block_device_info=None): - xml = self.to_xml(instance, False, network_info=network_info, + def spawn(self, context, instance, network_info, + block_device_info=None): + xml = self.to_xml(instance, network_info, False, block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) @@ -584,7 +584,7 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_boot(): """Called at an interval until the VM is running.""" @@ -992,10 +992,6 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=None): block_device_mapping = 
driver.block_device_info_get_mapping( block_device_info) - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = netutils.get_network_info(instance) nics = [] for (network, mapping) in network_info: @@ -1082,7 +1078,7 @@ class LibvirtConnection(driver.ComputeDriver): xml_info['disk'] = xml_info['basepath'] + "/disk" return xml_info - def to_xml(self, instance, rescue=False, network_info=None, + def to_xml(self, instance, network_info, rescue=False, block_device_info=None): # TODO(termie): cache? LOG.debug(_('instance %s: starting toXML method'), instance['name']) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 9ce57b6c9..fa29b99c3 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -40,17 +40,17 @@ except ImportError: class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Prepare filters for the instance. At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """Apply instance filter. Once this method returns, the instance should be firewalled @@ -60,9 +60,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -85,7 +83,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Create rules to block spoofing and allow dhcp. This gets called when spawning an instance, before @@ -150,7 +148,7 @@ class NWFilterFirewall(FirewallDriver): self.static_filters_configured = False self.handle_security_groups = False - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass @@ -189,13 +187,10 @@ class NWFilterFirewall(FirewallDriver): ''' - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" logging.info('called setup_basic_filtering in nwfilter') - if not network_info: - network_info = netutils.get_network_info(instance) - if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. 
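For orientation while reading these firewall and libvirt changes: network_info is the structure that netutils.get_network_info() used to rebuild from the database and that is now passed down from compute. A minimal hand-built value of its shape, with illustrative addresses only (the keys mirror the ones visible in the removed helper and the test fixtures later in this series):

    # network_info is a list of (network, mapping) pairs, one per
    # virtual interface. All values below are made up for illustration.
    network = {'label': 'private',
               'bridge': 'br100',
               'bridge_interface': 'eth0',
               'cidr': '10.0.0.0/24',
               'cidr_v6': 'fd00::/64',
               'gateway': '10.0.0.1',
               'broadcast': '10.0.0.255'}
    mapping = {'label': 'private',
               'mac': '02:16:3e:00:00:01',
               'gateway': '10.0.0.1',
               'dhcp_server': '10.0.0.1',
               'broadcast': '10.0.0.255',
               'rxtx_cap': 0,
               'dns': ['8.8.8.8'],
               'ips': [{'ip': '10.0.0.2',
                        'netmask': '255.255.255.0',
                        'enabled': '1'}]}
    network_info = [(network, mapping)]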
@@ -300,10 +295,8 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Clear out the nwfilter rules.""" - if not network_info: - network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -326,16 +319,13 @@ class NWFilterFirewall(FirewallDriver): LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Creates an NWFilter for the given instance. In the process, it makes sure the filters for the provider blocks, security groups, and base filter are all in place. """ - if not network_info: - network_info = netutils.get_network_info(instance) - self.refresh_provider_fw_rules() ctxt = context.get_admin_context() @@ -500,9 +490,8 @@ class NWFilterFirewall(FirewallDriver): return 'nova-instance-%s' % (instance['name']) return 'nova-instance-%s-%s' % (instance['name'], nic_id) - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - network_info = netutils.get_network_info(instance) for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) @@ -521,6 +510,7 @@ class IptablesFirewallDriver(FirewallDriver): from nova.network import linux_net self.iptables = linux_net.iptables_manager self.instances = {} + self.network_infos = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) self.basicly_filtered = False @@ -529,22 +519,22 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up provider rules and basic NWFilter.""" - if not network_info: - network_info = netutils.get_network_info(instance) self.nwfilter.setup_basic_filtering(instance, network_info) if not self.basicly_filtered: LOG.debug(_('iptables firewall: Setup Basic Filtering')) self.refresh_provider_fw_rules() self.basicly_filtered = True - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. 
Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): if self.instances.pop(instance['id'], None): + # NOTE(vish): use the passed info instead of the stored info + self.network_infos.pop(instance['id']) self.remove_filters_for_instance(instance) self.iptables.apply() self.nwfilter.unfilter_instance(instance, network_info) @@ -552,11 +542,10 @@ class IptablesFirewallDriver(FirewallDriver): LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) - def prepare_instance_filter(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def prepare_instance_filter(self, instance, network_info): self.instances[instance['id']] = instance - self.add_filters_for_instance(instance, network_info) + self.network_infos[instance['id']] = network_info + self.add_filters_for_instance(instance) self.iptables.apply() def _create_filter(self, ips, chain_name): @@ -583,7 +572,8 @@ class IptablesFirewallDriver(FirewallDriver): for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule) - def add_filters_for_instance(self, instance, network_info=None): + def add_filters_for_instance(self, instance): + network_info = self.network_infos[instance['id']] chain_name = self._instance_chain_name(instance) if FLAGS.use_ipv6: self.iptables.ipv6['filter'].add_chain(chain_name) @@ -601,9 +591,7 @@ class IptablesFirewallDriver(FirewallDriver): if FLAGS.use_ipv6: self.iptables.ipv6['filter'].remove_chain(chain_name) - def instance_rules(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def instance_rules(self, instance, network_info): ctxt = context.get_admin_context() ipv4_rules = [] @@ -726,14 +714,10 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.apply() @utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, - security_group, - network_info=None): + def do_refresh_security_group_rules(self, security_group): for instance in self.instances.values(): self.remove_filters_for_instance(instance) - if not network_info: - network_info = netutils.get_network_info(instance) - self.add_filters_for_instance(instance, network_info) + self.add_filters_for_instance(instance) def refresh_provider_fw_rules(self): """See class:FirewallDriver: docs.""" diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index a8e88fc07..6f303072d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -23,12 +23,7 @@ import netaddr -from nova import context -from nova import db -from nova import exception from nova import flags -from nova import ipv6 -from nova import utils FLAGS = flags.FLAGS @@ -47,65 +42,3 @@ def get_net_and_prefixlen(cidr): def get_ip_version(cidr): net = netaddr.IPNetwork(cidr) return int(net.version) - - -def get_network_info(instance): - # TODO(tr3buchet): this function needs to go away! 
network info - # MUST be passed down from compute - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - try: - fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) - except exception.FixedIpNotFoundForInstance: - fixed_ips = [] - - vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get(admin_context, - instance['instance_type_id']) - network_info = [] - - for vif in vifs: - network = vif['network'] - - # determine which of the instance's IPs belong to this network - network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if - fixed_ip['network_id'] == network['id']] - - def ip_dict(ip): - return { - 'ip': ip, - 'netmask': network['netmask'], - 'enabled': '1'} - - def ip6_dict(): - prefix = network['cidr_v6'] - mac = vif['address'] - project_id = instance['project_id'] - return { - 'ip': ipv6.to_global(prefix, mac, project_id), - 'netmask': network['netmask_v6'], - 'enabled': '1'} - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'dhcp_server': network['gateway'], - 'mac': vif['address'], - 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if network['dns1']: - mapping['dns'].append(network['dns1']) - if network['dns2']: - mapping['dns'].append(network['dns2']) - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict()] - mapping['gateway6'] = network['gateway_v6'] - - network_info.append((network, mapping)) - return network_info -- cgit From 87e97a868b4b7361937bac8f637ec014276aaf5c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:03:53 -0700 Subject: use dhcp server instead of gateway for filter exception --- nova/virt/libvirt/firewall.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index fa29b99c3..a7a67dacb 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -609,7 +609,7 @@ class IptablesFirewallDriver(FirewallDriver): ipv4_rules += ['-j $provider'] ipv6_rules += ['-j $provider'] - dhcp_servers = [info['gateway'] for (_n, info) in network_info] + dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info] for dhcp_server in dhcp_servers: ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' -- cgit From 87ff404bf2bffe690292f7d3922c1ca2529f852b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 21:17:20 -0700 Subject: rename project_net to same_net --- nova/virt/libvirt/connection.py | 6 +++--- nova/virt/libvirt/firewall.py | 8 ++++---- nova/virt/libvirt/vif.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'nova') diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index bc317d660..f905ce92b 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -32,7 +32,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). 
:injected_network_template: Template file for injected network -:allow_project_net_traffic: Whether to allow in project network traffic +:allow_same_net_traffic: Whether to allow in project network traffic """ @@ -96,9 +96,9 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -flags.DEFINE_bool('allow_project_net_traffic', +flags.DEFINE_bool('allow_same_net_traffic', True, - 'Whether to allow in project network traffic') + 'Whether to allow network traffic from same network') flags.DEFINE_bool('use_cow_images', True, 'Whether to use cow images') diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index a7a67dacb..11e3906b8 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -232,7 +232,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self.nova_base_ipv6_filter) self._define_filter(self.nova_dhcp_filter) self._define_filter(self.nova_ra_filter) - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: self._define_filter(self.nova_project_filter) if FLAGS.use_ipv6: self._define_filter(self.nova_project_filter_v6) @@ -378,7 +378,7 @@ class NWFilterFirewall(FirewallDriver): instance_filter_children = [base_filter, 'nova-provider-rules', instance_secgroup_filter_name] - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: instance_filter_children.append('nova-project') if FLAGS.use_ipv6: instance_filter_children.append('nova-project-v6') @@ -616,7 +616,7 @@ class IptablesFirewallDriver(FirewallDriver): '-j ACCEPT' % (dhcp_server,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrs = [network['cidr'] for (network, _m) in network_info] for cidr in cidrs: ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) @@ -633,7 +633,7 @@ class IptablesFirewallDriver(FirewallDriver): '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrv6s = [network['cidr_v6'] for (network, _m) in network_info] diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index e243d4fa0..4cb9abda4 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -44,7 +44,7 @@ class LibvirtBridgeDriver(VIFDriver): gateway6 = mapping.get('gateway6') mac_id = mapping['mac'].replace(':', '') - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: template = "\n" net, mask = netutils.get_net_and_mask(network['cidr']) values = [("PROJNET", net), ("PROJMASK", mask)] -- cgit From f7d1270c94d884e661a79d74fb2b2f88f6eb619f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 12 Aug 2011 22:05:34 -0700 Subject: fix all of the tests --- nova/tests/test_libvirt.py | 84 ++++++++++++++++++++++------------------- nova/virt/libvirt/connection.py | 8 ++-- 2 files changed, 50 insertions(+), 42 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 2180cf4f0..df291ee68 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -49,18 +49,19 @@ def _create_network_info(count=1, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 fake = 'fake' - fake_ip = '0.0.0.0/0' - fake_ip_2 = '0.0.0.1/0' - fake_ip_3 = '0.0.0.1/0' + fake_ip = '10.11.12.13' + fake_ip_2 = '0.0.0.1' + fake_ip_3 = '0.0.0.1' fake_vlan = 100 fake_bridge_interface = 'eth0' network = {'bridge': fake, 'cidr': fake_ip, 'cidr_v6': fake_ip, + 'gateway_v6': fake, 
'vlan': fake_vlan, 'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, - 'dhcp_server': fake, + 'dhcp_server': '10.0.0.1', 'gateway': fake, 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} @@ -273,15 +274,14 @@ class LibvirtConnTestCase(test.TestCase): conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) - result = conn._prepare_xml_info(instance_ref, False) - self.assertFalse(result['nics']) - - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info()) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(), + False) self.assertTrue(len(result['nics']) == 1) - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info(2)) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(2), + False) self.assertTrue(len(result['nics']) == 2) def test_xml_and_uri_no_ramdisk_no_kernel(self): @@ -408,16 +408,16 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info(2) conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) - xml = conn.to_xml(instance_ref, False, network_info) + xml = conn.to_xml(instance_ref, network_info, False) tree = xml_to_tree(xml) interfaces = tree.findall("./devices/interface") self.assertEquals(len(interfaces), 2) parameters = interfaces[0].findall('./filterref/parameter') self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertEquals(parameters[0].get('value'), '0.0.0.0/0') + self.assertEquals(parameters[0].get('value'), '10.11.12.13') self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertEquals(parameters[1].get('value'), 'fake') + self.assertEquals(parameters[1].get('value'), '10.0.0.1') def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, @@ -431,7 +431,8 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') - xml = conn.to_xml(instance_ref) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info) tree = xml_to_tree(xml) check = [ @@ -528,17 +529,20 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref, rescue) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, - '%s failed check %d' % (xml, i)) + '%s != %s failed check %d' % + (check(tree), expected_result, i)) for i, (check, expected_result) in enumerate(common_checks): self.assertEqual(check(tree), expected_result, - '%s failed common check %d' % (xml, i)) + '%s != %s failed common check %d' % + (check(tree), expected_result, i)) # This test is supposed to make sure we don't # override a specifically set uri @@ -942,8 +946,9 @@ class IptablesFirewallTestCase(test.TestCase): from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info() + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) in_rules = filter(lambda l: not l.startswith('#'), self.in_filter_rules) @@ -1008,7 +1013,7 @@ class 
IptablesFirewallTestCase(test.TestCase): ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, network_info) - self.fw.add_filters_for_instance(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) ipv4 = self.fw.iptables.ipv4['filter'].rules ipv6 = self.fw.iptables.ipv6['filter'].rules ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len @@ -1023,7 +1028,7 @@ class IptablesFirewallTestCase(test.TestCase): self.mox.StubOutWithMock(self.fw, 'add_filters_for_instance', use_mock_anything=True) - self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg()) + self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg()) self.fw.instances[instance_ref['id']] = instance_ref self.mox.ReplayAll() self.fw.do_refresh_security_group_rules("fake") @@ -1043,11 +1048,12 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance_ref) - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info + self.fw.setup_basic_filtering(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance_ref) + self.fw.unfilter_instance(instance_ref, network_info) # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) @@ -1057,14 +1063,14 @@ class IptablesFirewallTestCase(test.TestCase): def test_provider_firewall_rules(self): # setup basic instance data instance_ref = self._create_instance_ref() - nw_info = _create_network_info(1) _setup_networking(instance_ref['id'], self.test_ip) # FRAGILE: peeks at how the firewall names chains chain_name = 'inst-%s' % instance_ref['id'] # create a firewall via setup_basic_filtering like libvirt_conn.spawn # should have a chain with 0 rules - self.fw.setup_basic_filtering(instance_ref, network_info=nw_info) + network_info = _create_network_info(1) + self.fw.setup_basic_filtering(instance_ref, network_info) self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == 'provider'] @@ -1094,8 +1100,8 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEqual(2, len(rules)) # create the instance filter and make sure it has a jump rule - self.fw.prepare_instance_filter(instance_ref, network_info=nw_info) - self.fw.apply_instance_filter(instance_ref) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == chain_name] jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] @@ -1247,7 +1253,7 @@ class NWFilterTestCase(test.TestCase): def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - '561212121212') + 'fake') secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', @@ -1263,9 +1269,10 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - self.fw.setup_basic_filtering(instance) - 
self.fw.prepare_instance_filter(instance)
-        self.fw.apply_instance_filter(instance)
+        network_info = _create_network_info()
+        self.fw.setup_basic_filtering(instance, network_info)
+        self.fw.prepare_instance_filter(instance, network_info)
+        self.fw.apply_instance_filter(instance, network_info)
         _ensure_all_called()
         self.teardown_security_group()
         db.instance_destroy(context.get_admin_context(), instance_ref['id'])
@@ -1296,11 +1303,12 @@ class NWFilterTestCase(test.TestCase):
         instance = db.instance_get(self.context, inst_id)
         _setup_networking(instance_ref['id'], self.test_ip)
-        self.fw.setup_basic_filtering(instance)
-        self.fw.prepare_instance_filter(instance)
-        self.fw.apply_instance_filter(instance)
+        network_info = _create_network_info()
+        self.fw.setup_basic_filtering(instance, network_info)
+        self.fw.prepare_instance_filter(instance, network_info)
+        self.fw.apply_instance_filter(instance, network_info)
         original_filter_count = len(fakefilter.filters)
-        self.fw.unfilter_instance(instance)
+        self.fw.unfilter_instance(instance, network_info)
         # should undefine 2 filters: instance and instance-secgroup
         self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index f905ce92b..5945a725d 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -463,8 +463,8 @@ class LibvirtConnection(driver.ComputeDriver):
         """
         virt_dom = self._conn.lookupByName(instance['name'])
         # NOTE(itoumsn): Use XML derived from the running instance
-        # instead of using to_xml(instance). This is almost the ultimate
-        # stupid workaround.
+        # instead of using to_xml(instance, network_info). This is almost
+        # the ultimate stupid workaround.
         xml = virt_dom.XMLDesc(0)
         # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
         # better because we cannot ensure flushing dirty buffers
@@ -988,7 +988,7 @@ class LibvirtConnection(driver.ComputeDriver):
         else:
             raise exception.InvalidDevicePath(path=device_path)
-    def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+    def _prepare_xml_info(self, instance, network_info, rescue,
                           block_device_info=None):
         block_device_mapping = driver.block_device_info_get_mapping(
             block_device_info)
@@ -1082,7 +1082,7 @@ class LibvirtConnection(driver.ComputeDriver):
                block_device_info=None):
         # TODO(termie): cache?
        LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-        xml_info = self._prepare_xml_info(instance, rescue, network_info,
+        xml_info = self._prepare_xml_info(instance, network_info, rescue,
                                           block_device_info)
         xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
         LOG.debug(_('instance %s: finished toXML method'), instance['name'])
-- cgit

From c533e6ed3d2df8725dbcb48e7e546eb853b7ad41 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Fri, 12 Aug 2011 22:36:10 -0700
Subject: make sure security groups come back on restart of nova-compute

---
 nova/compute/manager.py         | 6 ++++--
 nova/tests/test_compute.py      | 4 ++--
 nova/tests/test_libvirt.py      | 2 ++
 nova/virt/driver.py             | 2 +-
 nova/virt/fake.py               | 4 ++--
 nova/virt/libvirt/connection.py | 9 +++++----
 nova/virt/libvirt/firewall.py   | 14 ++++++--------
 nova/virt/xenapi_conn.py        | 2 +-
 8 files changed, 23 insertions(+), 20 deletions(-)
(limited to 'nova')

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d38213083..5b98e9ec1 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -170,7 +170,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         elif drv_state == power_state.RUNNING:
             # Hyper-V and VMWareAPI drivers will raise an exception
             try:
-                self.driver.ensure_filtering_rules_for_instance(instance)
+                net_info = self._get_instance_nw_info(context, instance)
+                self.driver.ensure_filtering_rules_for_instance(instance,
+                                                                net_info)
             except NotImplementedError:
                 LOG.warning(_('Hypervisor driver does not '
                               'support firewall rules'))
@@ -1308,7 +1310,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # This nwfilter is necessary on the destination host.
         # In addition, this method is creating filtering rule
         # onto destination host.
-        self.driver.ensure_filtering_rules_for_instance(instance_ref)
+        self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info)
     def live_migration(self, context, instance_id, dest):
         """Executing live migration.
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..9d6e5aee5 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -632,7 +632,7 @@ class ComputeTestCase(test.TestCase): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.volume_manager = volmock @@ -657,7 +657,7 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.driver = drivermock diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index df291ee68..7f4a3b09a 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -644,6 +644,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) + network_info = _create_network_info() # Start test self.mox.ReplayAll() @@ -653,6 +654,7 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) conn.ensure_filtering_rules_for_instance(instance_ref, + network_info, time=fake_timer) except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index df4a66ac2..20af2666d 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -252,7 +252,7 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token pass - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """Setting up filtering rules and waiting for its completion. 
    To migrate an instance, filtering rules to hypervisors
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 880702af1..2ffa33d40 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -487,7 +487,7 @@ class FakeConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')
-    def ensure_filtering_rules_for_instance(self, instance_ref):
+    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')
@@ -496,7 +496,7 @@ class FakeConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         return
-    def unfilter_instance(self, instance_ref, network_info=None):
+    def unfilter_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 5945a725d..71516011a 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1502,7 +1502,7 @@ class LibvirtConnection(driver.ComputeDriver):
         return
-    def ensure_filtering_rules_for_instance(self, instance_ref,
+    def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
                                             time=None):
         """Setting up filtering rules and waiting for its completion.
@@ -1532,14 +1532,15 @@ class LibvirtConnection(driver.ComputeDriver):
         # If any instances never launch at destination host,
         # basic-filtering must be set here.
-        self.firewall_driver.setup_basic_filtering(instance_ref)
+        self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
         # setting up nova-instance-instance-xx mainly.
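The hunk continues below with a bounded wait loop around instance_filter_exists. The same polling pattern in isolation, as a sketch; the retry count, interval, and names are illustrative, and a sleep is added for clarity:

    import time

    def wait_until(predicate, retries=30, interval=1.0):
        # Poll predicate() up to `retries` times before giving up.
        for _ in range(retries):
            if predicate():
                return
            time.sleep(interval)
        raise RuntimeError('timed out waiting for condition')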
-        self.firewall_driver.prepare_instance_filter(instance_ref)
+        self.firewall_driver.prepare_instance_filter(instance_ref, network_info)
         # wait for completion
         timeout_count = range(FLAGS.live_migration_retry_count)
         while timeout_count:
-            if self.firewall_driver.instance_filter_exists(instance_ref):
+            if self.firewall_driver.instance_filter_exists(instance_ref,
+                                                           network_info):
                 break
             timeout_count.pop()
             if len(timeout_count) == 0:
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 11e3906b8..55fc58458 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -92,7 +92,7 @@ class FirewallDriver(object):
         """
         raise NotImplementedError()
-    def instance_filter_exists(self, instance):
+    def instance_filter_exists(self, instance, network_info):
         """Check nova-instance-instance-xxx exists"""
         raise NotImplementedError()
@@ -391,9 +391,7 @@ class NWFilterFirewall(FirewallDriver):
         self._define_filter(self._filter_container(filter_name,
                                                    filter_children))
-    def refresh_security_group_rules(self,
-                                     security_group_id,
-                                     network_info=None):
+    def refresh_security_group_rules(self, security_group_id):
         return self._define_filter(
                 self.security_group_to_nwfilter_xml(security_group_id))
@@ -702,15 +700,15 @@ class IptablesFirewallDriver(FirewallDriver):
         return ipv4_rules, ipv6_rules
-    def instance_filter_exists(self, instance):
+    def instance_filter_exists(self, instance, network_info):
         """Check nova-instance-instance-xxx exists"""
         return self.nwfilter.instance_filter_exists(instance, network_info)
     def refresh_security_group_members(self, security_group):
         pass
-    def refresh_security_group_rules(self, security_group, network_info=None):
-        self.do_refresh_security_group_rules(security_group, network_info)
+    def refresh_security_group_rules(self, security_group):
+        self.do_refresh_security_group_rules(security_group)
         self.iptables.apply()
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 76b6c57fc..0ec957cf3 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -309,7 +309,7 @@ class XenAPIConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only by libvirt.')
-    def ensure_filtering_rules_for_instance(self, instance_ref):
+    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
         """This method is supported only by libvirt."""
         return
-- cgit

From b46320a4175adc4012e60d4eae793a42f3a8186b Mon Sep 17 00:00:00 2001
From: Jesse Andrews
Date: Mon, 15 Aug 2011 02:55:22 -0700
Subject: make list response for floating ip match other apis

---
 nova/api/openstack/contrib/floating_ips.py            |  4 ++--
 nova/tests/api/openstack/contrib/test_floating_ips.py | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)
(limited to 'nova')

diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index c07bfdf09..f6824a601 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -43,8 +43,8 @@ def _translate_floating_ip_view(floating_ip):
 def _translate_floating_ips_view(floating_ips):
-    return {'floating_ips': [_translate_floating_ip_view(floating_ip)
-                             for floating_ip in floating_ips]}
+    return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
+                             for ip in floating_ips]}
 class FloatingIPController(object):
diff --git
a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..7996ebbb9 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -116,14 +116,14 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'floating_ip': {'instance_id': 11, - 'ip': '10.10.10.10', - 'fixed_ip': '10.0.0.1', - 'id': 1}}, - {'floating_ip': {'instance_id': None, - 'ip': '10.10.10.11', - 'fixed_ip': None, - 'id': 2}}]} + response = {'floating_ips': [{'instance_id': 11, + 'ip': '10.10.10.10', + 'fixed_ip': '10.0.0.1', + 'id': 1}, + {'instance_id': None, + 'ip': '10.10.10.11', + 'fixed_ip': None, + 'id': 2}]} self.assertEqual(res_dict, response) def test_floating_ip_show(self): -- cgit From c53d0567e4526b1e4a2ee5665ac81170a1771d17 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 15 Aug 2011 11:10:44 -0700 Subject: Fix the tests when libvirt actually exists --- nova/tests/test_libvirt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova') diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 7f4a3b09a..f3f7e8057 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -627,7 +627,7 @@ class LibvirtConnTestCase(test.TestCase): return # Preparing mocks - def fake_none(self): + def fake_none(self, *args): return def fake_raise(self): @@ -1050,7 +1050,7 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - network_info = _create_network_info + network_info = _create_network_info() self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) -- cgit From bc7892f698fbfc21f8d242f52e012d9165e46de7 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Mon, 15 Aug 2011 11:55:53 -0700 Subject: Adding standard inclusion of a body param which most http clients will send along with a POST request. 
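For context on why the handler below needs to accept a body: the API layer deserializes a non-empty request body and passes it to the matched controller action as a keyword argument, so an action that only accepts (self, req, id) fails when a client POSTs a JSON document. A minimal sketch of that interaction; the dispatcher here is illustrative, not nova's actual router:

    import json

    def dispatch(action, raw_body, req, id):
        # Mimics a dispatcher that forwards a deserialized body as a kwarg.
        kwargs = {}
        if raw_body:
            kwargs['body'] = json.loads(raw_body)
        return action(req, id, **kwargs)

    def disassociate(req, id, body=None):
        # Accepts an optional body, since most HTTP clients send one on POST.
        return '202 Accepted'

    dispatch(disassociate, '{}', None, 1)   # client that sends a body
    dispatch(disassociate, '', None, 1)     # client that does not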
--- nova/api/openstack/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/contrib/test_floating_ips.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index c07bfdf09..121a1d4a0 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,7 +131,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id): + def disassociate(self, req, id, body): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..dd58a1b22 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -177,8 +177,10 @@ class FloatingIpTest(test.TestCase): self.assertEqual(actual, expected) def test_floating_ip_disassociate(self): + body = dict() req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') req.method = 'POST' + req.body = json.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) -- cgit From fdb8c92739026e96ac52fc165d70c8f8c7594177 Mon Sep 17 00:00:00 2001 From: Jake Dahn Date: Mon, 15 Aug 2011 12:04:51 -0700 Subject: making body default to none --- nova/api/openstack/contrib/floating_ips.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova') diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 121a1d4a0..768b9deb1 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -131,7 +131,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id, body): + def disassociate(self, req, id, body=None): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) -- cgit
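A closing illustration tying back to the rabbit_durable_queues change earlier in this series: a durable queue survives a broker restart because its definition is persisted on the server. nova used the carrot library at this point; the standalone sketch below uses the kombu API instead, purely to show what declaring a durable topic queue looks like. The connection URL and the exchange and queue names are illustrative:

    from kombu import Connection, Exchange, Queue

    nova_exchange = Exchange('nova', type='topic', durable=True)
    compute_queue = Queue('compute', exchange=nova_exchange,
                          routing_key='compute', durable=True)

    with Connection('amqp://guest:guest@localhost//') as conn:
        # Binding the queue to a channel and declaring it persists the
        # definition on the broker; with durable=False it would vanish
        # on a RabbitMQ restart.
        compute_queue(conn.channel()).declare()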