diff options
-rw-r--r-- | Revert-iommu-intel-iommu-Enable-CONFIG_DMA_DIRECT_OP.patch | 125 | ||||
-rw-r--r-- | kernel.spec | 6 |
2 files changed, 131 insertions, 0 deletions
diff --git a/Revert-iommu-intel-iommu-Enable-CONFIG_DMA_DIRECT_OP.patch b/Revert-iommu-intel-iommu-Enable-CONFIG_DMA_DIRECT_OP.patch new file mode 100644 index 000000000..46212341f --- /dev/null +++ b/Revert-iommu-intel-iommu-Enable-CONFIG_DMA_DIRECT_OP.patch @@ -0,0 +1,125 @@ +From 3c16e0cc4ace8bd838bf234caead5a766b07fe9d Mon Sep 17 00:00:00 2001 +From: Christoph Hellwig <hch@lst.de> +Date: Thu, 5 Jul 2018 13:29:55 -0600 +Subject: [PATCH] Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and + clean up intel_{alloc,free}_coherent()" + +This commit may cause a less than required dma mask to be used for +some allocations, which apparently leads to module load failures for +iwlwifi sometimes. + +This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a. + +Signed-off-by: Christoph Hellwig <hch@lst.de> +Reported-by: Fabio Coatti <fabio.coatti@gmail.com> +Tested-by: Fabio Coatti <fabio.coatti@gmail.com> +Signed-off-by: Jeremy Cline <jcline@redhat.com> +--- + drivers/iommu/Kconfig | 1 - + drivers/iommu/intel-iommu.c | 62 +++++++++++++++++++++++++++---------- + 2 files changed, 46 insertions(+), 17 deletions(-) + +diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig +index b38798cc5288..f3a21343e636 100644 +--- a/drivers/iommu/Kconfig ++++ b/drivers/iommu/Kconfig +@@ -142,7 +142,6 @@ config DMAR_TABLE + config INTEL_IOMMU + bool "Support for Intel IOMMU using DMA Remapping Devices" + depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) +- select DMA_DIRECT_OPS + select IOMMU_API + select IOMMU_IOVA + select DMAR_TABLE +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 749d8f235346..6392a4964fc5 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -31,7 +31,6 @@ + #include <linux/pci.h> + #include <linux/dmar.h> + #include <linux/dma-mapping.h> +-#include <linux/dma-direct.h> + #include <linux/mempool.h> + #include <linux/memory.h> + #include <linux/cpu.h> +@@ -3709,30 +3708,61 @@ static void 
*intel_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + unsigned long attrs) + { +- void *vaddr; ++ struct page *page = NULL; ++ int order; + +- vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs); +- if (iommu_no_mapping(dev) || !vaddr) +- return vaddr; ++ size = PAGE_ALIGN(size); ++ order = get_order(size); + +- *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr), +- PAGE_ALIGN(size), DMA_BIDIRECTIONAL, +- dev->coherent_dma_mask); +- if (!*dma_handle) +- goto out_free_pages; +- return vaddr; ++ if (!iommu_no_mapping(dev)) ++ flags &= ~(GFP_DMA | GFP_DMA32); ++ else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { ++ if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) ++ flags |= GFP_DMA; ++ else ++ flags |= GFP_DMA32; ++ } ++ ++ if (gfpflags_allow_blocking(flags)) { ++ unsigned int count = size >> PAGE_SHIFT; ++ ++ page = dma_alloc_from_contiguous(dev, count, order, flags); ++ if (page && iommu_no_mapping(dev) && ++ page_to_phys(page) + size > dev->coherent_dma_mask) { ++ dma_release_from_contiguous(dev, page, count); ++ page = NULL; ++ } ++ } ++ ++ if (!page) ++ page = alloc_pages(flags, order); ++ if (!page) ++ return NULL; ++ memset(page_address(page), 0, size); ++ ++ *dma_handle = __intel_map_single(dev, page_to_phys(page), size, ++ DMA_BIDIRECTIONAL, ++ dev->coherent_dma_mask); ++ if (*dma_handle) ++ return page_address(page); ++ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) ++ __free_pages(page, order); + +-out_free_pages: +- dma_direct_free(dev, size, vaddr, *dma_handle, attrs); + return NULL; + } + + static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) + { +- if (!iommu_no_mapping(dev)) +- intel_unmap(dev, dma_handle, PAGE_ALIGN(size)); +- dma_direct_free(dev, size, vaddr, dma_handle, attrs); ++ int order; ++ struct page *page = virt_to_page(vaddr); ++ ++ size = PAGE_ALIGN(size); ++ order = get_order(size); 
++ ++ intel_unmap(dev, dma_handle, size); ++ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) ++ __free_pages(page, order); + } + + static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, +-- +2.17.1 + diff --git a/kernel.spec b/kernel.spec index 6f6265c8d..22817b63e 100644 --- a/kernel.spec +++ b/kernel.spec @@ -674,6 +674,9 @@ Patch523: 0001-xfs-More-robust-inode-extent-count-validation.patch # i686 patch that will eventually be 4.17.8 Patch527: mm-don-t-do-zero_resv_unavail-if-memmap-is-not-allocated.patch +# rhbz 1607092 +Patch528: Revert-iommu-intel-iommu-Enable-CONFIG_DMA_DIRECT_OP.patch + # END OF PATCH DEFINITIONS %endif @@ -1923,6 +1926,9 @@ fi # # %changelog +* Mon Jul 23 2018 Jeremy Cline <jeremy@jcline.org> +- Fix iwlwifi module load failure (rhbz 1607092) + * Tue Jul 17 2018 Justin M. Forbes <jforbes@fedoraproject.org> - 4.17.7-200 - Linux v4.17.7 |