author		Joerg Roedel <joerg.roedel@amd.com>	2009-11-20 15:30:58 +0100
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-11-27 14:16:26 +0100
commit		dcd1e92e405449ecc5e8bd8fcfebf3b2a13d3d37
tree		8c40ea4593985366369de1e9255a8baf478d07b1 /arch/x86/kernel/amd_iommu.c
parent		6de8ad9b9ee0ec5b52ec8ec41401833e5e89186f
x86/amd-iommu: Use __iommu_flush_pages for tlb flushes
This patch re-implements the iommu_flush_tlb functions to use
the __iommu_flush_pages logic.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 22
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 7c06e574008..c55aa079ded 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -451,7 +451,7 @@ static void __iommu_flush_pages(struct protection_domain *domain,
 			       u64 address, size_t size, int pde)
 {
 	int s = 0, i;
-	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
+	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -487,23 +487,15 @@ static void iommu_flush_pages(struct protection_domain *domain,
 }
 
 /* Flush the whole IO/TLB for a given protection domain */
-static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+static void iommu_flush_tlb(struct protection_domain *domain)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-
-	INC_STATS_COUNTER(domain_flush_single);
-
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+static void iommu_flush_tlb_pde(struct protection_domain *domain)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-
-	INC_STATS_COUNTER(domain_flush_single);
-
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }
 
 /*
@@ -1236,7 +1228,7 @@ static void attach_device(struct amd_iommu *iommu,
 	 * here to evict all dirty stuff.
 	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
-	iommu_flush_tlb_pde(iommu, domain->id);
+	iommu_flush_tlb_pde(domain);
 }
 
 /*
@@ -1697,7 +1689,7 @@ retry:
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		iommu_flush_tlb(iommu, dma_dom->domain.id);
+		iommu_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
 	} else if (unlikely(iommu_has_npcache(iommu)))
 		iommu_flush_pages(&dma_dom->domain, address, size);
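
For reference, after this change both TLB-flush helpers reduce to thin wrappers around __iommu_flush_pages(). The sketch below is reconstructed from the hunks above; given the helper's signature (domain, address, size, pde), CMD_INV_IOMMU_ALL_PAGES_ADDRESS is passed as the size argument, presumably to request a whole-domain flush:

	/* Flush the whole IO/TLB for a given protection domain */
	static void iommu_flush_tlb(struct protection_domain *domain)
	{
		/* address 0, size CMD_INV_IOMMU_ALL_PAGES_ADDRESS, no PDE invalidation */
		__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
	}

	/* Flush the whole IO/TLB for a given protection domain - including PDE */
	static void iommu_flush_tlb_pde(struct protection_domain *domain)
	{
		/* last argument set to 1 additionally invalidates the PDEs */
		__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
	}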