From ec40f95db753d3bfdbcc43b1505ecf7980cb6492 Mon Sep 17 00:00:00 2001
From: Greg Ungerer
Date: Fri, 17 Apr 2009 23:11:38 +1000
Subject: m68knommu: fix DMA support for ColdFire

ColdFire CPU family members support DMA (all those with the FEC ethernet
core use it, the rest have dedicated DMA engines). The code support is
just missing a handful of routines for it to be usable by drivers.

Add the missing dma_ functions.

Signed-off-by: Greg Ungerer
---
 arch/m68knommu/Kconfig      |  1 +
 arch/m68knommu/kernel/dma.c | 37 ++++++++++++++++++++++++++++++++++---
 2 files changed, 35 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 4beb59dfc6e..534376299a9 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -16,6 +16,7 @@ config MMU
 
 config NO_DMA
 	bool
+	depends on !COLDFIRE
 	default y
 
 config FPU
diff --git a/arch/m68knommu/kernel/dma.c b/arch/m68knommu/kernel/dma.c
index 93612580663..aaf38bbbb6c 100644
--- a/arch/m68knommu/kernel/dma.c
+++ b/arch/m68knommu/kernel/dma.c
@@ -7,10 +7,9 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/string.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <asm/io.h>
+#include <asm/cacheflush.h>
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			 dma_addr_t *dma_handle, gfp_t gfp)
@@ -36,7 +35,39 @@ void dma_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
 
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+				size_t size, enum dma_data_direction dir)
 {
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		flush_dcache_range(handle, size);
+		break;
+	case DMA_FROM_DEVICE:
+		/* Should be clear already */
+		break;
+	default:
+		if (printk_ratelimit())
+			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
+		break;
+	}
+}
+
+EXPORT_SYMBOL(dma_sync_single_for_device);
+dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
+		enum dma_data_direction dir)
+{
+	dma_addr_t handle = virt_to_phys(addr);
+	flush_dcache_range(handle, size);
+	return handle;
 }
+EXPORT_SYMBOL(dma_map_single);
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size,
+	enum dma_data_direction dir)
+{
+	dma_addr_t handle = page_to_phys(page) + offset;
+	dma_sync_single_for_device(dev, handle, size, dir);
+	return handle;
+}
+EXPORT_SYMBOL(dma_map_page);
 
-- cgit
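
A minimal usage sketch, not part of the patch above: it shows how a ColdFire driver could hand a
transmit buffer to a DMA engine with the dma_map_single() and dma_sync_single_for_device() routines
the patch adds. The helper names and the idea of reusing one streaming mapping across several
transfers are illustrative assumptions, not code from the kernel tree.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical helper: prepare a CPU-written buffer for a device-bound
 * transfer (for example a FEC transmit descriptor).
 */
static dma_addr_t example_tx_map(struct device *dev, void *buf, size_t len)
{
	/*
	 * dma_map_single() flushes the buffer out of the data cache and
	 * returns its physical address, which is what gets programmed
	 * into the DMA controller or ethernet descriptor.
	 */
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}

/*
 * Hypothetical helper: if the CPU writes to the same buffer again before
 * a later transfer, the new data must be pushed out of the cache once
 * more before the hardware is restarted.
 */
static void example_tx_reuse(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
}

For receive buffers the direction would be DMA_FROM_DEVICE, which the new
dma_sync_single_for_device() treats as a no-op ("Should be clear already"), so only the
device-bound direction pays for a cache flush on these non-coherent ColdFire parts.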