Commit 19dca8c0 authored by Christoph Hellwig

dma-direct: make dma_direct_{alloc,free} available to other implementations

So that they don't need to indirect through the operation vector.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Vladimir Murzin <vladimir.murzin@arm.com>
parent 95f18391
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/dma-mapping.h> #include <linux/dma-direct.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <asm/cachetype.h> #include <asm/cachetype.h>
...@@ -39,7 +39,6 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, ...@@ -39,7 +39,6 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs) unsigned long attrs)
{ {
const struct dma_map_ops *ops = &dma_direct_ops;
void *ret; void *ret;
/* /*
...@@ -48,7 +47,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, ...@@ -48,7 +47,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
*/ */
if (attrs & DMA_ATTR_NON_CONSISTENT) if (attrs & DMA_ATTR_NON_CONSISTENT)
return ops->alloc(dev, size, dma_handle, gfp, attrs); return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
ret = dma_alloc_from_global_coherent(size, dma_handle); ret = dma_alloc_from_global_coherent(size, dma_handle);
...@@ -70,10 +69,8 @@ static void arm_nommu_dma_free(struct device *dev, size_t size, ...@@ -70,10 +69,8 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, void *cpu_addr, dma_addr_t dma_addr,
unsigned long attrs) unsigned long attrs)
{ {
const struct dma_map_ops *ops = &dma_direct_ops;
if (attrs & DMA_ATTR_NON_CONSISTENT) { if (attrs & DMA_ATTR_NON_CONSISTENT) {
ops->free(dev, size, cpu_addr, dma_addr, attrs); dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
} else { } else {
int ret = dma_release_from_global_coherent(get_order(size), int ret = dma_release_from_global_coherent(get_order(size),
cpu_addr); cpu_addr);
......
...@@ -38,4 +38,9 @@ static inline void dma_mark_clean(void *addr, size_t size) ...@@ -38,4 +38,9 @@ static inline void dma_mark_clean(void *addr, size_t size)
} }
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
#endif /* _LINUX_DMA_DIRECT_H */ #endif /* _LINUX_DMA_DIRECT_H */
...@@ -40,8 +40,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) ...@@ -40,8 +40,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask; return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
} }
static void *dma_direct_alloc(struct device *dev, size_t size, void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) gfp_t gfp, unsigned long attrs)
{ {
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
int page_order = get_order(size); int page_order = get_order(size);
...@@ -84,7 +84,7 @@ static void *dma_direct_alloc(struct device *dev, size_t size, ...@@ -84,7 +84,7 @@ static void *dma_direct_alloc(struct device *dev, size_t size,
return page_address(page); return page_address(page);
} }
static void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs) dma_addr_t dma_addr, unsigned long attrs)
{ {
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment