Commit f32154c9 authored by Paul Mundt

sh: Add dma-mapping support for dma_alloc/free_coherent() overrides.

This moves the current dma_alloc/free_coherent() implementations over to
generic variants (dma_generic_alloc/free_coherent()) and plugs them in as
the nommu defaults. Other variants can override these defaults in their
dma_map_ops directly.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 73c926be
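
For context, the point of the override hooks can be sketched in a few lines.
The example below is hypothetical and not part of this commit: the
my_variant_* names are invented for illustration, while
dma_generic_alloc/free_coherent() and the dma_map_ops fields are the ones
this commit introduces. A variant that needs special coherent-memory
handling fills in its own hooks instead of redefining the top-level
functions:

	/*
	 * Hypothetical sketch (not from this commit): a platform variant
	 * supplying its own coherent allocators through dma_map_ops.
	 */
	static void *my_variant_alloc_coherent(struct device *dev, size_t size,
					       dma_addr_t *dma_handle, gfp_t gfp)
	{
		/* A real variant would allocate from its own uncached
		 * window here; this sketch just defers to the generic
		 * implementation added by this commit. */
		return dma_generic_alloc_coherent(dev, size, dma_handle, gfp);
	}

	static void my_variant_free_coherent(struct device *dev, size_t size,
					     void *vaddr, dma_addr_t dma_handle)
	{
		dma_generic_free_coherent(dev, size, vaddr, dma_handle);
	}

	struct dma_map_ops my_variant_dma_ops = {
		.alloc_coherent	= my_variant_alloc_coherent,
		.free_coherent	= my_variant_free_coherent,
		/* .map_page, .map_sg and the sync hooks as in nommu_dma_ops */
	};

The inline dma_alloc_coherent() wrapper in the header dispatches through
get_dma_ops(dev), tries dma_alloc_from_coherent() first, and returns NULL
when no .alloc_coherent hook is set, so a variant only overrides what it
actually needs.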
arch/sh/include/asm/dma-mapping.h
@@ -9,6 +9,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
+#include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
@@ -33,12 +36,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir);
@@ -65,7 +62,42 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
 
-#include <asm-generic/dma-coherent.h>
-#include <asm-generic/dma-mapping-common.h>
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+	if (!ops->alloc_coherent)
+		return NULL;
+
+	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+	return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, dma_handle);
+}
+
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_handle);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
arch/sh/kernel/dma-nommu.c
@@ -61,6 +61,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 }
 
 struct dma_map_ops nommu_dma_ops = {
+	.alloc_coherent		= dma_generic_alloc_coherent,
+	.free_coherent		= dma_generic_free_coherent,
 	.map_page		= nommu_map_page,
 	.map_sg			= nommu_map_sg,
 	.sync_single_for_device	= nommu_sync_single,
arch/sh/mm/consistent.c
@@ -33,15 +33,12 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp)
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
@@ -63,30 +60,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
 	*dma_handle = virt_to_phys(ret);
 
-	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
-
 	return ret_nocache;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+void dma_generic_free_coherent(struct device *dev, size_t size,
+			       void *vaddr, dma_addr_t dma_handle)
 {
 	int order = get_order(size);
 	unsigned long pfn = dma_handle >> PAGE_SHIFT;
 	int k;
 
-	WARN_ON(irqs_disabled());	/* for portability */
-
-	if (dma_release_from_coherent(dev, order, vaddr))
-		return;
-
-	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 	for (k = 0; k < (1 << order); k++)
 		__free_pages(pfn_to_page(pfn + k), 0);
+
 	iounmap(vaddr);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
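
From a driver's point of view nothing changes with this commit: the calling
convention stays the same, and only the dispatch underneath moves into
dma_map_ops. A minimal hypothetical usage sketch (dev, ring and ring_dma
are invented names for this example):

	dma_addr_t ring_dma;
	void *ring;

	/* allocate one page of coherent memory for a DMA ring */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* the device addresses the buffer via ring_dma, the CPU via ring */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);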