Commit 7b903e6c authored by Jonas Bonn

openrisc: provide dma_map_ops

This switches OpenRISC over to fully using the generic dma-mapping
framework.  This was almost already the case as the architecture's
implementation was essentially a copy of the generic header.

This also brings this architecture in line with the recent changes
to dma_map_ops (adding attributes to ops->alloc).
Signed-off-by: Jonas Bonn <jonas@southpole.se>
parent b0e026f4
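
The end result for drivers is that nothing OpenRISC-specific is called directly any more: the generic DMA API resolves to or1k_dma_map_ops through get_dma_ops(). A minimal, hypothetical consumer sketch (not part of this commit; the function name and device pointer are made up for illustration):

#include <linux/dma-mapping.h>

static int example_dma_alloc(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* dispatches to or1k_dma_alloc() via dma_alloc_attrs()/ops->alloc;
	 * on OpenRISC the pages come back with the cache-inhibit bit set */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... hand 'handle' to the device, access 'buf' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);	/* or1k_dma_free() */
	return 0;
}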
arch/openrisc/include/asm/dma-mapping.h
@@ -20,150 +20,71 @@
 /*
  * See Documentation/DMA-API-HOWTO.txt and
  * Documentation/DMA-API.txt for documentation.
+ *
+ * This file is written with the intention of eventually moving over
+ * to largely using asm-generic/dma-mapping-common.h in its place.
  */
 
 #include <linux/dma-debug.h>
 #include <asm-generic/dma-coherent.h>
 #include <linux/kmemcheck.h>
+#include <linux/dma-mapping.h>
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t flag);
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			    dma_addr_t dma_handle);
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t size,
-			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs);
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		     size_t size, enum dma_data_direction dir,
-		     struct dma_attrs *attrs);
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		struct dma_attrs *attrs);
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int nents, enum dma_data_direction dir,
-		   struct dma_attrs *attrs);
-void or1k_sync_single_for_cpu(struct device *dev,
-			      dma_addr_t dma_handle, size_t size,
-			      enum dma_data_direction dir);
-void or1k_sync_single_for_device(struct device *dev,
-				 dma_addr_t dma_handle, size_t size,
-				 enum dma_data_direction dir);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+extern struct dma_map_ops or1k_dma_map_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	void *memory;
-
-	memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
-
-	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
-	return memory;
+	return &or1k_dma_map_ops;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
+#include <asm-generic/dma-mapping-common.h>
 
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	kmemcheck_mark_initialized(ptr, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = or1k_map_page(dev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, NULL);
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
-}
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
 
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-				    size_t size,
-				    enum dma_data_direction dir)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
 
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i, ents;
-	struct scatterlist *s;
+	memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
 
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	BUG_ON(!valid_dma_direction(dir));
-	ents = or1k_map_sg(dev, sg, nents, dir, NULL);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
-	return ents;
+	return memory;
 }
 
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-	or1k_unmap_sg(dev, sg, nents, dir, NULL);
-}
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
 
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
-	dma_addr_t addr;
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = or1k_map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-	return addr;
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+					  dma_addr_t *dma_handle, gfp_t gfp)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
+	struct dma_attrs attrs;
 
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-					   size_t size,
-					   enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_sync_single_for_cpu(dev, addr, size, dir);
-	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+
+	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t addr, size_t size,
-					      enum dma_data_direction dir)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_sync_single_for_device(dev, addr, size, dir);
-	debug_dma_sync_single_for_device(dev, addr, size, dir);
+	struct dma_attrs attrs;
+
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+
+	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
 static inline int dma_supported(struct device *dev, u64 dma_mask)
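
The noncoherent wrappers above are where the new attribute plumbing shows: they set DMA_ATTR_NON_CONSISTENT in a stack-allocated struct dma_attrs so that the or1k_dma_alloc() implementation below can skip the cache-inhibiting page walk. A caller could pass attributes explicitly the same way; a hedged sketch under the same assumptions (a valid 'dev'; init_dma_attrs() from linux/dma-attrs.h zeroes the attribute bitmap first):

	struct dma_attrs attrs;
	dma_addr_t handle;
	void *vaddr;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);

	/* plain cachable memory; the caller is then responsible for making
	 * the buffer visible to the device around each DMA transfer */
	vaddr = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL, &attrs);
	if (vaddr)
		dma_free_attrs(dev, PAGE_SIZE, vaddr, handle, &attrs);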
arch/openrisc/kernel/dma.c
@@ -21,13 +21,16 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <linux/dma-attrs.h>
 
 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
 #include <asm/tlbflush.h>
 
-static int page_set_nocache(pte_t *pte, unsigned long addr,
-			    unsigned long next, struct mm_walk *walk)
+static int
+page_set_nocache(pte_t *pte, unsigned long addr,
+		 unsigned long next, struct mm_walk *walk)
 {
 	unsigned long cl;
@@ -46,8 +49,9 @@ static int page_set_nocache(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
-static int page_clear_nocache(pte_t *pte, unsigned long addr,
-			      unsigned long next, struct mm_walk *walk)
+static int
+page_clear_nocache(pte_t *pte, unsigned long addr,
+		   unsigned long next, struct mm_walk *walk)
 {
 	pte_val(*pte) &= ~_PAGE_CI;
@@ -67,9 +71,19 @@ static int page_clear_nocache(pte_t *pte, unsigned long addr,
  * cache-inhibit bit on those pages, and makes sure that the pages are
  * flushed out of the cache before they are used.
  *
+ * If the NON_CONSISTENT attribute is set, then this function just
+ * returns "normal", cachable memory.
+ *
+ * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
+ * into consideration here, too. All current known implementations of
+ * the OR1K support only strongly ordered memory accesses, so that flag
+ * is being ignored for now; uncached but write-combined memory is a
+ * missing feature of the OR1K.
  */
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t gfp)
+static void *
+or1k_dma_alloc(struct device *dev, size_t size,
+	       dma_addr_t *dma_handle, gfp_t gfp,
+	       struct dma_attrs *attrs)
 {
 	unsigned long va;
 	void *page;
@@ -87,20 +101,23 @@ void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
 	va = (unsigned long)page;
 
-	/*
-	 * We need to iterate through the pages, clearing the dcache for
-	 * them and setting the cache-inhibit bit.
-	 */
-	if (walk_page_range(va, va + size, &walk)) {
-		free_pages_exact(page, size);
-		return NULL;
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		/*
+		 * We need to iterate through the pages, clearing the dcache for
+		 * them and setting the cache-inhibit bit.
+		 */
+		if (walk_page_range(va, va + size, &walk)) {
+			free_pages_exact(page, size);
+			return NULL;
+		}
 	}
 
 	return (void *)va;
 }
 
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			    dma_addr_t dma_handle)
+static void
+or1k_dma_free(struct device *dev, size_t size, void *vaddr,
+	      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long va = (unsigned long)vaddr;
 	struct mm_walk walk = {
@@ -108,16 +125,19 @@ void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 		.mm = &init_mm
 	};
 
-	/* walk_page_range shouldn't be able to fail here */
-	WARN_ON(walk_page_range(va, va + size, &walk));
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		/* walk_page_range shouldn't be able to fail here */
+		WARN_ON(walk_page_range(va, va + size, &walk));
+	}
 
 	free_pages_exact(vaddr, size);
 }
 
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t size,
-			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs)
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+	      unsigned long offset, size_t size,
+	      enum dma_data_direction dir,
+	      struct dma_attrs *attrs)
 {
 	unsigned long cl;
 	dma_addr_t addr = page_to_phys(page) + offset;
@@ -147,16 +167,18 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
 	return addr;
 }
 
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		     size_t size, enum dma_data_direction dir,
-		     struct dma_attrs *attrs)
+static void
+or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	/* Nothing special to do here... */
 }
 
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+static int
+or1k_map_sg(struct device *dev, struct scatterlist *sg,
+	    int nents, enum dma_data_direction dir,
+	    struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -169,9 +191,10 @@ int or1k_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int nents, enum dma_data_direction dir,
-		   struct dma_attrs *attrs)
+static void
+or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+	      int nents, enum dma_data_direction dir,
+	      struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -181,9 +204,10 @@ void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
 	}
 }
 
-void or1k_sync_single_for_cpu(struct device *dev,
-			      dma_addr_t dma_handle, size_t size,
-			      enum dma_data_direction dir)
+static void
+or1k_sync_single_for_cpu(struct device *dev,
+			 dma_addr_t dma_handle, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
@@ -193,9 +217,10 @@ void or1k_sync_single_for_cpu(struct device *dev,
 	mtspr(SPR_DCBIR, cl);
 }
 
-void or1k_sync_single_for_device(struct device *dev,
-				 dma_addr_t dma_handle, size_t size,
-				 enum dma_data_direction dir)
+static void
+or1k_sync_single_for_device(struct device *dev,
+			    dma_addr_t dma_handle, size_t size,
+			    enum dma_data_direction dir)
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
@@ -205,6 +230,18 @@ void or1k_sync_single_for_device(struct device *dev,
 	mtspr(SPR_DCBFR, cl);
 }
 
+struct dma_map_ops or1k_dma_map_ops = {
+	.alloc = or1k_dma_alloc,
+	.free = or1k_dma_free,
+	.map_page = or1k_map_page,
+	.unmap_page = or1k_unmap_page,
+	.map_sg = or1k_map_sg,
+	.unmap_sg = or1k_unmap_sg,
+	.sync_single_for_cpu = or1k_sync_single_for_cpu,
+	.sync_single_for_device = or1k_sync_single_for_device,
+};
+EXPORT_SYMBOL(or1k_dma_map_ops);
+
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
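
The two sync hooks registered in or1k_dma_map_ops are what keep the streaming API correct on OpenRISC's non-coherent data cache: or1k_sync_single_for_device flushes lines with SPR_DCBFR before the device reads, and or1k_sync_single_for_cpu invalidates them with SPR_DCBIR before the CPU reads. A hypothetical receive path, with buffer name and length made up:

	/* map a driver buffer for device writes; or1k_map_page() touches
	 * the dcache according to the transfer direction */
	dma_addr_t rx = dma_map_single(dev, rx_buf, RX_LEN, DMA_FROM_DEVICE);

	/* ... device DMAs into the buffer ... */

	/* invalidate stale cache lines before the CPU looks at the data */
	dma_sync_single_for_cpu(dev, rx, RX_LEN, DMA_FROM_DEVICE);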