Commit 4f3dd8a0 authored by Mark Nelson, committed by Benjamin Herrenschmidt

powerpc/dma: Use the struct dma_attrs in iommu code

Update iommu_alloc() to take the struct dma_attrs and pass them on to
tce_build(). This change propagates down to the tce_build functions of
all the platforms.
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 4795b780
...@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev, ...@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *page, unsigned int npages, void *page, unsigned int npages,
enum dma_data_direction direction, enum dma_data_direction direction,
unsigned long mask, unsigned int align_order) unsigned long mask, unsigned int align_order,
struct dma_attrs *attrs)
{ {
unsigned long entry, flags; unsigned long entry, flags;
dma_addr_t ret = DMA_ERROR_CODE; dma_addr_t ret = DMA_ERROR_CODE;
...@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, ...@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* Put the TCEs in the HW table */ /* Put the TCEs in the HW table */
ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK, ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
direction); direction, attrs);
/* Flush/invalidate TLB caches if necessary */ /* Flush/invalidate TLB caches if necessary */
...@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages, entry, dma_addr); npages, entry, dma_addr);
/* Insert into HW table */ /* Insert into HW table */
ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction); ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
direction, attrs);
/* If we are in an open segment, try merging */ /* If we are in an open segment, try merging */
if (segstart != s) { if (segstart != s) {
...@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, ...@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT; align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> IOMMU_PAGE_SHIFT, align); mask >> IOMMU_PAGE_SHIFT, align,
attrs);
if (dma_handle == DMA_ERROR_CODE) { if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) { if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, " printk(KERN_INFO "iommu_alloc failed, "
...@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, ...@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
nio_pages = size >> IOMMU_PAGE_SHIFT; nio_pages = size >> IOMMU_PAGE_SHIFT;
io_order = get_iommu_order(size); io_order = get_iommu_order(size);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> IOMMU_PAGE_SHIFT, io_order); mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
if (mapping == DMA_ERROR_CODE) { if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order); free_pages((unsigned long)ret, order);
return NULL; return NULL;
......
...@@ -173,7 +173,8 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, ...@@ -173,7 +173,8 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
} }
static void tce_build_cell(struct iommu_table *tbl, long index, long npages, static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction) unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
int i; int i;
unsigned long *io_pte, base_pte; unsigned long *io_pte, base_pte;
...@@ -519,7 +520,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, ...@@ -519,7 +520,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
__set_bit(0, window->table.it_map); __set_bit(0, window->table.it_map);
tce_build_cell(&window->table, window->table.it_offset, 1, tce_build_cell(&window->table, window->table.it_offset, 1,
(unsigned long)iommu->pad_page, DMA_TO_DEVICE); (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
window->table.it_hint = window->table.it_blocksize; window->table.it_hint = window->table.it_blocksize;
return window; return window;
......
...@@ -42,7 +42,8 @@ ...@@ -42,7 +42,8 @@
#include <asm/iseries/iommu.h> #include <asm/iseries/iommu.h>
static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction) unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
u64 rc; u64 rc;
u64 tce, rpn; u64 tce, rpn;
......
...@@ -85,7 +85,8 @@ static int iommu_table_iobmap_inited; ...@@ -85,7 +85,8 @@ static int iommu_table_iobmap_inited;
static void iobmap_build(struct iommu_table *tbl, long index, static void iobmap_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
enum dma_data_direction direction) enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
u32 *ip; u32 *ip;
u32 rpn; u32 rpn;
......
...@@ -50,7 +50,8 @@ ...@@ -50,7 +50,8 @@
static void tce_build_pSeries(struct iommu_table *tbl, long index, static void tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
enum dma_data_direction direction) enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
u64 proto_tce; u64 proto_tce;
u64 *tcep; u64 *tcep;
...@@ -95,7 +96,8 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) ...@@ -95,7 +96,8 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
enum dma_data_direction direction) enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
u64 rc; u64 rc;
u64 proto_tce, tce; u64 proto_tce, tce;
...@@ -127,7 +129,8 @@ static DEFINE_PER_CPU(u64 *, tce_page) = NULL; ...@@ -127,7 +129,8 @@ static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
enum dma_data_direction direction) enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
u64 rc; u64 rc;
u64 proto_tce; u64 proto_tce;
...@@ -136,7 +139,8 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, ...@@ -136,7 +139,8 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long l, limit; long l, limit;
if (npages == 1) { if (npages == 1) {
tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction); tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
return; return;
} }
...@@ -150,7 +154,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, ...@@ -150,7 +154,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
/* If allocation fails, fall back to the loop implementation */ /* If allocation fails, fall back to the loop implementation */
if (!tcep) { if (!tcep) {
tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction); direction, attrs);
return; return;
} }
__get_cpu_var(tce_page) = tcep; __get_cpu_var(tce_page) = tcep;
......
...@@ -149,7 +149,8 @@ static void dart_flush(struct iommu_table *tbl) ...@@ -149,7 +149,8 @@ static void dart_flush(struct iommu_table *tbl)
static void dart_build(struct iommu_table *tbl, long index, static void dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
enum dma_data_direction direction) enum dma_data_direction direction,
struct dma_attrs *attrs)
{ {
unsigned int *dp; unsigned int *dp;
unsigned int rpn; unsigned int rpn;
......
...@@ -80,7 +80,8 @@ struct machdep_calls { ...@@ -80,7 +80,8 @@ struct machdep_calls {
long index, long index,
long npages, long npages,
unsigned long uaddr, unsigned long uaddr,
enum dma_data_direction direction); enum dma_data_direction direction,
struct dma_attrs *attrs);
void (*tce_free)(struct iommu_table *tbl, void (*tce_free)(struct iommu_table *tbl,
long index, long index,
long npages); long npages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment