Commit d618382b authored by David S. Miller

iommu-common: Fix error code used in iommu_tbl_range_{alloc,free}().

The value returned from iommu_tbl_range_alloc() (and the one passed
in as the fourth argument to iommu_tbl_range_free()) is not a DMA
address; rather, it is an index into the IOMMU page table.

Therefore using DMA_ERROR_CODE is not appropriate.

Use an error code define that matches this type, IOMMU_ERROR_CODE,
and update all users of this interface.
Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 73958c65
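As an aside, the type distinction matters in practice: the allocator returns an unsigned long table index, so the old failure check in the ldc hunk below (entry < 0) could never fire. Below is a minimal userspace sketch of that point; the demo typedef and DEMO_* macros are stand-ins for the kernel's dma_addr_t, DMA_ERROR_CODE, and the new IOMMU_ERROR_CODE, and none of this is kernel code.

#include <stdio.h>

/* Demo stand-ins mirroring the kernel definitions. */
typedef unsigned long long demo_dma_addr_t;              /* plays the role of dma_addr_t */
#define DEMO_DMA_ERROR_CODE   (~(demo_dma_addr_t)0x0)    /* old, DMA-address-typed sentinel */
#define DEMO_IOMMU_ERROR_CODE (~(unsigned long)0)        /* new, index-typed sentinel */

int main(void)
{
        unsigned long entry = DEMO_IOMMU_ERROR_CODE;     /* simulate a failed allocation */

        /* The check this commit removes from the ldc code: entry is
         * unsigned, so "< 0" is always false (compilers warn about
         * this) and the failure is silently missed. */
        if (entry < 0)
                puts("never reached");

        /* Comparing against the sentinel of the matching type works. */
        if (entry == DEMO_IOMMU_ERROR_CODE)
                puts("allocation failure detected");
        return 0;
}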
@@ -161,7 +161,7 @@ static inline iopte_t *alloc_npages(struct device *dev,
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
-	if (unlikely(entry == DMA_ERROR_CODE))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
@@ -253,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
@@ -426,7 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	iommu_free_ctx(iommu, ctx);
 	spin_unlock_irqrestore(&iommu->lock, flags);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -492,7 +492,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
-		if (unlikely(entry == DMA_ERROR_CODE)) {
+		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 			if (printk_ratelimit())
 				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 				       " npages %lx\n", iommu, paddr, npages);
@@ -571,7 +571,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 				iopte_make_dummy(iommu, base + j);
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     DMA_ERROR_CODE);
+					     IOMMU_ERROR_CODE);
 
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -648,7 +648,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			iopte_make_dummy(iommu, base + i);
 		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-				     DMA_ERROR_CODE);
+				     IOMMU_ERROR_CODE);
 		sg = sg_next(sg);
 	}
...
@@ -1953,7 +1953,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
 	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
 				      npages, NULL, (unsigned long)-1, 0);
-	if (unlikely(entry < 0))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
...
@@ -159,7 +159,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
-	if (unlikely(entry == DMA_ERROR_CODE))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto range_alloc_fail;
 
 	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -187,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
@@ -226,7 +226,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	devhandle = pbm->devhandle;
 	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
 	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -256,7 +256,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
-	if (unlikely(entry == DMA_ERROR_CODE))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto bad;
 
 	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -288,7 +288,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
@@ -317,7 +317,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	bus_addr &= IO_PAGE_MASK;
 	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
 	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -376,7 +376,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
-		if (unlikely(entry == DMA_ERROR_CODE)) {
+		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 			if (printk_ratelimit())
 				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 				       " npages %lx\n", iommu, paddr, npages);
@@ -451,7 +451,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     DMA_ERROR_CODE);
+					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -496,7 +496,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		entry = ((dma_handle - tbl->table_map_base) >> shift);
 		dma_4v_iommu_demap(&devhandle, entry, npages);
 		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-				     DMA_ERROR_CODE);
+				     IOMMU_ERROR_CODE);
 		sg = sg_next(sg);
 	}
...
@@ -7,6 +7,7 @@
 
 #define IOMMU_POOL_HASHBITS     4
 #define IOMMU_NR_POOLS          (1 << IOMMU_POOL_HASHBITS)
+#define IOMMU_ERROR_CODE	(~(unsigned long) 0)
 
 struct iommu_pool {
 	unsigned long	start;
...
@@ -11,10 +11,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/hash.h>
 
-#ifndef	DMA_ERROR_CODE
-#define	DMA_ERROR_CODE (~(dma_addr_t)0x0)
-#endif
-
 static unsigned long iommu_large_alloc = 15;
 
 static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
@@ -124,7 +120,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 	/* Sanity check */
 	if (unlikely(npages == 0)) {
 		WARN_ON_ONCE(1);
-		return DMA_ERROR_CODE;
+		return IOMMU_ERROR_CODE;
 	}
 
 	if (largealloc) {
@@ -207,7 +203,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 			goto again;
 		} else {
 			/* give up */
-			n = DMA_ERROR_CODE;
+			n = IOMMU_ERROR_CODE;
 			goto bail;
 		}
 	}
@@ -259,7 +255,7 @@ void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
 	unsigned long flags;
 	unsigned long shift = iommu->table_shift;
 
-	if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
+	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
 		entry = (dma_addr - iommu->table_map_base) >> shift;
 	pool = get_pool(iommu, entry);
...
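For reference, the default addr->entry mapping kept at the end of iommu_tbl_range_free() is simple shift arithmetic. A toy, userspace-only calculation follows; the base, shift, and address are made-up values chosen purely for illustration.

#include <stdio.h>

int main(void)
{
        /* Hypothetical values, for illustration only. */
        unsigned long table_map_base = 0xfff0000000UL; /* DVMA base of the map table */
        unsigned long shift          = 13;             /* per-entry page shift */
        unsigned long dma_addr       = 0xfff0004000UL; /* address being freed */

        /* What iommu_tbl_range_free() computes when no explicit entry
         * (i.e. entry == IOMMU_ERROR_CODE) is passed in. */
        unsigned long entry = (dma_addr - table_map_base) >> shift;

        printf("entry = %lu\n", entry); /* 0x4000 >> 13 == 2 */
        return 0;
}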