Commit d11e3d3d authored by Christoph Hellwig

powerpc/iommu: remove the mapping_error dma_map_ops method

The powerpc iommu code already returns (~(dma_addr_t)0x0) on mapping
failures, so we can switch over to returning DMA_MAPPING_ERROR and let
the core dma-mapping code handle the rest.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
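
For context, this conversion relies on the generic sentinel and check that the core dma-mapping code now provides. A minimal sketch of the include/linux/dma-mapping.h side (introduced earlier in this series; shown here for reference only, not part of this patch):

```c
/* Sketch of the generic definitions in include/linux/dma-mapping.h. */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	/* Every dma_map_ops instance now encodes failure as this one value. */
	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}
```

Since the powerpc sentinel was already all-ones, the same bit pattern as DMA_MAPPING_ERROR, the substitution below is purely mechanical and no behaviour changes.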
parent 72fd97bf
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -143,8 +143,6 @@ struct scatterlist;
 
 #ifdef CONFIG_PPC64
 
-#define IOMMU_MAPPING_ERROR		(~(dma_addr_t)0x0)
-
 static inline void set_iommu_table_base(struct device *dev,
 					struct iommu_table *base)
 {
@@ -242,8 +240,6 @@ static inline int __init tce_iommu_bus_notifier_init(void)
 }
 #endif /* !CONFIG_IOMMU_API */
 
-int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
 #else
 
 static inline void *get_iommu_table_base(struct device *dev)
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -106,11 +106,6 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
 	return mask;
 }
 
-int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == IOMMU_MAPPING_ERROR;
-}
-
 struct dma_map_ops dma_iommu_ops = {
 	.alloc = dma_iommu_alloc_coherent,
 	.free = dma_iommu_free_coherent,
@@ -121,6 +116,5 @@ struct dma_map_ops dma_iommu_ops = {
 	.map_page = dma_iommu_map_page,
 	.unmap_page = dma_iommu_unmap_page,
 	.get_required_mask = dma_iommu_get_required_mask,
-	.mapping_error = dma_iommu_mapping_error,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -197,11 +197,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	if (unlikely(npages == 0)) {
 		if (printk_ratelimit())
 			WARN_ON(1);
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	if (should_fail_iommu(dev))
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	/*
 	 * We don't need to disable preemption here because any CPU can
@@ -277,7 +277,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 		} else {
 			/* Give up */
 			spin_unlock_irqrestore(&(pool->lock), flags);
-			return IOMMU_MAPPING_ERROR;
+			return DMA_MAPPING_ERROR;
 		}
 	}
 
@@ -309,13 +309,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      unsigned long attrs)
 {
 	unsigned long entry;
-	dma_addr_t ret = IOMMU_MAPPING_ERROR;
+	dma_addr_t ret = DMA_MAPPING_ERROR;
 	int build_fail;
 
 	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
-	if (unlikely(entry == IOMMU_MAPPING_ERROR))
-		return IOMMU_MAPPING_ERROR;
+	if (unlikely(entry == DMA_MAPPING_ERROR))
+		return DMA_MAPPING_ERROR;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
@@ -327,12 +327,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 
 	/* tbl->it_ops->set() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
-	 * IOMMU_MAPPING_ERROR. For all other errors the functionality is
+	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 	 * not altered.
 	 */
 	if (unlikely(build_fail)) {
 		__iommu_free(tbl, ret, npages);
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
@@ -477,7 +477,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
 		/* Handle failure */
-		if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
+		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 			if (!(attrs & DMA_ATTR_NO_WARN) &&
 			    printk_ratelimit())
 				dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -544,7 +544,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	 */
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = IOMMU_MAPPING_ERROR;
+		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -562,7 +562,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
-			s->dma_address = IOMMU_MAPPING_ERROR;
+			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -776,7 +776,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 			  unsigned long mask, enum dma_data_direction direction,
 			  unsigned long attrs)
 {
-	dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
+	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 	void *vaddr;
 	unsigned long uaddr;
 	unsigned int npages, align;
@@ -796,7 +796,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 					 mask >> tbl->it_page_shift, align,
 					 attrs);
-		if (dma_handle == IOMMU_MAPPING_ERROR) {
+		if (dma_handle == DMA_MAPPING_ERROR) {
 			if (!(attrs & DMA_ATTR_NO_WARN) &&
 			    printk_ratelimit()) {
 				dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -868,7 +868,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
-	if (mapping == IOMMU_MAPPING_ERROR) {
+	if (mapping == DMA_MAPPING_ERROR) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
 	}
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -654,7 +654,6 @@ static const struct dma_map_ops dma_iommu_fixed_ops = {
 	.dma_supported = dma_suported_and_switch,
 	.map_page = dma_fixed_map_page,
 	.unmap_page = dma_fixed_unmap_page,
-	.mapping_error = dma_iommu_mapping_error,
 };
 
 static void cell_dma_dev_setup(struct device *dev)
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 	struct iommu_table *tbl;
-	dma_addr_t ret = IOMMU_MAPPING_ERROR;
+	dma_addr_t ret = DMA_MAPPING_ERROR;
 
 	tbl = get_iommu_table_base(dev);
 	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
@@ -625,7 +625,6 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
 	.unmap_page = vio_dma_iommu_unmap_page,
 	.dma_supported = vio_dma_iommu_dma_supported,
 	.get_required_mask = vio_dma_get_required_mask,
-	.mapping_error = dma_iommu_mapping_error,
 };
 
 /**
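
Driver-side callers are unaffected by this change: they never test against the powerpc-private sentinel, they call dma_mapping_error(), which now performs the comparison in the core. A hypothetical usage sketch (dev and page are assumed to be a valid device and page):

```c
/* Hypothetical caller; this pattern is unchanged by the patch. */
dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

if (dma_mapping_error(dev, addr))
	return -ENOMEM;	/* the core compared addr against DMA_MAPPING_ERROR */
```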