Commit f69e342e authored by Leon Romanovsky's avatar Leon Romanovsky Committed by Christoph Hellwig

dma-mapping: call ->unmap_page and ->unmap_sg unconditionally

Almost all instances of the dma_map_ops ->map_page()/map_sg() methods
implement ->unmap_page()/unmap_sg() too.  The one instance which doesn't is
dma_dummy_ops, which is used to fail the DMA mapping and thus there won't
be any calls to ->unmap_page()/unmap_sg().

Remove the checks for ->unmap_page()/unmap_sg() and call them directly to
create an interface that is symmetrical to ->map_page()/map_sg().
Signed-off-by: default avatarLeon Romanovsky <leonro@nvidia.com>
Signed-off-by: default avatarLeon Romanovsky <leon@kernel.org>
Reviewed-by: default avatarRobin Murphy <robin.murphy@arm.com>
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
parent 3be9b846
...@@ -17,6 +17,15 @@ static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page, ...@@ -17,6 +17,15 @@ static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
{ {
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
} }
/*
 * Unreachable in practice: dma_dummy_map_page() always fails with
 * DMA_MAPPING_ERROR, so the DMA core never has a valid page mapping
 * to tear down.  Warn once if we ever get here anyway.
 */
static void dma_dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	WARN_ON_ONCE(1);
}
static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl, static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir, int nelems, enum dma_data_direction dir,
...@@ -25,6 +34,16 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl, ...@@ -25,6 +34,16 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
return -EINVAL; return -EINVAL;
} }
/*
 * Unreachable in practice: dma_dummy_map_sg() always fails with -EINVAL,
 * so the DMA core never has a valid scatterlist mapping to tear down.
 * Warn once if we ever get here anyway.
 */
static void dma_dummy_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		unsigned long attrs)
{
	WARN_ON_ONCE(1);
}
static int dma_dummy_supported(struct device *hwdev, u64 mask) static int dma_dummy_supported(struct device *hwdev, u64 mask)
{ {
return 0; return 0;
...@@ -33,6 +52,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask) ...@@ -33,6 +52,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask)
const struct dma_map_ops dma_dummy_ops = { const struct dma_map_ops dma_dummy_ops = {
.mmap = dma_dummy_mmap, .mmap = dma_dummy_mmap,
.map_page = dma_dummy_map_page, .map_page = dma_dummy_map_page,
.unmap_page = dma_dummy_unmap_page,
.map_sg = dma_dummy_map_sg, .map_sg = dma_dummy_map_sg,
.unmap_sg = dma_dummy_unmap_sg,
.dma_supported = dma_dummy_supported, .dma_supported = dma_dummy_supported,
}; };
...@@ -177,7 +177,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, ...@@ -177,7 +177,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
if (dma_map_direct(dev, ops) || if (dma_map_direct(dev, ops) ||
arch_dma_unmap_page_direct(dev, addr + size)) arch_dma_unmap_page_direct(dev, addr + size))
dma_direct_unmap_page(dev, addr, size, dir, attrs); dma_direct_unmap_page(dev, addr, size, dir, attrs);
else if (ops->unmap_page) else
ops->unmap_page(dev, addr, size, dir, attrs); ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir); debug_dma_unmap_page(dev, addr, size, dir);
} }
...@@ -291,7 +291,7 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, ...@@ -291,7 +291,7 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
if (dma_map_direct(dev, ops) || if (dma_map_direct(dev, ops) ||
arch_dma_unmap_sg_direct(dev, sg, nents)) arch_dma_unmap_sg_direct(dev, sg, nents))
dma_direct_unmap_sg(dev, sg, nents, dir, attrs); dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
else if (ops->unmap_sg) else
ops->unmap_sg(dev, sg, nents, dir, attrs); ops->unmap_sg(dev, sg, nents, dir, attrs);
} }
EXPORT_SYMBOL(dma_unmap_sg_attrs); EXPORT_SYMBOL(dma_unmap_sg_attrs);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment