Commit 0aeba2d0 authored by Christoph Hellwig, committed by Michael Ellerman

powerpc/dma: properly wire up the unmap_page and unmap_sg methods

The unmap methods need to transfer memory ownership back from the
device to the cpu by the same means as dma_sync_*_to_cpu. I'm not
sure powerpc needs to do any work in this transfer direction, but
given that it already invalidates the caches in dma_sync_*_to_cpu,
we should make sure we also do so on unmapping.
Signed-off-by: Christoph Hellwig <hch@lst.de>
[mpe: s/dir/direction in dma_nommu_unmap_page()]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 92863569
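
For context, the streaming DMA API treats map/unmap as ownership transfers: dma_map_*() hands a buffer to the device, dma_unmap_*() hands it back to the CPU, and on non-cache-coherent powerpc that hand-back is where stale cache lines must be invalidated. Below is a minimal sketch of the driver-side pattern the change affects; the function name, buffer and completion details are hypothetical placeholders, not part of this commit.

/* Hypothetical driver fragment: illustrates the ownership hand-off that
 * dma_nommu_unmap_page() now completes with a cache sync on powerpc.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* CPU -> device: after this the CPU must not touch buf */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device DMAs into buf and signals completion ... */

	/*
	 * Device -> CPU: with this commit the nommu unmap path also
	 * invalidates the CPU caches (unless DMA_ATTR_SKIP_CPU_SYNC is
	 * passed), so the CPU sees the DMA'd data, not stale lines.
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);

	return 0;
}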
@@ -210,10 +210,15 @@ static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction direction,
 		unsigned long attrs)
 {
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
 static u64 dma_nommu_get_required_mask(struct device *dev)
@@ -247,6 +252,8 @@ static inline void dma_nommu_unmap_page(struct device *dev,
 					enum dma_data_direction direction,
 					unsigned long attrs)
 {
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync(bus_to_virt(dma_address), size, direction);
 }
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
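
The DMA_ATTR_SKIP_CPU_SYNC check in the second hunk follows the documented semantics of that attribute: a caller that has already synced the buffer (or will do so itself) can ask unmap to skip the cache work. A hedged caller-side sketch of that pattern, with a placeholder function name, might look like this:

/* Hypothetical example: unmapping without the implicit CPU sync.
 * The caller takes responsibility for dma_sync_single_for_cpu()
 * before the CPU touches the buffer again.
 */
#include <linux/dma-mapping.h>

static void example_teardown(struct device *dev, dma_addr_t dma, size_t len)
{
	/* Pull ownership back explicitly for the region we care about */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* The unmap itself can now skip the (redundant) cache invalidate */
	dma_unmap_single_attrs(dev, dma, len, DMA_FROM_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
}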