Commit 309dbbab authored by Russell King, committed by Russell King

[ARM] dma: don't touch cache on dma_*_for_cpu()

As per the dma_unmap_* calls, we don't touch the cache when a DMA
buffer transitions from device to CPU ownership.  Presently, no
problems have been identified with speculative cache prefetching
which in itself is a new feature in later architectures.  We may
have to revisit the DMA API later for these architectures anyway.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 0e18b5d7
@@ -376,11 +376,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
         BUG_ON(!valid_dma_direction(dir));
 
-        if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-                return;
-
-        if (!arch_is_coherent())
-                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
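For readers less familiar with the streaming DMA API, the following is a minimal sketch of the ownership handoff the commit message refers to. The example_rx() helper, buffer and length are hypothetical; only the dma_* calls are the real kernel API. With this change, the *_for_cpu transition on ARM no longer performs cache maintenance; caches are dealt with when the buffer is handed (back) to the device.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical receive path: the buffer bounces between device and CPU
 * ownership.  On non-coherent ARM, cache maintenance happens when the
 * buffer is handed to the device; handing it back to the CPU is now a
 * no-op apart from dmabounce handling.
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* CPU -> device ownership: mapping performs cache maintenance */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... start the transfer and wait for completion ... */

        /* device -> CPU ownership: no cache maintenance after this change */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        /* ... CPU inspects the received data in buf ... */

        /* CPU -> device ownership: caches are handled here before reuse */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

        /* ... run another transfer ... */

        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
}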
@@ -585,12 +585,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
         int i;
 
         for_each_sg(sg, s, nents, i) {
-                if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-                                            sg_dma_len(s), dir))
-                        continue;
-
-                if (!arch_is_coherent())
-                        dma_cache_maint(sg_virt(s), s->length, dir);
+                dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+                                       sg_dma_len(s), dir);
         }
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
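Likewise for scatter-gather mappings, a sketch of how dma_sync_sg_for_cpu()/dma_sync_sg_for_device() bracket CPU access to a mapped list; the example_sg_rx() helper and buffer names are made up for illustration. After this change the _for_cpu call above only invokes dmabounce handling, while cache maintenance on non-coherent ARM stays with the _for_device direction.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical two-buffer scatter-gather receive using the sg sync calls. */
static int example_sg_rx(struct device *dev, void *a, void *b, unsigned int len)
{
        struct scatterlist sgl[2];
        int nents;

        sg_init_table(sgl, 2);
        sg_set_buf(&sgl[0], a, len);
        sg_set_buf(&sgl[1], b, len);

        /* CPU -> device ownership: mapping performs any cache maintenance */
        nents = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
        if (nents == 0)
                return -ENOMEM;

        /* ... run a transfer ... */

        /* device -> CPU ownership: after this commit, no cache maintenance */
        dma_sync_sg_for_cpu(dev, sgl, 2, DMA_FROM_DEVICE);
        /* ... CPU reads the data ... */

        /* CPU -> device ownership: caches handled here before reuse */
        dma_sync_sg_for_device(dev, sgl, 2, DMA_FROM_DEVICE);

        /* ... run another transfer ... */

        dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
        return 0;
}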