Commit 9f318d47 authored by Alexander Duyck, committed by Linus Torvalds

arch/mips: add option to skip DMA sync as a part of map and unmap

This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC, which lets us avoid
invoking cache line invalidation if the driver will just handle it via a
sync_for_cpu or sync_for_device call.
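
A minimal sketch (not part of this patch) of how a driver might use the
attribute: map a receive buffer with DMA_ATTR_SKIP_CPU_SYNC, then bracket
each hardware access with explicit sync calls. The helper names and buffer
are hypothetical; only dma_map_single_attrs(), dma_sync_single_for_cpu()
and dma_sync_single_for_device() are existing kernel APIs.

	/* Hypothetical usage sketch, not from this patch. */
	#include <linux/dma-mapping.h>

	static dma_addr_t rx_buf_map(struct device *dev, void *buf, size_t len)
	{
		/* Skip the cache invalidation normally done at map time. */
		return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
					    DMA_ATTR_SKIP_CPU_SYNC);
	}

	static void rx_buf_process(struct device *dev, dma_addr_t dma, size_t len)
	{
		/* Make the device's writes visible to the CPU before reading. */
		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

		/* ... look at the received data ... */

		/* Hand the buffer back to the device for the next receive. */
		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
	}

The same attribute can be passed at unmap time (e.g. via
dma_unmap_single_attrs()) so the buffer is not invalidated a second time.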

Link: http://lkml.kernel.org/r/20161110113513.76501.32321.stgit@ahduyck-blue-test.jf.intel.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Keguang Zhang <keguang.zhang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 98ac2fc2
@@ -61,7 +61,7 @@ static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			       int nents, enum dma_data_direction dir,
 			       unsigned long attrs)
 {
-	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
+	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
 	mb();
 	return r;
@@ -293,7 +293,7 @@ static inline void __dma_sync(struct page *page,
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, unsigned long attrs)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_sync(dma_addr_to_page(dev, dma_addr),
 			   dma_addr & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -307,7 +307,8 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	for_each_sg(sglist, sg, nents, i) {
-		if (!plat_device_is_coherent(dev))
+		if (!plat_device_is_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -324,7 +325,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction,
 	unsigned long attrs)
 {
-	if (!plat_device_is_coherent(dev))
+	if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_sync(page, offset, size, direction);
 	return plat_map_dma_mem_page(dev, page) + offset;
@@ -339,6 +340,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	for_each_sg(sglist, sg, nhwentries, i) {
 		if (!plat_device_is_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 		    direction != DMA_TO_DEVICE)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);