Commit f50a2bd2 authored by Alexander Duyck, committed by Linus Torvalds

arch/parisc: add option to skip DMA sync as a part of map and unmap

This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to
avoid invoking cache line invalidation if the driver will just handle it
via a sync_for_cpu or sync_for_device call.

Link: http://lkml.kernel.org/r/20161110113529.76501.44762.stgit@ahduyck-blue-test.jf.intel.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 043b42bc
...@@ -459,7 +459,9 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, ...@@ -459,7 +459,9 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
void *addr = page_address(page) + offset; void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE); BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) addr, size); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr); return virt_to_phys(addr);
} }
...@@ -469,8 +471,11 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, ...@@ -469,8 +471,11 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
{ {
BUG_ON(direction == DMA_NONE); BUG_ON(direction == DMA_NONE);
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
if (direction == DMA_TO_DEVICE) if (direction == DMA_TO_DEVICE)
return; return;
/* /*
* For PCI_DMA_FROMDEVICE this flush is not necessary for the * For PCI_DMA_FROMDEVICE this flush is not necessary for the
...@@ -479,7 +484,6 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, ...@@ -479,7 +484,6 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
*/ */
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size); flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
return;
} }
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
...@@ -496,6 +500,10 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, ...@@ -496,6 +500,10 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr); sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sg) = sg->length; sg_dma_len(sg) = sg->length;
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;
flush_kernel_dcache_range(vaddr, sg->length); flush_kernel_dcache_range(vaddr, sg->length);
} }
return nents; return nents;
...@@ -510,14 +518,16 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, ...@@ -510,14 +518,16 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(direction == DMA_NONE); BUG_ON(direction == DMA_NONE);
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
if (direction == DMA_TO_DEVICE) if (direction == DMA_TO_DEVICE)
return; return;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i) for_each_sg(sglist, sg, nents, i)
flush_kernel_vmap_range(sg_virt(sg), sg->length); flush_kernel_vmap_range(sg_virt(sg), sg->length);
return;
} }
static void pa11_dma_sync_single_for_cpu(struct device *dev, static void pa11_dma_sync_single_for_cpu(struct device *dev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment