Commit d34fdb70 authored by Kwangwoo Lee, committed by Will Deacon

arm64: mm: convert __dma_* routines to use start, size

__dma_* routines have been converted to use start and size instead of
start and end addresses. The patch was originally for adding
__clean_dcache_area_poc(), which will be used in the pmem driver to clean
the dcache to the PoC (Point of Coherency) in arch_wb_cache_pmem().

The functionality of __clean_dcache_area_poc() was equivalent to
__dma_clean_range(). The difference was that __dma_clean_range() took an
end address, while __clean_dcache_area_poc() takes a size.

Thus, __clean_dcache_area_poc() has been implemented as a fall-through
into __dma_clean_area(), after converting the __dma_* routines to take
start and size instead of start and end.

As a consequence of using start and size, the __dma_* routines have also
been renamed to follow the terminology below (a short sketch of the
resulting calling-convention change follows this list):
    area: takes a start and size
    range: takes a start and end
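
For illustration, a minimal C sketch of the calling-convention change at
a call site. example() and buf are hypothetical names; the two prototypes
match the old and new declarations in cacheflush.h:

#include <stddef.h>

/* Old interface: takes a [start, end) range */
extern void __dma_flush_range(const void *start, const void *end);
/* New interface: takes a start and a size */
extern void __dma_flush_area(const void *start, size_t size);

static void example(void *buf, size_t size)
{
	/* Before this patch, callers computed the end address: */
	/* __dma_flush_range(buf, (char *)buf + size); */

	/* After this patch, callers pass the size directly: */
	__dma_flush_area(buf, size);
}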
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Kwangwoo Lee <kwangwoo.lee@sk.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 421dd6fa
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -68,6 +68,7 @@
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
@@ -85,7 +86,7 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
  */
 extern void __dma_map_area(const void *, size_t, int);
 extern void __dma_unmap_area(const void *, size_t, int);
-extern void __dma_flush_range(const void *, const void *);
+extern void __dma_flush_area(const void *, size_t);
 
 /*
  * Copy user data from/to a page which is mapped into a different
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -105,19 +105,20 @@ ENTRY(__clean_dcache_area_pou)
 ENDPROC(__clean_dcache_area_pou)
 
 /*
- *	__inval_cache_range(start, end)
- *	- start   - start address of region
- *	- end     - end address of region
+ *	__dma_inv_area(start, size)
+ *	- start   - virtual start address of region
+ *	- size    - size in question
  */
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+	add	x1, x1, x0
 	/* FALLTHROUGH */
 
 /*
- *	__dma_inv_range(start, end)
- *	- start   - virtual start address of region
- *	- end     - virtual end address of region
+ *	__inval_cache_range(start, end)
+ *	- start   - start address of region
+ *	- end     - end address of region
  */
-__dma_inv_range:
+ENTRY(__inval_cache_range)
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -136,46 +137,43 @@ __dma_inv_range:
 	dsb	sy
 	ret
 ENDPIPROC(__inval_cache_range)
-ENDPROC(__dma_inv_range)
+ENDPROC(__dma_inv_area)
+
+/*
+ *	__clean_dcache_area_poc(kaddr, size)
+ *
+ *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	are cleaned to the PoC.
+ *
+ *	- kaddr   - kernel address
+ *	- size    - size in question
+ */
+ENTRY(__clean_dcache_area_poc)
+	/* FALLTHROUGH */
 
 /*
- *	__dma_clean_range(start, end)
+ *	__dma_clean_area(start, size)
  *	- start   - virtual start address of region
- *	- end     - virtual end address of region
+ *	- size    - size in question
  */
-__dma_clean_range:
-	dcache_line_size x2, x3
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-1:
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	cvac, x0
-alternative_else
-	dc	civac, x0
-alternative_endif
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
+__dma_clean_area:
+	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 	ret
-ENDPROC(__dma_clean_range)
+ENDPIPROC(__clean_dcache_area_poc)
+ENDPROC(__dma_clean_area)
 
 /*
- *	__dma_flush_range(start, end)
+ *	__dma_flush_area(start, size)
+ *
+ *	clean & invalidate D / U line
+ *
  *	- start   - virtual start address of region
- *	- end     - virtual end address of region
+ *	- size    - size in question
  */
-ENTRY(__dma_flush_range)
-	dcache_line_size x2, x3
-	sub	x3, x2, #1
-	bic	x0, x0, x3
-1:	dc	civac, x0			// clean & invalidate D / U line
-	add	x0, x0, x2
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
+ENTRY(__dma_flush_area)
+	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
-ENDPIPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_area)
 
 /*
  *	__dma_map_area(start, size, dir)
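
The open-coded loops removed above are folded into the dcache_by_line_op
assembler macro. As a rough C model of what such a by-line operation does
(cache_line_size() and dc_op() are hypothetical stand-ins for the
dcache_line_size macro and a single `dc` instruction; this is a sketch,
not the kernel implementation):

#include <stddef.h>
#include <stdint.h>

extern size_t cache_line_size(void);	/* stand-in for dcache_line_size */
extern void dc_op(uintptr_t line);	/* stand-in for one dc cvac/civac */

static void dcache_by_line(uintptr_t start, size_t size)
{
	size_t line = cache_line_size();
	uintptr_t end = start + size;	/* the size is turned back into an end */

	start &= ~(uintptr_t)(line - 1);	/* align down to a line boundary */
	do {
		dc_op(start);			/* maintain one cache line */
		start += line;
	} while (start < end);
	/* the real macro finishes with a dsb barrier */
}

The same size-to-end conversion is why __dma_inv_area can simply prepend
`add x1, x1, x0` and fall through into the existing end-based
__inval_cache_range body.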
@@ -184,10 +182,9 @@ ENDPIPROC(__dma_flush_range)
  *	- dir	- DMA direction
  */
 ENTRY(__dma_map_area)
-	add	x1, x1, x0
 	cmp	w2, #DMA_FROM_DEVICE
-	b.eq	__dma_inv_range
-	b	__dma_clean_range
+	b.eq	__dma_inv_area
+	b	__dma_clean_area
 ENDPIPROC(__dma_map_area)
 
 /*
@@ -197,8 +194,7 @@ ENDPIPROC(__dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(__dma_unmap_area)
-	add	x1, x1, x0
 	cmp	w2, #DMA_TO_DEVICE
-	b.ne	__dma_inv_range
+	b.ne	__dma_inv_area
 	ret
 ENDPIPROC(__dma_unmap_area)
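
In C terms, the two entry points above implement the usual streaming-DMA
cache policy. A minimal sketch, assuming illustrative C wrappers around
the assembly entries (the enum values match the kernel's
dma_data_direction; __dma_inv_area/__dma_clean_area are really local
assembly labels, not C-callable symbols):

#include <stddef.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

extern void __dma_inv_area(const void *start, size_t size);
extern void __dma_clean_area(const void *start, size_t size);

static void dma_map_area(const void *start, size_t size,
			 enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		__dma_inv_area(start, size);	/* device will write: drop CPU lines */
	else
		__dma_clean_area(start, size);	/* CPU wrote: push data to the PoC */
}

static void dma_unmap_area(const void *start, size_t size,
			   enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		__dma_inv_area(start, size);	/* discard lines the device made stale */
}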
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return ptr;
 
 	/* remove any dirty cache lines on the kernel alias */
-	__dma_flush_range(ptr, ptr + size);
+	__dma_flush_area(ptr, size);
 
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
@@ -387,7 +387,7 @@ static int __init atomic_pool_init(void)
 		void *page_addr = page_address(page);
 
 		memset(page_addr, 0, atomic_pool_size);
-		__dma_flush_range(page_addr, page_addr + atomic_pool_size);
+		__dma_flush_area(page_addr, atomic_pool_size);
 
 		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
 		if (!atomic_pool)
@@ -548,7 +548,7 @@ fs_initcall(dma_debug_do_init);
 /* Thankfully, all cache ops are by VA so we can ignore phys here */
 static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
 {
-	__dma_flush_range(virt, virt + PAGE_SIZE);
+	__dma_flush_area(virt, PAGE_SIZE);
 }
 
 static void *__iommu_alloc_attrs(struct device *dev, size_t size,
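
Finally, the pmem use case from the commit message, sketched in C.
arch_wb_cache_pmem() is not part of this patch; this is only an
illustration of the stated intent that the helper cleans to the PoC via
the new entry point:

#include <stddef.h>

extern void __clean_dcache_area_poc(void *addr, size_t len);

/* Hypothetical pmem write-back helper: clean any dirty D-cache lines
 * for [addr, addr + size) to the Point of Coherency, so the data is
 * visible to persistent memory. */
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
	__clean_dcache_area_poc(addr, size);
}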