Commit d46befef authored by Robin Murphy, committed by Catalin Marinas

arm64: Convert __inval_cache_range() to area-based

__inval_cache_range() is already the odd one out among our data cache
maintenance routines as the only remaining range-based one; as we're
going to want an invalidation routine to call from C code for the pmem
API, let's tweak the prototype and name to bring it in line with the
clean operations, and to make its relationship with __dma_inv_area()
neatly mirror that of __clean_dcache_area_poc() and __dma_clean_area().
The loop clearing the early page tables gets mildly massaged in the
process for the sake of consistency.
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 09c2a7dc
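In C terms the interface change is small: instead of an assembly-only, range-based entry point taking a start and end address, the invalidation routine now takes a base address and length, exactly like the existing clean operations. A minimal sketch of how a C caller might use the new prototype (the arch_invalidate_pmem_sketch() wrapper below is hypothetical and not part of this commit):

	#include <stddef.h>

	/* New area-based prototype, exported alongside the clean routines */
	extern void __inval_dcache_area(void *addr, size_t len);

	/* Hypothetical pmem-style caller: drop any cached copies of the buffer
	 * so subsequent reads go to the underlying memory. */
	static void arch_invalidate_pmem_sketch(void *addr, size_t size)
	{
		__inval_dcache_area(addr, size);
	}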
@@ -67,6 +67,7 @@
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
+extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
...
@@ -143,8 +143,8 @@ preserve_boot_args:
 	dmb	sy				// needed before dc ivac with
 						// MMU off
-	add	x1, x0, #0x20			// 4 x 8 bytes
-	b	__inval_cache_range		// tail call
+	mov	x1, #0x20			// 4 x 8 bytes
+	b	__inval_dcache_area		// tail call
 ENDPROC(preserve_boot_args)

 /*
@@ -221,20 +221,20 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
-	bl	__inval_cache_range
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bl	__inval_dcache_area

 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
-	cmp	x0, x6
-	b.lo	1b
+	subs	x1, x1, #64
+	b.ne	1b

 	mov	x7, SWAPPER_MM_MMUFLAGS
@@ -307,9 +307,9 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+	ldr	x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
 	dmb	sy
-	bl	__inval_cache_range
+	bl	__inval_dcache_area

 	ret	x28
 ENDPROC(__create_page_tables)
...
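The "mild massage" of the clearing loop mentioned in the commit message is visible above: rather than comparing x0 against an end pointer in x6, the loop now reuses the size already loaded into x1 for __inval_dcache_area and counts it down by 64 bytes (four 16-byte stp stores) per iteration. A rough C equivalent of the new loop, purely illustrative (the directory sizes involved are always multiples of 64 bytes):

	#include <stdint.h>

	/* Zero 'size' bytes of page-table memory, 64 bytes per iteration. */
	static void clear_page_tables_sketch(uint64_t *dir, unsigned long size)
	{
		while (size) {
			dir[0] = 0; dir[1] = 0; dir[2] = 0; dir[3] = 0;
			dir[4] = 0; dir[5] = 0; dir[6] = 0; dir[7] = 0;
			dir += 8;
			size -= 64;	/* mirrors "subs x1, x1, #64; b.ne 1b" */
		}
	}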
@@ -109,20 +109,25 @@ ENTRY(__clean_dcache_area_pou)
 ENDPROC(__clean_dcache_area_pou)

 /*
- *	__dma_inv_area(start, size)
- *	- start   - virtual start address of region
+ *	__inval_dcache_area(kaddr, size)
+ *
+ *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	are invalidated. Any partial lines at the ends of the interval are
+ *	also cleaned to PoC to prevent data loss.
+ *
+ *	- kaddr   - kernel address
  *	- size    - size in question
  */
-__dma_inv_area:
-	add	x1, x1, x0
+ENTRY(__inval_dcache_area)
 	/* FALLTHROUGH */

 /*
- *	__inval_cache_range(start, end)
- *	- start   - start address of region
- *	- end     - end address of region
+ *	__dma_inv_area(start, size)
+ *	- start   - virtual start address of region
+ *	- size    - size in question
  */
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+	add	x1, x1, x0
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -140,7 +145,7 @@ ENTRY(__inval_cache_range)
 	b.lo	2b
 	dsb	sy
 	ret
-ENDPIPROC(__inval_cache_range)
+ENDPIPROC(__inval_dcache_area)
 ENDPROC(__dma_inv_area)

 /*
...
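For readers who prefer C to assembly, the body shared by __inval_dcache_area and __dma_inv_area does roughly the following. This is an illustrative sketch only; line_size(), dc_ivac(), dc_civac() and dsb_sy() are hypothetical wrappers around the cache-geometry read (CTR_EL0) and the DC IVAC, DC CIVAC and DSB instructions:

	/* Invalidate all D-cache lines covering [kaddr, kaddr + size).
	 * Partially covered lines at either end are cleaned and invalidated
	 * instead, so bytes outside the interval are not lost. */
	static void inval_dcache_area_sketch(unsigned long kaddr, unsigned long size)
	{
		unsigned long line = line_size();	/* dcache_line_size in the real code */
		unsigned long mask = line - 1;
		unsigned long end  = kaddr + size;

		if (end & mask)				/* unaligned end: clean + invalidate */
			dc_civac(end & ~mask);
		end &= ~mask;

		if (kaddr & mask) {			/* unaligned start: clean + invalidate */
			dc_civac(kaddr & ~mask);
			kaddr = (kaddr & ~mask) + line;
		}

		while (kaddr < end) {			/* fully covered lines: plain invalidate */
			dc_ivac(kaddr);
			kaddr += line;
		}

		dsb_sy();				/* complete the maintenance before returning */
	}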