Commit 3036ec59 authored by Pasha Tatashin's avatar Pasha Tatashin Committed by Will Deacon

arm64: kexec: Use dcache ops macros instead of open-coding

kexec does dcache maintenance when it re-writes all memory. Our
dcache_by_line_op macro depends on reading the sanitized DminLine
from memory. Kexec may have overwritten this, so it open-codes the
sequence instead.

dcache_by_line_op is a whole set of macros; it uses dcache_line_size,
which uses read_ctr for the sanitised DminLine. Reading the DminLine
is the first thing dcache_by_line_op does.

Rename dcache_by_line_op to dcache_by_myline_op and take DminLine as
an argument. Kexec can now use the slightly smaller macro.

This makes up-coming changes to the dcache maintenance easier on
the eye.

Code generated by the existing callers is unchanged.
Suggested-by: James Morse <james.morse@arm.com>
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210930143113.1502553-7-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 5bb6834f
...@@ -405,19 +405,19 @@ alternative_endif ...@@ -405,19 +405,19 @@ alternative_endif
/* /*
* Macro to perform a data cache maintenance for the interval * Macro to perform a data cache maintenance for the interval
* [start, end) * [start, end) with dcache line size explicitly provided.
* *
* op: operation passed to dc instruction * op: operation passed to dc instruction
* domain: domain used in dsb instruciton * domain: domain used in dsb instruciton
* start: starting virtual address of the region * start: starting virtual address of the region
* end: end virtual address of the region * end: end virtual address of the region
* linesz: dcache line size
* fixup: optional label to branch to on user fault * fixup: optional label to branch to on user fault
* Corrupts: start, end, tmp1, tmp2 * Corrupts: start, end, tmp
*/ */
.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup .macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
dcache_line_size \tmp1, \tmp2 sub \tmp, \linesz, #1
sub \tmp2, \tmp1, #1 bic \start, \start, \tmp
bic \start, \start, \tmp2
.Ldcache_op\@: .Ldcache_op\@:
.ifc \op, cvau .ifc \op, cvau
__dcache_op_workaround_clean_cache \op, \start __dcache_op_workaround_clean_cache \op, \start
...@@ -436,7 +436,7 @@ alternative_endif ...@@ -436,7 +436,7 @@ alternative_endif
.endif .endif
.endif .endif
.endif .endif
add \start, \start, \tmp1 add \start, \start, \linesz
cmp \start, \end cmp \start, \end
b.lo .Ldcache_op\@ b.lo .Ldcache_op\@
dsb \domain dsb \domain
...@@ -444,6 +444,22 @@ alternative_endif ...@@ -444,6 +444,22 @@ alternative_endif
_cond_extable .Ldcache_op\@, \fixup _cond_extable .Ldcache_op\@, \fixup
.endm .endm
/*
* Macro to perform a data cache maintenance for the interval
* [start, end)
*
* op: operation passed to dc instruction
* domain: domain used in dsb instruciton
* start: starting virtual address of the region
* end: end virtual address of the region
* fixup: optional label to branch to on user fault
* Corrupts: start, end, tmp1, tmp2
*/
.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
dcache_line_size \tmp1, \tmp2
dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
.endm
/* /*
* Macro to perform an instruction cache maintenance for the interval * Macro to perform an instruction cache maintenance for the interval
* [start, end) * [start, end)
......
...@@ -41,16 +41,9 @@ SYM_CODE_START(arm64_relocate_new_kernel) ...@@ -41,16 +41,9 @@ SYM_CODE_START(arm64_relocate_new_kernel)
tbz x16, IND_SOURCE_BIT, .Ltest_indirection tbz x16, IND_SOURCE_BIT, .Ltest_indirection
/* Invalidate dest page to PoC. */ /* Invalidate dest page to PoC. */
mov x2, x13 mov x2, x13
add x20, x2, #PAGE_SIZE add x1, x2, #PAGE_SIZE
sub x1, x15, #1 dcache_by_myline_op ivac, sy, x2, x1, x15, x20
bic x2, x2, x1
2: dc ivac, x2
add x2, x2, x15
cmp x2, x20
b.lo 2b
dsb sy
copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8 copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
b .Lnext b .Lnext
.Ltest_indirection: .Ltest_indirection:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment