Commit c812026c authored by Steve Capper, committed by Will Deacon

arm64: mm: Logic to make offset_ttbr1 conditional

When running with a 52-bit userspace VA and a 48-bit kernel VA, we offset
ttbr1_el1 to allow kernel page tables with a 52-bit PTRS_PER_PGD to be
used for both userspace and the kernel.
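
The offset exists because a pgd sized for 52-bit VAs is larger than the
slice of it that 48-bit kernel addresses index: kernel VAs have their top
pgd-index bits all set, so they select the final entries of the enlarged
table. Below is a minimal, self-contained C sketch of the arithmetic,
assuming a 64K-page, 3-level configuration (PGDIR_SHIFT == 42, the only
layout supporting 52-bit VAs here); the macro mirrors
TTBR1_BADDR_4852_OFFSET from pgtable-hwdef.h:

  #include <stdio.h>

  #define PGDIR_SHIFT	42	/* 64K pages, 3 levels of lookup */

  /* Byte distance between the base of a 52-bit-sized pgd (1024
   * entries of 8 bytes each) and the 64-entry slice that 48-bit
   * kernel VAs actually index. */
  #define TTBR1_BADDR_4852_OFFSET \
  	(((1UL << (52 - PGDIR_SHIFT)) - (1UL << (48 - PGDIR_SHIFT))) * 8)

  int main(void)
  {
  	printf("ttbr1 offset = %#lx\n", TTBR1_BADDR_4852_OFFSET);	/* 0x1e00 */
  	return 0;
  }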

Moving to a 52-bit kernel VA, we no longer require this offset to
ttbr1_el1 when running on a system with HW support for 52-bit VAs.

This patch introduces conditional logic in offset_ttbr1 to query
SYS_ID_AA64MMFR2_EL1 whenever 52-bit VAs are selected. If there is HW
support for 52-bit VAs, the ttbr1 offset is skipped.
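
In C terms, the new check amounts to testing the VARange field of
ID_AA64MMFR2_EL1. A hedged sketch, assuming (per the ARM ARM) that a
non-zero field at ID_AA64MMFR2_LVA_SHIFT == 16 indicates 52-bit VA
support; hw_supports_52bit_va is an illustrative helper, not an existing
kernel function:

  #include <stdbool.h>
  #include <stdint.h>

  #define ID_AA64MMFR2_LVA_SHIFT	16	/* VARange field, bits [19:16] */

  /* Raw read of ID_AA64MMFR2_EL1; ID registers are readable at EL1,
   * which is where offset_ttbr1 runs. */
  static inline uint64_t read_id_aa64mmfr2(void)
  {
  	uint64_t val;
  	asm volatile("mrs %0, id_aa64mmfr2_el1" : "=r" (val));
  	return val;
  }

  /* Equivalent of the macro's and/cbnz pair: a non-zero VARange field
   * means the CPU implements 52-bit VAs and the ttbr1 offset can be
   * skipped. */
  static inline bool hw_supports_52bit_va(void)
  {
  	return (read_id_aa64mmfr2() & (0xfUL << ID_AA64MMFR2_LVA_SHIFT)) != 0;
  }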

We choose to read a system register rather than vabits_actual because
offset_ttbr1 can be called in places where the kernel data is not
actually mapped.

Calls to offset_ttbr1 appear to be made from rarely called code paths,
so this extra logic is not expected to adversely affect performance.
Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 5383cc6e
arch/arm64/include/asm/assembler.h

@@ -538,9 +538,17 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
  * ttbr: Value of ttbr to set, modified.
  */
-	.macro	offset_ttbr1, ttbr
+	.macro	offset_ttbr1, ttbr, tmp
 #ifdef CONFIG_ARM64_USER_VA_BITS_52
 	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
+
+#ifdef CONFIG_ARM64_VA_BITS_52
+	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
+	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+	cbnz	\tmp, .Lskipoffs_\@
+	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+.Lskipoffs_\@ :
+#endif
 	.endm
@@ -550,7 +558,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * to be nop'ed out when dealing with 52-bit kernel VAs.
  */
 	.macro	restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_VA_BITS_52)
 	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
 	.endm
arch/arm64/kernel/head.S

@@ -777,7 +777,7 @@ ENTRY(__enable_mmu)
 	phys_to_ttbr x1, x1
 	phys_to_ttbr x2, x2
 	msr	ttbr0_el1, x2			// load TTBR0
-	offset_ttbr1 x1
+	offset_ttbr1 x1, x3
 	msr	ttbr1_el1, x1			// load TTBR1
 	isb
 	msr	sctlr_el1, x0
arch/arm64/kernel/hibernate-asm.S

@@ -22,14 +22,14 @@
  * Even switching to our copied tables will cause a changed output address at
  * each stage of the walk.
  */
-.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
 	phys_to_ttbr \tmp, \zero_page
 	msr	ttbr1_el1, \tmp
 	isb
 	tlbi	vmalle1
 	dsb	nsh
 	phys_to_ttbr \tmp, \page_table
-	offset_ttbr1 \tmp
+	offset_ttbr1 \tmp, \tmp2
 	msr	ttbr1_el1, \tmp
 	isb
 .endm
@@ -70,7 +70,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
 	 * with a break-before-make via the zero page
 	 */
-	break_before_make_ttbr_switch	x5, x0, x6
+	break_before_make_ttbr_switch	x5, x0, x6, x8
 	mov	x21, x1
 	mov	x30, x2
@@ -101,7 +101,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	dsb	ish		/* wait for PoU cleaning to finish */

 	/* switch to the restored kernels page tables */
-	break_before_make_ttbr_switch	x25, x21, x6
+	break_before_make_ttbr_switch	x25, x21, x6, x8

 	ic	ialluis
 	dsb	ish
arch/arm64/mm/proc.S

@@ -168,7 +168,7 @@ ENDPROC(cpu_do_switch_mm)
 .macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
 	adrp	\tmp1, empty_zero_page
 	phys_to_ttbr \tmp2, \tmp1
-	offset_ttbr1 \tmp2
+	offset_ttbr1 \tmp2, \tmp1
 	msr	ttbr1_el1, \tmp2
 	isb
 	tlbi	vmalle1
@@ -187,7 +187,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	__idmap_cpu_set_reserved_ttbr1 x1, x3

-	offset_ttbr1 x0
+	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb
@@ -362,7 +362,7 @@ __idmap_kpti_secondary:
 	cbnz	w18, 1b

 	/* All done, act like nothing happened */
-	offset_ttbr1 swapper_ttb
+	offset_ttbr1 swapper_ttb, x18
 	msr	ttbr1_el1, swapper_ttb
 	isb
 	ret