Commit 723d3a8e authored by Ard Biesheuvel, committed by Will Deacon

arm64: head: pass ID map root table address to __enable_mmu()

We will be adding an initial ID map that covers the entire kernel image,
so we will pass the actual ID map root table to use to __enable_mmu(),
rather than hard code it.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220624150651.1358849-10-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 2e945851
@@ -595,6 +595,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	bl	__cpu_secondary_check52bitva
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
+	adrp	x2, idmap_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
@@ -648,6 +649,7 @@ SYM_FUNC_END(__secondary_too_slow)
  *
  * x0 = SCTLR_EL1 value for turning on the MMU.
  * x1 = TTBR1_EL1 value
+ * x2 = ID map root table address
  *
  * Returns to the caller via x30/lr. This requires the caller to be covered
  * by the .idmap.text section.
@@ -656,14 +658,13 @@ SYM_FUNC_END(__secondary_too_slow)
  * If it isn't, park the CPU
  */
 SYM_FUNC_START(__enable_mmu)
-	mrs	x2, ID_AA64MMFR0_EL1
-	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+	mrs	x3, ID_AA64MMFR0_EL1
+	ubfx	x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
 	b.lt	__no_granule_support
-	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
 	b.gt	__no_granule_support
-	update_early_cpu_boot_status 0, x2, x3
-	adrp	x2, idmap_pg_dir
+	update_early_cpu_boot_status 0, x3, x4
 	phys_to_ttbr x1, x1
 	phys_to_ttbr x2, x2
 	msr	ttbr0_el1, x2			// load TTBR0
@@ -819,6 +820,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
 #endif
 	adrp	x1, init_pg_dir
+	adrp	x2, idmap_pg_dir
 	bl	__enable_mmu
 #ifdef CONFIG_RELOCATABLE
 #ifdef CONFIG_RELR
......
@@ -104,6 +104,7 @@ SYM_CODE_START(cpu_resume)
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
+	adrp	x2, idmap_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =_cpu_resume
 	br	x8
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment