Commit 2ced0f30 authored by Ard Biesheuvel, committed by Catalin Marinas

arm64: head: Switch endianness before populating the ID map

Ensure that the endianness used for populating the ID map matches the
endianness that the running kernel will be using, as this is no longer
guaranteed now that create_idmap() is invoked before init_kernel_el().

Note that doing so is only safe if the MMU is off, as switching the
endianness with the MMU on would leave the active ID map invalid. So
also clear the M bit when toggling the EE bit in SCTLR, and record the
MMU as being disabled at boot.
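As a sketch of the operation described above (a C model, not kernel
code; sctlr_switch_endianness() is a made-up helper): EE selects the
data endianness at ELx and M enables the MMU, and both are updated in a
single SCTLR write so the endianness never changes while translation is
active.

  /* Illustration only: models the SCTLR_ELx update described above. */
  #include <stdint.h>

  #define SCTLR_ELx_M	(1UL << 0)	/* MMU enable */
  #define SCTLR_ELx_EE	(1UL << 25)	/* data endianness at ELx */

  static uint64_t sctlr_switch_endianness(uint64_t sctlr)
  {
  	sctlr ^= SCTLR_ELx_EE;	/* toggle the endianness bit */
  	sctlr &= ~SCTLR_ELx_M;	/* keep the MMU off while EE changes */
  	return sctlr;		/* caller writes this back, then ISBs */
  }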

Note that the same issue has resulted in preserve_boot_args() recording
the contents of registers X0 ... X3 in the wrong byte order, although
this is arguably a very minor concern.
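To see why the recorded values come out wrong (illustration only; the
concrete value is an example, not taken from the report): a register
saved to memory before the endianness switch is read back with all
eight bytes reversed once the kernel runs in the other byte order.

  /* Illustration only: models the byte reversal seen by the kernel.
   * E.g. 0x0000000012345678 recorded before the switch reads back as
   * 0x7856341200000000 afterwards; bswap64 models that reversal.
   */
  #include <stdint.h>

  static uint64_t as_seen_after_endian_switch(uint64_t recorded)
  {
  	return __builtin_bswap64(recorded);
  }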

Fixes: 32b135a7 ("arm64: head: avoid cache invalidation when entering with the MMU on")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/r/20230125185910.962733-1-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 61786170
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -575,6 +575,7 @@
 #define SCTLR_ELx_DSSBS	(BIT(44))
 #define SCTLR_ELx_ATA	(BIT(43))
 
+#define SCTLR_ELx_EE_SHIFT	25
 #define SCTLR_ELx_ENIA_SHIFT	31
 
 #define SCTLR_ELx_ITFSB	(BIT(37))
@@ -583,7 +584,7 @@
 #define SCTLR_ELx_LSMAOE	(BIT(29))
 #define SCTLR_ELx_nTLSMD	(BIT(28))
 #define SCTLR_ELx_ENDA	(BIT(27))
-#define SCTLR_ELx_EE	(BIT(25))
+#define SCTLR_ELx_EE	(BIT(SCTLR_ELx_EE_SHIFT))
 #define SCTLR_ELx_EIS	(BIT(22))
 #define SCTLR_ELx_IESB	(BIT(21))
 #define SCTLR_ELx_TSCXT	(BIT(20))
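The reason for introducing the _SHIFT constant: bit-test instructions
such as tbnz/tbz take a bit number, while tst/eor take a mask, so
deriving the mask from the shift keeps both users of the EE bit in
sync. The same pattern in C (illustrative only, not the kernel's BIT()
macro):

  /* One source of truth for the bit position, used as number and mask. */
  #include <stdint.h>

  #define SCTLR_ELx_EE_SHIFT	25
  #define SCTLR_ELx_EE		(1UL << SCTLR_ELx_EE_SHIFT)

  static int sctlr_big_endian(uint64_t sctlr)
  {
  	return (sctlr >> SCTLR_ELx_EE_SHIFT) & 1;	/* bit-number user */
  }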
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -129,10 +129,31 @@ SYM_CODE_START_LOCAL(record_mmu_state)
 	mrs	x19, sctlr_el1
 	b.ne	0f
 	mrs	x19, sctlr_el2
-0:	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
+0:
+CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
 	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
 	csel	x19, xzr, x19, eq		// clear x19 if Z
 	ret
+
+	/*
+	 * Set the correct endianness early so all memory accesses issued
+	 * before init_kernel_el() occur in the correct byte order. Note that
+	 * this means the MMU must be disabled, or the active ID map will end
+	 * up getting interpreted with the wrong byte order.
+	 */
+1:	eor	x19, x19, #SCTLR_ELx_EE
+	bic	x19, x19, #SCTLR_ELx_M
+	b.ne	2f
+	pre_disable_mmu_workaround
+	msr	sctlr_el2, x19
+	b	3f
+2:	pre_disable_mmu_workaround
+	msr	sctlr_el1, x19
+3:	isb
+	mov	x19, xzr
+	ret
 SYM_CODE_END(record_mmu_state)
 
 /*
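For readers tracing the control flow, a rough C rendering of
record_mmu_state() after this patch (illustration only: read_sctlr(),
write_sctlr() and isb() are hypothetical stand-ins for the mrs/msr/isb
instructions, and el stands for CurrentEL):

  #include <stdint.h>

  #define SCTLR_ELx_M		(1UL << 0)
  #define SCTLR_ELx_C		(1UL << 2)
  #define SCTLR_ELx_EE_SHIFT	25
  #define SCTLR_ELx_EE		(1UL << SCTLR_ELx_EE_SHIFT)
  #define KERNEL_WANTS_BE	0	/* 1 for a CONFIG_CPU_BIG_ENDIAN build */

  extern uint64_t read_sctlr(int el);		/* stand-in for mrs */
  extern void write_sctlr(int el, uint64_t v);	/* stand-in for msr */
  extern void isb(void);

  static uint64_t record_mmu_state_model(int el)
  {
  	uint64_t sctlr = read_sctlr(el);

  	if (((sctlr >> SCTLR_ELx_EE_SHIFT) & 1) != KERNEL_WANTS_BE) {
  		/* wrong byte order: flip EE, force the MMU off, one write */
  		write_sctlr(el, (sctlr ^ SCTLR_ELx_EE) & ~SCTLR_ELx_M);
  		isb();
  		return 0;	/* MMU is recorded as off */
  	}
  	/* M only counts as "MMU on" if the cache enable bit C is also set */
  	return (sctlr & SCTLR_ELx_C) ? (sctlr & SCTLR_ELx_M) : 0;
  }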