Commit 0d9b1ffe authored by Ard Biesheuvel, committed by Will Deacon

arm64: mm: make vabits_actual a build time constant if possible

Currently, we only support 52-bit virtual addressing on 64k page
configurations, and in all other cases, vabits_actual is guaranteed to
equal VA_BITS (== VA_BITS_MIN). So get rid of the variable entirely in
that case.

While at it, move the assignment out of the asm entry code - it has no
need to be there.
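
To illustrate the payoff, a rough sketch (not part of the patch): with the
fallback macro, every use of vabits_actual becomes a compile-time constant
expression on configurations that cannot do 52-bit VA. linear_map_base()
below is a hypothetical consumer, and its body mirrors the simplified shape
of _PAGE_OFFSET(va); u64 is the usual kernel type.

	/* mirrors the memory.h hunk below; VA_BITS comes from the kernel config */
	#if VA_BITS > 48
	extern u64 vabits_actual;		/* still resolved at boot */
	#else
	#define vabits_actual	((u64)VA_BITS)	/* build time constant */
	#endif

	/*
	 * Hypothetical consumer with the same shape as _PAGE_OFFSET(va): in the
	 * macro case the compiler folds the whole expression to a constant
	 * instead of emitting a load of vabits_actual.
	 */
	static inline u64 linear_map_base(void)
	{
		return -(1UL << vabits_actual);
	}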
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220624150651.1358849-3-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 475031b6
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -174,7 +174,11 @@
 #include <linux/types.h>
 #include <asm/bug.h>
 
+#if VA_BITS > 48
 extern u64			vabits_actual;
+#else
+#define vabits_actual		((u64)VA_BITS)
+#endif
 
 extern s64			memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -293,19 +293,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	adrp	x0, idmap_pg_dir
 	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
-#ifdef CONFIG_ARM64_VA_BITS_52
-	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
-	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
-	mov	x5, #52
-	cbnz	x6, 1f
-#endif
-	mov	x5, #VA_BITS_MIN
-1:
-	adr_l	x6, vabits_actual
-	str	x5, [x6]
-	dmb	sy
-	dc	ivac, x6		// Invalidate potentially stale cache line
-
 	/*
 	 * VA_BITS may be too small to allow for an ID mapping to be created
 	 * that covers system RAM if that is located sufficiently high in the
@@ -713,7 +700,7 @@ SYM_FUNC_START(__enable_mmu)
 SYM_FUNC_END(__enable_mmu)
 
 SYM_FUNC_START(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_VA_BITS_52
+#if VA_BITS > 48
 	ldr_l	x0, vabits_actual
 	cmp	x0, #52
 	b.ne	2f
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -265,7 +265,20 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+	s64 linear_region_size;
+
+#if VA_BITS > 48
+	if (cpuid_feature_extract_unsigned_field(
+				read_sysreg_s(SYS_ID_AA64MMFR2_EL1),
+				ID_AA64MMFR2_LVA_SHIFT))
+		vabits_actual = VA_BITS;
+
+	/* make the variable visible to secondaries with the MMU off */
+	dcache_clean_inval_poc((u64)&vabits_actual,
+			       (u64)&vabits_actual + sizeof(vabits_actual));
+#endif
+
+	linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
 
 	/*
 	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
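
As a note on the detection above: cpuid_feature_extract_unsigned_field()
reduces to a 4-bit unsigned field extract, and a nonzero
ID_AA64MMFR2_EL1.VARange (LVA) field means the CPU implements 52-bit VA. A
minimal sketch of those semantics (id_field_u4() is a made-up name;
ID_AA64MMFR2_LVA_SHIFT is 16 at the time of this patch):

	/* sketch only: 4-bit unsigned ID register field, as the kernel helper does */
	static inline unsigned int id_field_u4(u64 reg, unsigned int shift)
	{
		return (reg >> shift) & 0xf;	/* nonzero LVA => 52-bit VA */
	}

The explicit dcache_clean_inval_poc() is needed because vabits_actual no
longer lives in .mmuoff.data.write (see the mm/mmu.c hunk below): secondaries
read it in __cpu_secondary_check52bitva with the MMU off, so the newly
written value has to be cleaned to the point of coherency.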
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -46,8 +46,10 @@
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
-u64 __section(".mmuoff.data.write") vabits_actual;
+#if VA_BITS > 48
+u64 vabits_actual __ro_after_init = VA_BITS_MIN;
 EXPORT_SYMBOL(vabits_actual);
+#endif
 
 u64 kimage_vaddr __ro_after_init = (u64)&_text;
 EXPORT_SYMBOL(kimage_vaddr);