Commit 529c4b05 authored by Kristina Martsenko, committed by Catalin Marinas

arm64: handle 52-bit addresses in TTBR

The top 4 bits of a 52-bit physical address are positioned at bits 2..5
in the TTBR registers. Introduce a couple of macros to move the bits
there, and change all TTBR writers to use them.
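
As an illustration only (not part of the patch), a minimal standalone C
sketch of the same bit shuffle; the mask mirrors the kernel's
TTBR_BADDR_MASK_52 and the example address is made up:

	#include <stdint.h>
	#include <stdio.h>

	/* TTBR bits [47:2] hold the table base address; with 52-bit PAs,
	 * PA bits [51:48] are folded into TTBR bits [5:2]. */
	#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)

	static uint64_t phys_to_ttbr(uint64_t phys)
	{
		/* Shift right by 46 to move PA[51:48] down to bits [5:2];
		 * the mask then keeps only TTBR[47:2]. */
		return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
	}

	int main(void)
	{
		uint64_t pa = 0x000f000012340000ULL;	/* hypothetical 52-bit PA */

		printf("ttbr = 0x%016llx\n", (unsigned long long)phys_to_ttbr(pa));
		return 0;
	}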

Leave TTBR0 PAN code unchanged, to avoid complicating it. A system with
52-bit PA will have PAN anyway (because it's ARMv8.1 or later), and a
system without 52-bit PA can only use up to 48-bit PAs. A later patch in
this series will add a kconfig dependency to ensure PAN is configured.

In addition, when using 52-bit PA there is a special alignment
requirement on the top-level table. We don't currently have any VA_BITS
configuration that would violate the requirement, but one could be added
in the future, so add a compile-time BUG_ON to check for it.
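
A minimal sketch of the reasoning behind that check, using a hypothetical
PGD_SIZE value (the kernel allocates pgds naturally aligned to their size,
so a size of at least 64 bytes guarantees the 64-byte alignment that
52-bit PAs require):

	/* PGD_SIZE here is a made-up stand-in for the kernel's value. */
	#define PGD_SIZE	512

	/* Because a pgd allocation is naturally aligned to PGD_SIZE,
	 * PGD_SIZE >= 64 implies the table base is 64-byte aligned, so
	 * its physical address bits [5:0] are zero and TTBR bits [5:2]
	 * are free to carry PA[51:48]. */
	_Static_assert(PGD_SIZE >= 64,
		       "top-level table too small for 52-bit PA alignment");

	int main(void)
	{
		return 0;
	}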

Tested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Bob Picco <bob.picco@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
[catalin.marinas@arm.com: added TTBR_BADDR_MASK_52 comment]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 787fd1d0
...
@@ -221,6 +221,8 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+#define kvm_phys_to_vttbr(addr)	(addr)
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif	/* __ARM_KVM_MMU_H__ */
...
@@ -530,4 +530,20 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+/*
+ * Arrange a physical address in a TTBR register, taking care of 52-bit
+ * addresses.
+ *
+ * 	phys:	physical address, preserved
+ * 	ttbr:	returns the TTBR value
+ */
+	.macro	phys_to_ttbr, phys, ttbr
+#ifdef CONFIG_ARM64_PA_BITS_52
+	orr	\ttbr, \phys, \phys, lsr #46
+	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
+#else
+	mov	\ttbr, \phys
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
...
@@ -309,5 +309,7 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#define kvm_phys_to_vttbr(addr)	phys_to_ttbr(addr)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
...
@@ -51,7 +51,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = __pa_symbol(empty_zero_page);
+	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
 
 	write_sysreg(ttbr, ttbr0_el1);
 	isb();
...
...
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
+#include <asm/memory.h>
+
 /*
  * Number of page-table levels required to address 'va_bits' wide
  * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
...
@@ -279,4 +281,15 @@
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
 
+/*
+ * TTBR.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+/*
+ * This should be GENMASK_ULL(47, 2).
+ * TTBR_ELx[1] is RES0 in this configuration.
+ */
+#define TTBR_BADDR_MASK_52	(((UL(1) << 46) - 1) << 2)
+#endif
+
 #endif
...
@@ -733,6 +733,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define kc_vaddr_to_offset(v)	((v) & ~VA_START)
 #define kc_offset_to_vaddr(o)	((o) | VA_START)
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#else
+#define phys_to_ttbr(addr)	(addr)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
...
@@ -679,8 +679,10 @@ ENTRY(__enable_mmu)
 	update_early_cpu_boot_status 0, x1, x2
 	adrp	x1, idmap_pg_dir
 	adrp	x2, swapper_pg_dir
-	msr	ttbr0_el1, x1			// load TTBR0
-	msr	ttbr1_el1, x2			// load TTBR1
+	phys_to_ttbr x1, x3
+	phys_to_ttbr x2, x4
+	msr	ttbr0_el1, x3			// load TTBR0
+	msr	ttbr1_el1, x4			// load TTBR1
 	isb
 	msr	sctlr_el1, x0
 	isb
...
...
@@ -33,12 +33,14 @@
  * Even switching to our copied tables will cause a changed output address at
  * each stage of the walk.
  */
-.macro break_before_make_ttbr_switch zero_page, page_table
-	msr	ttbr1_el1, \zero_page
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+	phys_to_ttbr \zero_page, \tmp
+	msr	ttbr1_el1, \tmp
 	isb
 	tlbi	vmalle1
 	dsb	nsh
-	msr	ttbr1_el1, \page_table
+	phys_to_ttbr \page_table, \tmp
+	msr	ttbr1_el1, \tmp
 	isb
 .endm
 
...
@@ -78,7 +80,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
 	 * with a break-before-make via the zero page
 	 */
-	break_before_make_ttbr_switch	x5, x0
+	break_before_make_ttbr_switch	x5, x0, x6
 
 	mov	x21, x1
 	mov	x30, x2
...
@@ -109,7 +111,7 @@ ENTRY(swsusp_arch_suspend_exit)
 	dsb	ish		/* wait for PoU cleaning to finish */
 
 	/* switch to the restored kernels page tables */
-	break_before_make_ttbr_switch	x25, x21
+	break_before_make_ttbr_switch	x25, x21, x6
 
 	ic	ialluis
 	dsb	ish
...
...
@@ -264,7 +264,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	 */
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
 	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
...
...
@@ -63,7 +63,8 @@ __do_hyp_init:
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.lo	__kvm_handle_stub_hvc
 
-	msr	ttbr0_el2, x0
+	phys_to_ttbr x0, x4
+	msr	ttbr0_el2, x4
 
 	mrs	x4, tcr_el1
 	ldr	x5, =TCR_EL2_MASK
...
...
@@ -49,6 +49,14 @@ void __init pgd_cache_init(void)
 	if (PGD_SIZE == PAGE_SIZE)
 		return;
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+	/*
+	 * With 52-bit physical addresses, the architecture requires the
+	 * top-level table to be aligned to at least 64 bytes.
+	 */
+	BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
 	/*
 	 * Naturally aligned pgds required by the architecture.
 	 */
...
...
@@ -138,10 +138,11 @@ ENDPROC(cpu_do_resume)
  *	- pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
-	pre_ttbr0_update_workaround x0, x2, x3
+	phys_to_ttbr x0, x2
+	pre_ttbr0_update_workaround x2, x3, x4
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr0_el1, x2			// set TTBR0
 	isb
 	post_ttbr0_update_workaround
 	ret
...
@@ -158,14 +159,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	save_and_disable_daif flags=x2
 
 	adrp	x1, empty_zero_page
-	msr	ttbr1_el1, x1
+	phys_to_ttbr x1, x3
+	msr	ttbr1_el1, x3
 	isb
 
 	tlbi	vmalle1
 	dsb	nsh
 	isb
 
-	msr	ttbr1_el1, x0
+	phys_to_ttbr x0, x3
+	msr	ttbr1_el1, x3
 	isb
 
 	restore_daif x2
...
...
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
 	pgd_phys = virt_to_phys(kvm->arch.pgd);
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
-	kvm->arch.vttbr = pgd_phys | vmid;
+	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
 	spin_unlock(&kvm_vmid_lock);
 }
...