Commit 9e8e865b authored by Mark Rutland, committed by Catalin Marinas

arm64: unify idmap removal

We currently open-code the removal of the idmap and restoration of the
current task's MMU state in a few places.

Before introducing yet more copies of this sequence, unify these to call
a new helper, cpu_uninstall_idmap.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Cc: Laura Abbott <labbott@fedoraproject.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 5227cfa7
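
At each affected call site (secondary CPU bring-up in smp.c, resume in suspend.c, and paging_init() in mmu.c), the change reduces to the following before/after: a condensed sketch assembled from the hunks below rather than quoted from any single file, where mm stands for current->active_mm.

	/* Before: the sequence was open-coded at each site. */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
	if (mm != &init_mm)			/* suspend path only */
		cpu_switch_mm(mm->pgd, mm);

	/* After: a single helper call. */
	cpu_uninstall_idmap();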
arch/arm64/include/asm/mmu_context.h
@@ -27,6 +27,7 @@
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 static inline void contextidr_thread_switch(struct task_struct *next)
@@ -89,6 +90,30 @@ static inline void cpu_set_default_tcr_t0sz(void)
 	: "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
 }
 
+/*
+ * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
+ *
+ * The idmap lives in the same VA range as userspace, but uses global entries
+ * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
+ * speculative TLB fetches, we must temporarily install the reserved page
+ * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
+ *
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * which should not be installed in TTBR0_EL1. In this case we can leave the
+ * reserved page tables in place.
+ */
+static inline void cpu_uninstall_idmap(void)
+{
+	struct mm_struct *mm = current->active_mm;
+
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	cpu_set_default_tcr_t0sz();
+
+	if (mm != &init_mm)
+		cpu_switch_mm(mm->pgd, mm);
+}
+
 /*
  * It would be nice to return ASIDs back to the allocator, but unfortunately
  * that introduces a race with a generation rollover where we could erroneously
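
The ordering inside the new helper is load-bearing. As an editorial annotation of the comment above (not text from the commit), the steps are:

	cpu_set_reserved_ttbr0();	/* 1. park TTBR0_EL1 on empty tables so no
					 *    speculative walk can hit the idmap */
	local_flush_tlb_all();		/* 2. with walks cut off, invalidate the
					 *    idmap's global TLB entries */
	cpu_set_default_tcr_t0sz();	/* 3. only now is it safe to restore
					 *    TCR_EL1.T0SZ for the user VA range */
	if (mm != &init_mm)		/* 4. reinstall a user pgd; kernel tasks
					 *    keep the reserved tables, as init_mm
					 *    covers the TTBR1_EL1 tables */
		cpu_switch_mm(mm->pgd, mm);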
arch/arm64/kernel/setup.c
@@ -62,6 +62,7 @@
 #include <asm/memblock.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
 
 phys_addr_t __fdt_pointer __initdata;
arch/arm64/kernel/smp.c
@@ -149,9 +149,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	cpu_set_default_tcr_t0sz();
+	cpu_uninstall_idmap();
 
 	preempt_disable();
 	trace_hardirqs_off();
arch/arm64/kernel/suspend.c
@@ -60,7 +60,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	struct mm_struct *mm = current->active_mm;
 	int ret;
 	unsigned long flags;
 
@@ -87,22 +86,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	ret = __cpu_suspend_enter(arg, fn);
 	if (ret == 0) {
 		/*
-		 * We are resuming from reset with TTBR0_EL1 set to the
-		 * idmap to enable the MMU; set the TTBR0 to the reserved
-		 * page tables to prevent speculative TLB allocations, flush
-		 * the local tlb and set the default tcr_el1.t0sz so that
-		 * the TTBR0 address space set-up is properly restored.
-		 * If the current active_mm != &init_mm we entered cpu_suspend
-		 * with mappings in TTBR0 that must be restored, so we switch
-		 * them back to complete the address space configuration
-		 * restoration before returning.
+		 * We are resuming from reset with the idmap active in TTBR0_EL1.
+		 * We must uninstall the idmap and restore the expected MMU
+		 * state before we can possibly return to userspace.
 		 */
-		cpu_set_reserved_ttbr0();
-		local_flush_tlb_all();
-		cpu_set_default_tcr_t0sz();
-		if (mm != &init_mm)
-			cpu_switch_mm(mm->pgd, mm);
+		cpu_uninstall_idmap();
 
 		/*
 		 * Restore per-cpu offset before any kernel
arch/arm64/mm/mmu.c
@@ -468,9 +468,7 @@ void __init paging_init(void)
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
-	cpu_set_reserved_ttbr0();
-	local_flush_tlb_all();
-	cpu_set_default_tcr_t0sz();
+	cpu_uninstall_idmap();
 }
 
 /*