Commit 8e63d388 authored by Will Deacon, committed by Catalin Marinas

arm64: flush: use local TLB and I-cache invalidation

There are a number of places where a single CPU is running with a
private page-table and we need to perform maintenance on the TLB and
I-cache in order to ensure correctness, but do not require the operation
to be broadcast to other CPUs.

This patch adds local variants of flush_tlb_all and __flush_icache_all
to support these use-cases and updates the callers accordingly.
__local_flush_icache_all also implies an isb, since it is intended to be
used synchronously.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Daney <david.daney@cavium.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent fa7aae8a
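
For illustration only (not part of the commit), a minimal sketch of the caller pattern the new helpers target; cpu_private_pgtable_switch is a hypothetical name:

/*
 * Hypothetical bring-up path: a single CPU is installing its own
 * private page table, so no other CPU can hold stale TLB or I-cache
 * entries for it yet and local (non-broadcast) maintenance suffices.
 */
static void cpu_private_pgtable_switch(struct mm_struct *mm)
{
	cpu_switch_mm(mm->pgd, mm);		/* install the private page table */
	local_flush_tlb_all();			/* "tlbi vmalle1": this CPU only */
	if (icache_is_aivivt())			/* ASID-tagged VIVT I-cache */
		__local_flush_icache_all();	/* "ic iallu"; implies an isb */
}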
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -115,6 +115,13 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
 extern void flush_dcache_page(struct page *);
 
+static inline void __local_flush_icache_all(void)
+{
+	asm("ic iallu");
+	dsb(nsh);
+	isb();
+}
+
 static inline void __flush_icache_all(void)
 {
 	asm("ic ialluis");
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -63,6 +63,14 @@
  *		only require the D-TLB to be invalidated.
  *		- kaddr - Kernel virtual memory address
  */
+static inline void local_flush_tlb_all(void)
+{
+	dsb(nshst);
+	asm("tlbi	vmalle1");
+	dsb(nsh);
+	isb();
+}
+
 static inline void flush_tlb_all(void)
 {
 	dsb(ishst);
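
For comparison (its context is truncated in the hunk above), the pre-existing broadcast variant pairs inner-shareable barriers with the IS form of the instruction, so the invalidation is observed by all CPUs in the Inner Shareable domain; its body reads approximately as:

static inline void flush_tlb_all(void)
{
	dsb(ishst);			/* order prior page-table updates, inner-shareable */
	asm("tlbi	vmalle1is");	/* invalidate EL1 TLB entries on all CPUs in the domain */
	dsb(ish);			/* wait for the broadcast invalidation to complete */
	isb();				/* synchronize the local instruction stream */
}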
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -344,9 +344,9 @@ static void efi_set_pgd(struct mm_struct *mm)
 	else
 		cpu_switch_mm(mm->pgd, mm);
 
-	flush_tlb_all();
+	local_flush_tlb_all();
 	if (icache_is_aivivt())
-		__flush_icache_all();
+		__local_flush_icache_all();
 }
 
 void efi_virtmap_load(void)
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -152,7 +152,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
 	preempt_disable();
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -90,7 +90,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		else
 			cpu_switch_mm(mm->pgd, mm);
 
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/*
 		 * Restore per-cpu offset before any kernel
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -48,9 +48,9 @@ static void flush_context(void)
 {
 	/* set the reserved TTBR0 before flushing the TLB */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	if (icache_is_aivivt())
-		__flush_icache_all();
+		__local_flush_icache_all();
 }
 
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -456,7 +456,7 @@ void __init paging_init(void)
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 }