Commit 9b7fef25 authored by Palmer Dabbelt

Merge patch series "riscv: asid: switch to alternative way to fix stale TLB entries"

Sergey Matyukevich <geomatsi@gmail.com> says:

Some time ago, two different patches were posted to fix stale TLB
entries that caused application crashes.

The patch [0] suggested 'aggregating' mm_cpumask, i.e. the current CPU is
not cleared from mm_cpumask for the switched-out task in the switch_mm
function. For additional explanation see the commit message by Guo Ren.
The same approach is used by the arc architecture, so another good
reference is the comment for switch_mm in arch/arc/include/asm/mmu_context.h.
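
To illustrate the 'aggregating' behaviour, here is a minimal userspace
sketch (plain C, not kernel code; mm_cpumask_t, switch_in and
flush_targets are made-up names for this example). Once a hart has run
the mm, its bit stays set, so a later TLB shootdown reaches every hart
that may still hold entries for that address space:

/*
 * Userspace sketch of the aggregating mm_cpumask idea from [0].
 * A hart's bit is set when the mm is switched in and is never
 * cleared on switch-out, so any later flush targets all harts
 * that ever ran the mm.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t mm_cpumask_t;		/* one bit per hart, up to 64 harts */

/* Record that this hart runs the mm; nothing clears the bit later. */
static void switch_in(mm_cpumask_t *mask, unsigned int cpu)
{
	*mask |= 1ULL << cpu;
}

/* A TLB shootdown must cover every hart whose bit is set. */
static void flush_targets(mm_cpumask_t mask)
{
	for (unsigned int cpu = 0; cpu < 64; cpu++)
		if (mask & (1ULL << cpu))
			printf("flush TLB on hart %u\n", cpu);
}

int main(void)
{
	mm_cpumask_t mask = 0;

	switch_in(&mask, 0);	/* task starts on hart 0 */
	switch_in(&mask, 2);	/* migrates to hart 2; hart 0 stays set */
	flush_targets(mask);	/* reaches harts 0 and 2 */
	return 0;
}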

The patch [1] attempted to reduce the number of TLB flushes by deferring
(and possibly avoiding) them for CPUs not running the task.
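
For contrast, the following rough userspace sketch shows the
deferred-flush idea from [1] that this series reverts (again not kernel
code; the arrays and function names are invented, and a single mm is
assumed): harts not currently running the mm are only marked stale, and
the flush happens locally when the mm is next scheduled there.

/*
 * Userspace sketch of the deferred TLB flush scheme from [1]:
 * harts not running the mm are marked "stale" instead of being
 * flushed immediately; the flush is done locally when the mm
 * migrates to such a hart.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_HARTS 4

static bool running[NR_HARTS];		/* harts currently executing the mm */
static bool tlb_stale[NR_HARTS];	/* harts with a deferred flush pending */

/* Flush request: immediate on running harts, deferred everywhere else. */
static void flush_range(void)
{
	for (unsigned int hart = 0; hart < NR_HARTS; hart++) {
		if (running[hart])
			printf("hart %u: flush TLB now\n", hart);
		else
			tlb_stale[hart] = true;	/* defer until migration */
	}
}

/* switch_mm on 'hart': perform the deferred flush, if one is pending. */
static void schedule_in(unsigned int hart)
{
	running[hart] = true;
	if (tlb_stale[hart]) {
		tlb_stale[hart] = false;
		printf("hart %u: deferred local TLB flush\n", hart);
	}
}

int main(void)
{
	running[0] = true;	/* the mm currently runs only on hart 0 */
	flush_range();		/* hart 0 flushes now, harts 1-3 are deferred */
	schedule_in(2);		/* mm migrates to hart 2: flush happens here */
	return 0;
}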

Patch [1] has been merged. However, we already have two bug reports from
different vendors, so apparently something is missing in the approach
suggested in [1]. In both cases patch [0] fixed the issue.

This patch series reverts [1] and replaces it with [0].

[0] https://lore.kernel.org/linux-riscv/20221111075902.798571-1-guoren@kernel.org/
[1] https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/

* b4-shazam-merge:
  riscv: asid: Fixup stale TLB entry cause application crash
  Revert "riscv: mm: notify remote harts about mmu cache updates"

Link: https://lore.kernel.org/r/20230226150137.1919750-1-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents 2a8db5ec 82dd33fd
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
-	/* A local tlb flush is needed before user execution can resume. */
-	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;
...
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)
...
@@ -196,16 +196,6 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
 	if (need_flush_tlb)
 		local_flush_tlb_all();
-#ifdef CONFIG_SMP
-	else {
-		cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-		if (cpumask_test_cpu(cpu, mask)) {
-			cpumask_clear_cpu(cpu, mask);
-			local_flush_tlb_all_asid(cntx & asid_mask);
-		}
-	}
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
 		local_flush_tlb_all();
 }
 
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
-	if (static_branch_unlikely(&use_asid_allocator))
-		set_mm_asid(mm, cpu);
-	else
-		set_mm_noasid(mm);
+	/*
+	 * The mm_cpumask indicates which harts' TLBs contain the virtual
+	 * address mapping of the mm. Compared to noasid, using asid
+	 * can't guarantee that stale TLB entries are invalidated because
+	 * the asid mechanism wouldn't flush TLB for every switch_mm for
+	 * performance. So when using asid, keep all CPUs footmarks in
+	 * cpumask() until mm reset.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		set_mm_asid(next, cpu);
+	} else {
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		set_mm_noasid(next);
+	}
 }
 
 static int __init asids_init(void)
@@ -274,7 +276,8 @@ static int __init asids_init(void)
 }
 early_initcall(asids_init);
 #else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
 	/* Nothing to do here when there is no MMU */
 }
@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpu = smp_processor_id();
 
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-
-	set_mm(next, cpu);
+	set_mm(prev, next, cpu);
 
 	flush_icache_deferred(next, cpu);
 }
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
-		/*
-		 * TLB will be immediately flushed on harts concurrently
-		 * executing this MM context. TLB flush on other harts
-		 * is deferred until this MM context migrates there.
-		 */
-		cpumask_setall(pmask);
-		cpumask_clear_cpu(cpuid, pmask);
-		cpumask_andnot(pmask, pmask, cmask);
-
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {
...