Commit 0cef77c7 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s/radix: flush remote CPUs out of single-threaded mm_cpumask

When a single-threaded process with a non-local mm_cpumask performs a TLB
flush, try to use that point to flush its TLB entries out of the other CPUs
in the cpumask and shrink the mask back to the local CPU (a simplified
sketch of the resulting flush path follows the list below).

An IPI is used for clearing remote CPUs for a few reasons:
- An IPI can end lazy TLB use of the mm, which is required to prevent
  TLB entries being created on the remote CPU. The alternative is to
  drop lazy TLB switching completely, which costs 7.5% in a context
  switch ping-pong test between a process and a kernel idle thread.
- An IPI can have remote CPUs flush the entire PID, but the local CPU
  can flush a specific VA. tlbie would require over-flushing of the
  local CPU (where the process is running).
- A single threaded process that is migrated to a different CPU is
  likely to have a relatively small mm_cpumask, so IPI is reasonable.
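The shape of the resulting flush path, shown here as a simplified sketch of
the radix__flush_tlb_mm() hunk further down (preemption control, memory
barriers and the nest-MMU flush escalation case are omitted):

        if (!mm_is_thread_local(mm)) {
                if (unlikely(mm_is_singlethreaded(mm))) {
                        /*
                         * IPI the remote CPUs to drop lazy use of the mm and
                         * flush its PID there, then fall back to a local flush.
                         */
                        exit_flush_lazy_tlbs(mm);
                        goto local;
                }
                _tlbie_pid(pid, RIC_FLUSH_TLB);         /* broadcast flush */
        } else {
local:
                _tlbiel_pid(pid, RIC_FLUSH_TLB);        /* local-only flush */
        }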

No other thread can concurrently switch to this mm, because it must
have been given a reference to mm_users by the current thread before it
can use_mm. mm_users can be asynchronously incremented (by
mm_activate or mmget_not_zero), but those users must use remote mm
access and can't use_mm or access user address space. Existing code
makes this assumption already, for example sparc64 has reset
mm_cpumask using this condition since the start of history, see
arch/sparc/kernel/smp_64.c.
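In code terms, the condition used to detect this case is the
mm_is_singlethreaded() helper added by the patch (reproduced here with
explanatory comments added):

static bool mm_is_singlethreaded(struct mm_struct *mm)
{
        /* coprocessors can walk the page tables regardless of mm_cpumask */
        if (atomic_read(&mm->context.copros) > 0)
                return false;
        /*
         * A single user reference, held by the current thread: no other
         * thread can switch to this mm, so remote TLB state can be torn down.
         */
        if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
                return true;
        return false;
}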

This reduces tlbies for a kernel compile workload from 0.90M to 0.12M.
tlbiels increase significantly, both from the PID flushes used to clean up
remote CPUs and from the increased number of local flushes (a PID flush
takes 128 tlbiels vs a single tlbie).
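For context on the 128:1 ratio: tlbiel only invalidates the local core's TLB
and works one congruence-class set at a time, so a full PID flush has to step
through every set (128 sets for the POWER9 radix TLB), whereas one broadcast
tlbie covers the PID on all CPUs. A rough sketch (the helper name below is
illustrative, not a real kernel API):

        /* local PID flush: one tlbiel per TLB set */
        for (set = 0; set < 128; set++)         /* POWER9 radix TLB sets */
                tlbiel_flush_pid_set(pid, set); /* illustrative helper */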
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 85bcfaf6
@@ -76,6 +76,19 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
 		return false;
 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
 }
+static inline void mm_reset_thread_local(struct mm_struct *mm)
+{
+	WARN_ON(atomic_read(&mm->context.copros) > 0);
+	/*
+	 * It's possible for mm_access to take a reference on mm_users to
+	 * access the remote mm from another thread, but it's not allowed
+	 * to set mm_cpumask, so mm_users may be > 1 here.
+	 */
+	WARN_ON(current->mm != mm);
+	atomic_set(&mm->context.active_cpus, 1);
+	cpumask_clear(mm_cpumask(mm));
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
 #else /* CONFIG_PPC_BOOK3S_64 */
 static inline int mm_is_thread_local(struct mm_struct *mm)
 {
@@ -12,6 +12,8 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
+#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 
 #include <asm/ppc-opcode.h>
 #include <asm/tlb.h>
@@ -504,6 +506,15 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_page);
 
+static bool mm_is_singlethreaded(struct mm_struct *mm)
+{
+	if (atomic_read(&mm->context.copros) > 0)
+		return false;
+	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
+		return true;
+	return false;
+}
+
 static bool mm_needs_flush_escalation(struct mm_struct *mm)
 {
 	/*
@@ -511,10 +522,47 @@ static bool mm_needs_flush_escalation(struct mm_struct *mm)
 	 * caching PTEs and not flushing them properly when
 	 * RIC = 0 for a PID/LPID invalidate
 	 */
-	return atomic_read(&mm->context.copros) != 0;
+	if (atomic_read(&mm->context.copros) > 0)
+		return true;
+	return false;
 }
 
 #ifdef CONFIG_SMP
+static void do_exit_flush_lazy_tlb(void *arg)
+{
+	struct mm_struct *mm = arg;
+	unsigned long pid = mm->context.id;
+
+	if (current->mm == mm)
+		return; /* Local CPU */
+
+	if (current->active_mm == mm) {
+		/*
+		 * Must be a kernel thread because sender is single-threaded.
+		 */
+		BUG_ON(current->mm);
+		mmgrab(&init_mm);
+		switch_mm(mm, &init_mm, current);
+		current->active_mm = &init_mm;
+		mmdrop(mm);
+	}
+	_tlbiel_pid(pid, RIC_FLUSH_ALL);
+}
+
+static void exit_flush_lazy_tlbs(struct mm_struct *mm)
+{
+	/*
+	 * Would be nice if this was async so it could be run in
+	 * parallel with our local flush, but generic code does not
+	 * give a good API for it. Could extend the generic code or
+	 * make a special powerpc IPI for flushing TLBs.
+	 * For now it's not too performance critical.
+	 */
+	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
+				(void *)mm, 1);
+	mm_reset_thread_local(mm);
+}
+
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned long pid;
@@ -530,17 +578,24 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 	 */
 	smp_mb();
 	if (!mm_is_thread_local(mm)) {
+		if (unlikely(mm_is_singlethreaded(mm))) {
+			exit_flush_lazy_tlbs(mm);
+			goto local;
+		}
 		if (mm_needs_flush_escalation(mm))
 			_tlbie_pid(pid, RIC_FLUSH_ALL);
 		else
 			_tlbie_pid(pid, RIC_FLUSH_TLB);
-	} else
+	} else {
+local:
 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
+	}
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
-void radix__flush_all_mm(struct mm_struct *mm)
+static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
 {
 	unsigned long pid;
@@ -550,12 +605,24 @@ void radix__flush_all_mm(struct mm_struct *mm)
 	preempt_disable();
 	smp_mb(); /* see radix__flush_tlb_mm */
-	if (!mm_is_thread_local(mm))
+	if (!mm_is_thread_local(mm)) {
+		if (unlikely(mm_is_singlethreaded(mm))) {
+			if (!fullmm) {
+				exit_flush_lazy_tlbs(mm);
+				goto local;
+			}
+		}
 		_tlbie_pid(pid, RIC_FLUSH_ALL);
-	else
+	} else {
+local:
 		_tlbiel_pid(pid, RIC_FLUSH_ALL);
+	}
 	preempt_enable();
 }
+
+void radix__flush_all_mm(struct mm_struct *mm)
+{
+	__flush_all_mm(mm, false);
+}
 EXPORT_SYMBOL(radix__flush_all_mm);
 
 void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
@@ -575,10 +642,16 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
 	preempt_disable();
 	smp_mb(); /* see radix__flush_tlb_mm */
-	if (!mm_is_thread_local(mm))
+	if (!mm_is_thread_local(mm)) {
+		if (unlikely(mm_is_singlethreaded(mm))) {
+			exit_flush_lazy_tlbs(mm);
+			goto local;
+		}
 		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
-	else
+	} else {
+local:
 		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
+	}
 	preempt_enable();
 }
@@ -638,14 +711,21 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	preempt_disable();
 	smp_mb(); /* see radix__flush_tlb_mm */
-	if (mm_is_thread_local(mm)) {
-		local = true;
-		full = (end == TLB_FLUSH_ALL ||
-			nr_pages > tlb_local_single_page_flush_ceiling);
-	} else {
+	if (!mm_is_thread_local(mm)) {
+		if (unlikely(mm_is_singlethreaded(mm))) {
+			if (end != TLB_FLUSH_ALL) {
+				exit_flush_lazy_tlbs(mm);
+				goto is_local;
+			}
+		}
 		local = false;
 		full = (end == TLB_FLUSH_ALL ||
 			nr_pages > tlb_single_page_flush_ceiling);
+	} else {
+is_local:
+		local = true;
+		full = (end == TLB_FLUSH_ALL ||
+			nr_pages > tlb_local_single_page_flush_ceiling);
 	}
 
 	if (full) {
@@ -766,7 +846,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 	 * See the comment for radix in arch_exit_mmap().
 	 */
 	if (tlb->fullmm) {
-		radix__flush_all_mm(mm);
+		__flush_all_mm(mm, true);
 	} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
 		if (!tlb->need_flush_all)
 			radix__flush_tlb_mm(mm);
@@ -800,24 +880,32 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
 	preempt_disable();
 	smp_mb(); /* see radix__flush_tlb_mm */
-	if (mm_is_thread_local(mm)) {
-		local = true;
-		full = (end == TLB_FLUSH_ALL ||
-			nr_pages > tlb_local_single_page_flush_ceiling);
-	} else {
+	if (!mm_is_thread_local(mm)) {
+		if (unlikely(mm_is_singlethreaded(mm))) {
+			if (end != TLB_FLUSH_ALL) {
+				exit_flush_lazy_tlbs(mm);
+				goto is_local;
+			}
+		}
 		local = false;
 		full = (end == TLB_FLUSH_ALL ||
 			nr_pages > tlb_single_page_flush_ceiling);
+	} else {
+is_local:
+		local = true;
+		full = (end == TLB_FLUSH_ALL ||
+			nr_pages > tlb_local_single_page_flush_ceiling);
 	}
 
 	if (full) {
-		if (!local && mm_needs_flush_escalation(mm))
-			also_pwc = true;
-
-		if (local)
+		if (local) {
 			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
-		else
-			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL: RIC_FLUSH_TLB);
+		} else {
+			if (mm_needs_flush_escalation(mm))
+				also_pwc = true;
+
+			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
+		}
 	} else {
 		if (local)
 			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
@@ -859,10 +947,16 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
 	/* Otherwise first do the PWC, then iterate the pages. */
 	preempt_disable();
 	smp_mb(); /* see radix__flush_tlb_mm */
-	if (mm_is_thread_local(mm)) {
-		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
-	} else {
+	if (!mm_is_thread_local(mm)) {
+		if (unlikely(mm_is_singlethreaded(mm))) {
+			exit_flush_lazy_tlbs(mm);
+			goto local;
+		}
 		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
+	} else {
+local:
+		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
 	}
 
 	preempt_enable();