Commit 454bbad9 authored by Andy Lutomirski, committed by Ingo Molnar

x86/mm: Refactor flush_tlb_mm_range() to merge local and remote cases

The local flush path is very similar to the remote flush path.
Merge them.

This is intended to make no difference to behavior whatsoever.  It
removes some code and will make future changes to the flushing
mechanics simpler.

This patch does remove one small optimization: flush_tlb_mm_range()
now has an unconditional smp_mb() instead of using MOV to CR3 or
INVLPG as a full barrier when applicable.  I think this is okay for
a few reasons.  First, smp_mb() is quite cheap compared to the cost
of a TLB flush.  Second, this rearrangement makes a bigger
optimization available: with some work on the SMP function call
code, we could do the local and remote flushes in parallel.  Third,
I'm planning a rework of the TLB flush algorithm that will require
an atomic operation at the beginning of each flush, and that
operation will replace the smp_mb().
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 59f537c1
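
To make the reflowed diff below easier to follow, here is a condensed sketch of the merged flow the patch introduces. It is paraphrased from the patch itself, with the flush body abbreviated; the identifiers match the kernel code, but this is not a drop-in copy of either file.

    /* Condensed sketch of the merged path (abbreviated from the patch below). */
    static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                      bool local, enum tlb_flush_reason reason)
    {
            /* One body now serves both the local call and the remote IPI;
             * "local" only selects the NR_TLB_LOCAL_* vmstat counters.
             * flush_tlb_func_local()/_remote() are thin wrappers that call
             * this with local = true/false respectively. */
    }

    void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                            unsigned long end, unsigned long vmflag)
    {
            struct flush_tlb_info info = { .mm = mm };  /* .start/.end set from the range */
            int cpu = get_cpu();

            /*
             * Unconditional barrier against switch_mm(); this replaces the old
             * reliance on MOV-to-CR3/INVLPG being implicit full barriers.
             */
            smp_mb();

            if (mm == current->active_mm)                            /* local flush */
                    flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
            if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)   /* remote flush */
                    flush_tlb_others(mm_cpumask(mm), &info);

            put_cpu();
    }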
arch/x86/include/asm/tlbflush.h
@@ -225,7 +225,6 @@ static inline void __flush_tlb_one(unsigned long addr)
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
 struct flush_tlb_info {
         struct mm_struct *mm;
         unsigned long start;
...
arch/x86/mm/tlb.c
@@ -216,22 +216,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
  * write/read ordering problems.
  */
-/*
- * TLB flush funcation:
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
-static void flush_tlb_func(void *info)
+static void flush_tlb_func_common(const struct flush_tlb_info *f,
+                                  bool local, enum tlb_flush_reason reason)
 {
-        const struct flush_tlb_info *f = info;
-
-        inc_irq_stat(irq_tlb_count);
-
-        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
-                return;
-
-        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
         if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
                 leave_mm(smp_processor_id());
                 return;
@@ -239,7 +226,9 @@ static void flush_tlb_func(void *info)
         if (f->end == TLB_FLUSH_ALL) {
                 local_flush_tlb();
-                trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
+                if (local)
+                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+                trace_tlb_flush(reason, TLB_FLUSH_ALL);
         } else {
                 unsigned long addr;
                 unsigned long nr_pages =
@@ -249,10 +238,32 @@ static void flush_tlb_func(void *info)
                         __flush_tlb_single(addr);
                         addr += PAGE_SIZE;
                 }
-                trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
+                if (local)
+                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
+                trace_tlb_flush(reason, nr_pages);
         }
 }
+
+static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+{
+        const struct flush_tlb_info *f = info;
+
+        flush_tlb_func_common(f, true, reason);
+}
+
+static void flush_tlb_func_remote(void *info)
+{
+        const struct flush_tlb_info *f = info;
+
+        inc_irq_stat(irq_tlb_count);
+
+        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
+                return;
+
+        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+        flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
+}
+
 void native_flush_tlb_others(const struct cpumask *cpumask,
                              const struct flush_tlb_info *info)
 {
@@ -269,11 +280,11 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                 cpu = smp_processor_id();
                 cpumask = uv_flush_tlb_others(cpumask, info);
                 if (cpumask)
-                        smp_call_function_many(cpumask, flush_tlb_func,
+                        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                                (void *)info, 1);
                 return;
         }
-        smp_call_function_many(cpumask, flush_tlb_func,
+        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                (void *)info, 1);
 }
@@ -292,61 +303,33 @@ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                         unsigned long end, unsigned long vmflag)
 {
-        unsigned long addr;
-        struct flush_tlb_info info;
-        /* do a global flush by default */
-        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
-
-        preempt_disable();
-
-        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
-                base_pages_to_flush = (end - start) >> PAGE_SHIFT;
-        if (base_pages_to_flush > tlb_single_page_flush_ceiling)
-                base_pages_to_flush = TLB_FLUSH_ALL;
-
-        if (current->active_mm != mm) {
-                /* Synchronize with switch_mm. */
-                smp_mb();
-
-                goto out;
-        }
-
-        if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
-                leave_mm(smp_processor_id());
-
-                /* Synchronize with switch_mm. */
-                smp_mb();
-
-                goto out;
-        }
-
-        /*
-         * Both branches below are implicit full barriers (MOV to CR or
-         * INVLPG) that synchronize with switch_mm.
-         */
-        if (base_pages_to_flush == TLB_FLUSH_ALL) {
-                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-                local_flush_tlb();
+        int cpu;
+
+        struct flush_tlb_info info = {
+                .mm = mm,
+        };
+
+        cpu = get_cpu();
+
+        /* Synchronize with switch_mm. */
+        smp_mb();
+
+        /* Should we flush just the requested range? */
+        if ((end != TLB_FLUSH_ALL) &&
+            !(vmflag & VM_HUGETLB) &&
+            ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+                info.start = start;
+                info.end = end;
         } else {
-                /* flush range by one by one 'invlpg' */
-                for (addr = start; addr < end; addr += PAGE_SIZE) {
-                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
-                        __flush_tlb_single(addr);
-                }
-        }
-        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
-out:
-        info.mm = mm;
-        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                 info.start = 0UL;
                 info.end = TLB_FLUSH_ALL;
-        } else {
-                info.start = start;
-                info.end = end;
         }
-        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+
+        if (mm == current->active_mm)
+                flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+        if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
                 flush_tlb_others(mm_cpumask(mm), &info);
-        preempt_enable();
+        put_cpu();
 }
...
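
For context on how this merged path gets exercised (not part of the patch): at this point in the tree the generic x86 flush entry points are thin macros over flush_tlb_mm_range(), so both mm-wide and ranged flushes funnel through the single function above. The wrapper definitions below are an assumption about the era's arch/x86/include/asm/tlbflush.h, not something changed by this commit.

    /* Assumed wrappers of that era (arch/x86/include/asm/tlbflush.h): every
     * mm-wide or ranged flush ends up in flush_tlb_mm_range(), which now does
     * the local flush and kicks off the remote IPIs from one code path. */
    #define flush_tlb_mm(mm)        flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

    #define flush_tlb_range(vma, start, end)        \
                    flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)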