Commit 9824cf97 authored by Dave Hansen, committed by Linus Torvalds

mm: vmstats: tlb flush counters

I was investigating some TLB flush scaling issues and realized that we do
not have any good way of figuring out how many TLB flushes we are doing.
This patch adds a set of vm_event counters and increments them at the x86
TLB invalidation sites, so the totals show up in /proc/vmstat.

It would be nice to be able to do these counts in generic code, but the
arch-independent calls don't explicitly specify whether we actually need
to do remote flushes or not.  In the end, we really need to know whether we
actually _did_ global vs.  local invalidations, so that leaves us with few
options other than to muck with the counters from arch-specific code.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 822518dc
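To observe the new counters once the patch is applied: /proc/vmstat prints one
"name value" pair per line, in the order of vmstat_text[], and all five counters
added below share the nr_tlb_ prefix.  A minimal, hypothetical userspace sketch
(not part of this patch) that dumps them:

	/* Illustrative only: dump the new TLB counters from /proc/vmstat. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/proc/vmstat", "r");
		char line[128];

		if (!f) {
			perror("/proc/vmstat");
			return 1;
		}
		/* every counter added by this patch begins with "nr_tlb_" */
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "nr_tlb_", 7))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}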
arch/x86/mm/tlb.c
@@ -103,6 +103,7 @@ static void flush_tlb_func(void *info)
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -130,6 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	info.flush_start = start;
 	info.flush_end = end;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -149,6 +151,7 @@ void flush_tlb_current_task(void)
 
 	preempt_disable();
 
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -211,16 +214,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
 
 	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
+		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
-	else {
+	} else {
 		if (has_large_page(mm, start, end)) {
 			local_flush_tlb();
 			goto flush_all;
 		}
 		/* flush range by one by one 'invlpg' */
-		for (addr = start; addr < end; addr += PAGE_SIZE)
+		for (addr = start; addr < end; addr += PAGE_SIZE) {
+			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
+		}
 
 		if (cpumask_any_but(mm_cpumask(mm),
 				smp_processor_id()) < nr_cpu_ids)
@@ -256,6 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 
 static void do_flush_tlb_all(void *info)
 {
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -263,6 +270,7 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
@@ -272,8 +280,10 @@ static void do_kernel_range_flush(void *info)
 	unsigned long addr;
 
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) {
+		count_vm_event(NR_TLB_LOCAL_FLUSH_ONE_KERNEL);
 		__flush_tlb_single(addr);
+	}
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
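For context (this definition is not part of the diff; it is quoted from memory
of include/linux/vmstat.h, so treat it as a sketch): count_vm_event() boils
down to a single preemption-safe per-CPU increment, which is why the new call
sites are cheap enough to sit on these flush hot paths:

	/* include/linux/vmstat.h, essentially: bump this CPU's slot for the event */
	static inline void count_vm_event(enum vm_event_item item)
	{
		this_cpu_inc(vm_event_states.event[item]);
	}

Because the counters are per-CPU, readers such as /proc/vmstat sum them across
CPUs when reporting.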
include/linux/vm_event_item.h
@@ -70,6 +70,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		THP_ZERO_PAGE_ALLOC,
 		THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
+		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
+		NR_TLB_REMOTE_FLUSH_RECEIVED,	/* cpu received ipi for flush */
+		NR_TLB_LOCAL_FLUSH_ALL,
+		NR_TLB_LOCAL_FLUSH_ONE,
+		NR_TLB_LOCAL_FLUSH_ONE_KERNEL,
 		NR_VM_EVENT_ITEMS
 };
mm/vmstat.c
@@ -817,6 +817,11 @@ const char * const vmstat_text[] = {
 	"thp_zero_page_alloc",
 	"thp_zero_page_alloc_failed",
 #endif
+	"nr_tlb_remote_flush",
+	"nr_tlb_remote_flush_received",
+	"nr_tlb_local_flush_all",
+	"nr_tlb_local_flush_one",
+	"nr_tlb_local_flush_one_kernel",
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
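One ordering caveat: /proc/vmstat pairs each string in vmstat_text[] with its
counter purely by array index, so the five new names above must stay in exactly
the same order as the new enum entries in vm_event_item.h.  A simplified sketch
of the seq_file handler in mm/vmstat.c (illustrative, not part of this patch):

	static int vmstat_show(struct seq_file *m, void *arg)
	{
		unsigned long *l = arg;
		unsigned long off = l - (unsigned long *)m->private;

		/* name and value are matched by position alone */
		seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
		return 0;
	}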