Commit ec659934 authored by Mel Gorman, committed by Ingo Molnar

mm, x86: Account for TLB flushes only when debugging

Bisection between 3.11 and 3.12 fingered commit 9824cf97 ("mm:
vmstats: tlb flush counters") to cause overhead problems.

The counters are undeniably useful but how often do we really
need to debug TLB flush related issues?  It does not justify
taking the penalty everywhere so make it a debugging option.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Alex Shi <alex.shi@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-XzxjntugxuwpxXhcrxqqh53b@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2993ae33
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 }
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
  */
 static inline void __flush_tlb_up(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 }
 static inline void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb_all();
 }
......
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 	}
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 	/* Save MTRR state */
@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 	/* Intel (P6) standard MTRRs */
......
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	info.flush_start = start;
 	info.flush_end = end;
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
@@ -151,7 +151,7 @@ void flush_tlb_current_task(void)
 	preempt_disable();
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -215,7 +215,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	/* tlb_flushall_shift is on balance point, details in commit log */
 	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
 		if (has_large_page(mm, start, end)) {
@@ -224,7 +224,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		}
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
@@ -262,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 static void do_flush_tlb_all(void *info)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -270,7 +270,7 @@ static void do_flush_tlb_all(void *info)
 void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
......
@@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		THP_ZERO_PAGE_ALLOC,
 		THP_ZERO_PAGE_ALLOC_FAILED,
 #endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
 #ifdef CONFIG_SMP
 		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
 		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif
+#endif /* CONFIG_SMP */
 		NR_TLB_LOCAL_FLUSH_ALL,
 		NR_TLB_LOCAL_FLUSH_ONE,
+#endif /* CONFIG_DEBUG_TLBFLUSH */
 		NR_VM_EVENT_ITEMS
 };
......
@@ -83,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu)
 #define count_vm_numa_events(x, y) do { (void)(y); } while (0)
 #endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#define count_vm_tlb_event(x)	   count_vm_event(x)
+#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
+#else
+#define count_vm_tlb_event(x) do {} while (0)
+#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
+#endif
 #define __count_zone_vm_events(item, zone, delta) \
 	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
 		zone_idx(zone), delta)
......
@@ -851,12 +851,14 @@ const char * const vmstat_text[] = {
 	"thp_zero_page_alloc",
 	"thp_zero_page_alloc_failed",
 #endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
 #ifdef CONFIG_SMP
 	"nr_tlb_remote_flush",
 	"nr_tlb_remote_flush_received",
-#endif
+#endif /* CONFIG_SMP */
 	"nr_tlb_local_flush_all",
 	"nr_tlb_local_flush_one",
+#endif /* CONFIG_DEBUG_TLBFLUSH */
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment