Commit ce4a4e56 authored by Andy Lutomirski, committed by Ingo Molnar

x86/mm: Remove the UP asm/tlbflush.h code, always use the (formerly) SMP code

The UP asm/tlbflush.h generates somewhat nicer code than the SMP version.
Aside from that, it's fallen quite a bit behind the SMP code:

 - flush_tlb_mm_range() didn't flush individual pages if the range
   was small (see the first sketch below).

 - The lazy TLB code was much weaker.  This usually wouldn't matter,
   but if a kernel thread flushed its lazy "active_mm" more than
   once (due to reclaim or similar), it wouldn't be unlazied and
   would instead pointlessly flush repeatedly (see the second sketch
   below).

 - Tracepoints were missing.
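
For illustration only, and not part of this patch: a minimal
user-space sketch of the first point above.  The SMP
flush_tlb_mm_range() invalidates small ranges one page at a time and
only falls back to a full flush above a ceiling (the real tunable,
tlb_single_page_flush_ceiling, shows up in the tlb.c hunk below);
the UP stub always flushed everything.  The helper names and the
ceiling value here are invented for the example.

/*
 * Toy user-space model -- NOT kernel code.  It only mimics the
 * per-page vs. full-flush decision described above.
 */
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long flush_ceiling = 33;	/* pages; illustrative value */

static void model_flush_range(unsigned long start, unsigned long end)
{
	unsigned long pages = (end - start) >> PAGE_SHIFT;

	if (pages <= flush_ceiling) {
		/* small range: invalidate each page individually */
		for (unsigned long addr = start; addr < end; addr += PAGE_SIZE)
			printf("  invlpg %#lx\n", addr);
	} else {
		/* large range: cheaper to drop the whole TLB */
		printf("  full TLB flush (%lu pages)\n", pages);
	}
}

int main(void)
{
	model_flush_range(0x400000UL, 0x403000UL);	/* 3 pages -> per page */
	model_flush_range(0x400000UL, 0x500000UL);	/* 256 pages -> full flush */
	return 0;
}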
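
A second sketch, again plain user-space C rather than kernel code,
for the lazy TLB point: with the SMP logic, the first remote flush
that reaches a CPU sitting in TLBSTATE_LAZY makes it leave the mm
(it drops out of the mm's cpumask), so every later flush of that mm
skips it.  The struct and helpers below are invented for the
illustration.

#include <stdbool.h>
#include <stdio.h>

enum tlb_state { TLBSTATE_OK, TLBSTATE_LAZY };

struct cpu_model {
	enum tlb_state state;
	bool in_mm_cpumask;	/* would this CPU still receive flush IPIs? */
	int flushes_received;
};

/* Model of one remote flush aimed at the mm this CPU holds lazily. */
static void remote_flush(struct cpu_model *cpu)
{
	if (!cpu->in_mm_cpumask)
		return;			/* already unlazied: no IPI, no flush */

	cpu->flushes_received++;
	if (cpu->state == TLBSTATE_LAZY)
		cpu->in_mm_cpumask = false;	/* roughly what leave_mm() achieves */
}

int main(void)
{
	struct cpu_model lazy_cpu = {
		.state = TLBSTATE_LAZY,
		.in_mm_cpumask = true,
	};

	for (int i = 0; i < 5; i++)
		remote_flush(&lazy_cpu);

	/* Only the first of the five flushes actually hit the lazy CPU. */
	printf("flushes received: %d\n", lazy_cpu.flushes_received);
	return 0;
}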

Beyond that, simply having the UP code around was a maintenance
burden, since it meant that every change to the TLB flush code had
to take care not to break it.

Simplify everything by deleting the UP code.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3f79e4c7
@@ -69,7 +69,7 @@ config X86
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH	if SMP
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select BUILDTIME_EXTABLE_SORT
@@ -22,8 +22,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
-	unsigned int irq_tlb_count;
 #endif
+	unsigned int irq_tlb_count;
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	unsigned int irq_thermal_count;
 #endif
@@ -37,12 +37,6 @@ typedef struct {
 #endif
 } mm_context_t;
 
-#ifdef CONFIG_SMP
 void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
 
 #endif /* _ASM_X86_MMU_H */
@@ -99,10 +99,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
 }
 
 static inline int init_new_context(struct task_struct *tsk,
@@ -3,7 +3,6 @@
 
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_SMP
 struct arch_tlbflush_unmap_batch {
 	/*
 	 * Each bit set is a CPU that potentially has a TLB entry for one of
@@ -11,6 +10,5 @@ struct arch_tlbflush_unmap_batch {
 	 */
 	struct cpumask cpumask;
 };
-#endif
 
 #endif /* _ARCH_X86_TLBBATCH_H */
@@ -7,6 +7,7 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
+#include <asm/smp.h>
 
 static inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
 #endif
 
 struct tlb_state {
-#ifdef CONFIG_SMP
 	struct mm_struct *active_mm;
 	int state;
-#endif
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
@@ -231,77 +230,6 @@ struct flush_tlb_info {
 	unsigned long end;
 };
 
-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions. *Not* intended to be called
- * directly. All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb_all();
-}
-
-static inline void local_flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-	   unsigned long start, unsigned long end, unsigned long vmflag)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   const struct flush_tlb_info *info)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
-
 #define local_flush_tlb() __flush_tlb()
 
 #define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
@@ -339,8 +267,6 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
-#endif /* SMP */
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, info)	\
 	native_flush_tlb_others(mask, info)
@@ -811,10 +811,8 @@ void __init zone_sizes_init(void)
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
 	.active_mm = &init_mm,
 	.state = 0,
-#endif
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
 };
 EXPORT_SYMBOL_GPL(cpu_tlbstate);
@@ -15,7 +15,7 @@
 #include <linux/debugfs.h>
 
 /*
- *	Smarter SMP flushing macros.
+ *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
  *
  *	These mean you can really definitely utterly forget about
@@ -28,8 +28,6 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
-#ifdef CONFIG_SMP
-
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
@@ -53,8 +51,6 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
-#endif /* CONFIG_SMP */
-
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
@@ -85,10 +81,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
 		}
 
-#ifdef CONFIG_SMP
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
 
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
@@ -146,9 +140,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_mm_ldt(next);
 #endif
-	}
-#ifdef CONFIG_SMP
-	  else {
+	} else {
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
@@ -175,11 +167,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			load_mm_ldt(next);
 		}
 	}
-#endif
 }
 
-#ifdef CONFIG_SMP
-
 /*
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
@@ -436,5 +425,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
-
-#endif	/* CONFIG_SMP */