Commit cfc31983 authored by Jan Beulich, committed by Ingo Molnar

x86, 32-bit: improve lazy TLB handling code

Impact: micro-optimize the 32-bit TLB flush code

Use the faster x86_{read,write}_percpu() accessors here.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1796316a
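
Why the new accessors are faster: per_cpu(var, cpu) is the generic accessor and needs the CPU number up front, so using it on the local CPU pays for an smp_processor_id() call plus a per-CPU offset lookup before the actual load or store. x86_{read,write}_percpu() instead address the running CPU's copy through the segment register that 32-bit x86 reserves for per-CPU data (%fs at the time), so each access compiles down to a single segment-relative mov. Below is a minimal userspace sketch of the two access patterns; percpu_block, this_cpu, and the constant values are invented for illustration and are not the kernel's real definitions.

#include <stdio.h>

#define NR_CPUS       4
#define TLBSTATE_OK   1
#define TLBSTATE_LAZY 2

struct tlb_state { int state; void *active_mm; };

/* All per-cpu variables for one CPU live in one block; each CPU gets
 * its own copy of the block. */
struct percpu_block { struct tlb_state cpu_tlbstate; };
static struct percpu_block percpu_blocks[NR_CPUS];

/* Generic accessor: needs the cpu number, i.e. an smp_processor_id()
 * call plus an offset computation before the actual load/store. */
#define per_cpu(var, cpu) (percpu_blocks[(cpu)].var)

/* Fast accessors modeled on x86_{read,write}_percpu(): the running
 * CPU's block is reachable through the %fs segment base, so the access
 * is one segment-relative mov with no cpu-id lookup. A plain pointer
 * stands in for %fs in this sketch. */
static struct percpu_block *this_cpu = &percpu_blocks[0];
#define x86_read_percpu(var)       (this_cpu->var)
#define x86_write_percpu(var, val) (this_cpu->var = (val))

int main(void)
{
	x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
	printf("fast path:    %d\n", x86_read_percpu(cpu_tlbstate.state));
	printf("generic path: %d\n", per_cpu(cpu_tlbstate, 0).state);
	return 0;
}

In the kernel proper the fast path is inline assembly rather than a pointer dereference, but the shape of the saving is the same: no cpu-id lookup on the hot path.
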
--- a/arch/x86/include/asm/mmu_context_32.h
+++ b/arch/x86/include/asm/mmu_context_32.h
@@ -4,9 +4,8 @@
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	unsigned cpu = smp_processor_id();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
+		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
@@ -20,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev,
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		per_cpu(cpu_tlbstate, cpu).active_mm = next;
+		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+		x86_write_percpu(cpu_tlbstate.active_mm, next);
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
@@ -36,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else {
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -34,9 +34,8 @@ static DEFINE_SPINLOCK(tlbstate_lock);
  */
 void leave_mm(int cpu)
 {
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		BUG();
-	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+	BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
+	cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
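
Aside from the accessor switch, the leave_mm() hunk also folds the open-coded "if (...) BUG();" into BUG_ON(). The two forms are equivalent; simplified from include/asm-generic/bug.h:

#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
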
@@ -104,8 +103,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 		 * BUG();
 		 */
-	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+	if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
+		if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -238,7 +237,7 @@ static void do_flush_tlb_all(void *info)
 	unsigned long cpu = smp_processor_id();

 	__flush_tlb_all();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(cpu);
 }