Commit 27022a32 authored by Shai Fultheim, committed by Linus Torvalds

[PATCH] percpu: cpu_tlbstate

Use the percpu infrastructure rather than an open-coded array[NR_CPUS].
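
For reference, the conversion pattern looks roughly like the sketch below (illustration only, not part of the patch; struct my_state, my_states and touch_state() are made-up names):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/cache.h>
#include <linux/threads.h>

/* Hypothetical per-CPU payload -- stands in for struct tlb_state. */
struct my_state {
	int counter;
};

/* Old style: open-coded array with one cacheline-aligned slot for
 * every possible CPU, initialized whether the CPU exists or not. */
struct my_state my_states[NR_CPUS] __cacheline_aligned =
	{ [0 ... NR_CPUS-1] = { 0 } };

/* New style: the per-cpu infrastructure lays out one copy per CPU. */
DEFINE_PER_CPU(struct my_state, my_state) ____cacheline_aligned;

static void touch_state(void)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU's id */

	my_states[cpu].counter++;		/* old: index the array */
	per_cpu(my_state, cpu).counter++;	/* new: a named CPU's copy */
	__get_cpu_var(my_state).counter++;	/* new: the current CPU's copy */

	put_cpu();		/* re-enables preemption */
}

The per-cpu form groups each CPU's data with its other per-CPU variables, which keeps accesses CPU-local and avoids sharing one global array across all CPUs.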
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d987bdff
@@ -104,7 +104,7 @@
  * about nothing of note with C stepping upwards.
  */
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
 /*
  * the following functions deal with sending IPIs between CPUs.
@@ -255,9 +255,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
  */
 static inline void leave_mm (unsigned long cpu)
 {
-	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
@@ -324,8 +324,8 @@ asmlinkage void smp_invalidate_interrupt (void)
  *	BUG();
  */
-	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
-		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
 			if (flush_va == FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -457,7 +457,7 @@ static void do_flush_tlb_all(void* info)
 	unsigned long cpu = smp_processor_id();
 	__flush_tlb_all();
-	if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
 		leave_mm(cpu);
 }
...
@@ -35,7 +35,7 @@
 int reboot_smp = 0;
 /* TLB state -- visible externally, indexed physically */
-struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
@@ -860,9 +860,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
 static inline void
 leave_mm (unsigned long cpu)
 {
-	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
@@ -883,8 +883,8 @@ smp_invalidate_interrupt(void)
 			smp_processor_id()));
 */
-	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
-		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
+		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
 			if (flush_va == FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -1218,7 +1218,7 @@ do_flush_tlb_all(void* info)
 	unsigned long cpu = smp_processor_id();
 	__flush_tlb_all();
-	if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
 		leave_mm(cpu);
 }
...
@@ -18,8 +18,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
 	unsigned cpu = smp_processor_id();
-	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
-		cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
 #endif
 }
@@ -33,8 +33,8 @@ static inline void switch_mm(struct mm_struct *prev,
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-		cpu_tlbstate[cpu].state = TLBSTATE_OK;
-		cpu_tlbstate[cpu].active_mm = next;
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
@@ -49,8 +49,8 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else {
-		cpu_tlbstate[cpu].state = TLBSTATE_OK;
-		BUG_ON(cpu_tlbstate[cpu].active_mm != next);
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
...
@@ -131,7 +131,7 @@ struct tlb_state
 	int state;
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 #endif
...