Commit a77754b4 authored by David S. Miller

[SPARC64]: Bulletproof MMU context locking.

1) Always spin_lock_init() in init_new_context().  The caller either
   clears the context out or copies the mm info from the parent; in
   both cases we need to explicitly initialize the spinlock.

2) Always do explicit IRQ disabling while taking mm->context.lock
   and ctx_alloc_lock.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9132983a
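
For context, here is a minimal standalone sketch of the two rules the commit
message describes.  The struct foo_ctx type and its helpers are hypothetical,
purely for illustration, and are not part of this patch:

/*
 * Illustrative sketch only (not from this commit): the two locking rules
 * applied to a hypothetical "struct foo_ctx".
 */
#include <linux/spinlock.h>

struct foo_ctx {
	spinlock_t lock;
	unsigned long val;
};

/* Rule 1: initialize the spinlock explicitly, even if the structure was
 * just zeroed or copied from a parent -- copied lock state is not valid. */
static void foo_ctx_init(struct foo_ctx *c)
{
	spin_lock_init(&c->lock);
	c->val = 0UL;
}

/* Rule 2: disable IRQs for the whole critical section, so an interrupt
 * handler that takes the same lock cannot deadlock against this path. */
static void foo_ctx_set(struct foo_ctx *c, unsigned long v)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->val = v;
	spin_unlock_irqrestore(&c->lock, flags);
}

Because spin_lock_irqsave()/spin_unlock_irqrestore() save and restore the
caller's IRQ state, the pattern is safe whether or not interrupts were
already disabled, which is what makes it suitable for paths like switch_mm()
in the diff below.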
@@ -639,9 +639,10 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
+	unsigned long flags;
 	int new_version;
 
-	spin_lock(&ctx_alloc_lock);
+	spin_lock_irqsave(&ctx_alloc_lock, flags);
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -677,7 +678,7 @@ void get_new_mmu_context(struct mm_struct *mm)
 out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
-	spin_unlock(&ctx_alloc_lock);
+	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
 
 	if (unlikely(new_version))
 		smp_new_mmu_context_version();
...
@@ -354,6 +354,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	spin_lock_init(&mm->context.lock);
 	mm->context.sparc64_ctx_val = 0UL;
...
@@ -67,14 +67,14 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
 /* Switch the current MM context.  Interrupts are disabled.  */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
-	unsigned long ctx_valid;
+	unsigned long ctx_valid, flags;
 	int cpu;
 
-	spin_lock(&mm->context.lock);
+	spin_lock_irqsave(&mm->context.lock, flags);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock(&mm->context.lock);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
...