Commit 60f07c8e authored by Martin Schwidefsky

s390/mm: fix race on mm->context.flush_mm

The order in __tlb_flush_mm_lazy is to flush the TLB first and then clear
the mm->context.flush_mm bit. This can lead to missed flushes, as the
bit can be set at any time; the order needs to be the other way around.

But this leads to a different race, __tlb_flush_mm_lazy may be called
on two CPUs concurrently. If mm->context.flush_mm is cleared first then
another CPU can bypass __tlb_flush_mm_lazy although the first CPU has
not done the flush yet. In a virtualized environment the time until the
flush is finally completed can be arbitrarily long.

Add a spinlock to serialize __tlb_flush_mm_lazy and use the function
in finish_arch_post_lock_switch as well.
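
For illustration only, and not part of the patch: a minimal user-space sketch
of the serialized lazy-flush pattern, using stand-in names (struct
mm_context_sketch, do_tlb_flush, tlb_flush_lazy) for mm->context,
__tlb_flush_mm and __tlb_flush_mm_lazy, and a pthread mutex in place of the
new spinlock.

#include <pthread.h>
#include <stdbool.h>

struct mm_context_sketch {
        pthread_mutex_t lock;   /* plays the role of mm->context.lock */
        bool flush_mm;          /* plays the role of mm->context.flush_mm */
};

/* Stand-in for __tlb_flush_mm(): would issue the actual TLB flush. */
static void do_tlb_flush(struct mm_context_sketch *ctx)
{
        (void)ctx;
}

/*
 * Clear the flag before flushing, so a request raised after the flush has
 * started is not lost, and hold the lock across both steps, so a concurrent
 * caller cannot observe flush_mm == 0 while the first caller's flush is
 * still pending.
 */
static void tlb_flush_lazy(struct mm_context_sketch *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        if (ctx->flush_mm) {
                ctx->flush_mm = false;
                do_tlb_flush(ctx);
        }
        pthread_mutex_unlock(&ctx->lock);
}

The point mirrors the patch below: a second caller either sees the flag still
set or waits on the lock until the first caller's flush has completed.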

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent b3e5dc45
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 
 typedef struct {
+        spinlock_t lock;
         cpumask_t cpu_attach_mask;
         atomic_t flush_count;
         unsigned int flush_mm;
@@ -27,6 +28,7 @@ typedef struct {
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)                                              \
+        .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),           \
         .context.pgtable_lock =                                            \
                         __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),   \
         .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
...
@@ -17,6 +17,7 @@
 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
 {
+        spin_lock_init(&mm->context.lock);
         spin_lock_init(&mm->context.pgtable_lock);
         INIT_LIST_HEAD(&mm->context.pgtable_list);
         spin_lock_init(&mm->context.gmap_lock);
@@ -121,8 +122,7 @@ static inline void finish_arch_post_lock_switch(void)
                 while (atomic_read(&mm->context.flush_count))
                         cpu_relax();
                 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-                if (mm->context.flush_mm)
-                        __tlb_flush_mm(mm);
+                __tlb_flush_mm_lazy(mm);
                 preempt_enable();
         }
         set_fs(current->thread.mm_segment);
...
@@ -101,10 +101,12 @@ static inline void __tlb_flush_kernel(void)
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
+        spin_lock(&mm->context.lock);
         if (mm->context.flush_mm) {
-                __tlb_flush_mm(mm);
                 mm->context.flush_mm = 0;
+                __tlb_flush_mm(mm);
         }
+        spin_unlock(&mm->context.lock);
 }
 
 /*
...