Commit bdae73cd authored by Catalin Marinas, committed by Russell King

ARM: 7790/1: Fix deferred mm switch on VIVT processors

As of commit b9d4d42a (ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on
pre-ARMv6 CPUs), the mm switching on VIVT processors is done in the
finish_arch_post_lock_switch() function to avoid whole cache flushing
with interrupts disabled. The need for a deferred mm switch is stored as a
thread flag (TIF_SWITCH_MM). However, with preemption enabled, we can
have another thread switch before finish_arch_post_lock_switch(). If the
new thread has the same mm as the previous 'next' thread, the scheduler
will not call switch_mm() and the TIF_SWITCH_MM flag won't be set for
the new thread.

This patch moves the switch-pending flag into the mm_context_t structure,
since it is specific to the mm rather than to the thread.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Marc Kleine-Budde <mkl@pengutronix.de>
Tested-by: Marc Kleine-Budde <mkl@pengutronix.de>
Cc: <stable@vger.kernel.org> # 3.5+
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 1f49856b
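For illustration only, here is a minimal stand-alone C sketch of the per-mm pending-switch pattern that the hunks below implement. The names fake_mm, hw_switch_mm(), check_and_switch() and finish_post_lock_switch() are invented for this sketch and do not exist in the kernel; in the real code the flag is mm->context.switch_pending and the re-check runs between preempt_disable() and preempt_enable_no_resched().

/*
 * Stand-alone illustration (not kernel code) of the deferred mm switch
 * with a per-mm pending flag, as used by this fix.
 */
#include <stdio.h>

struct fake_mm {
	int switch_pending;		/* mirrors mm->context.switch_pending */
};

static void hw_switch_mm(struct fake_mm *mm)
{
	printf("switching page tables for mm %p\n", (void *)mm);
}

/* Runs with interrupts disabled: only record that a switch is needed. */
static void check_and_switch(struct fake_mm *mm)
{
	mm->switch_pending = 1;
}

/* Runs later with interrupts enabled (finish_arch_post_lock_switch). */
static void finish_post_lock_switch(struct fake_mm *mm)
{
	if (mm && mm->switch_pending) {
		/* kernel: preempt_disable() here, then re-check the flag */
		if (mm->switch_pending) {
			mm->switch_pending = 0;
			hw_switch_mm(mm);
		}
		/* kernel: preempt_enable_no_resched() here */
	}
}

int main(void)
{
	struct fake_mm mm = { 0 };

	check_and_switch(&mm);		/* context switch defers the flush */
	/*
	 * Because the flag lives in the shared mm rather than in a
	 * per-thread flag, preemption to another thread that uses the
	 * same mm cannot lose the pending switch.
	 */
	finish_post_lock_switch(&mm);	/* performs the deferred switch */
	finish_post_lock_switch(&mm);	/* second call is a no-op */
	return 0;
}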
@@ -6,6 +6,8 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	atomic64_t	id;
+#else
+	int		switch_pending;
 #endif
 	unsigned int	vmalloc_seq;
 } mm_context_t;
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 		 * on non-ASID CPUs, the old mm will remain valid until the
 		 * finish_arch_post_lock_switch() call.
 		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+		mm->context.switch_pending = 1;
 	else
 		cpu_switch_mm(mm->pgd, mm);
 }
@@ -65,10 +65,22 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 	finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-		struct mm_struct *mm = current->mm;
-		cpu_switch_mm(mm->pgd, mm);
+	struct mm_struct *mm = current->mm;
+
+	if (mm && mm->context.switch_pending) {
+		/*
+		 * Preemption must be disabled during cpu_switch_mm() as we
+		 * have some stateful cache flush implementations. Check
+		 * switch_pending again in case we were preempted and the
+		 * switch to this mm was already done.
+		 */
+		preempt_disable();
+		if (mm->context.switch_pending) {
+			mm->context.switch_pending = 0;
+			cpu_switch_mm(mm->pgd, mm);
+		}
+		preempt_enable_no_resched();
 	}
 }
 
 #endif	/* CONFIG_MMU */
@@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
-#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)