Commit b9d4d42a authored by Catalin Marinas

ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on pre-ARMv6 CPUs

This patch removes the __ARCH_WANT_INTERRUPTS_ON_CTXSW definition for
ARMv5 and earlier processors. On such processors, the context switch
requires a full cache flush. To avoid high interrupt latencies, this
patch defers the mm switch to the post-lock-switch hook if interrupts
are disabled.
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent e323969c
arch/arm/include/asm/mmu.h
@@ -34,13 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#ifndef CONFIG_CPU_HAS_ASID
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-#endif
-
 #endif
arch/arm/include/asm/mmu_context.h
@@ -105,19 +105,40 @@ static inline void finish_arch_post_lock_switch(void)
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
+#ifdef CONFIG_MMU
+
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-#ifdef CONFIG_MMU
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-	cpu_switch_mm(mm->pgd, mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
-#define init_new_context(tsk,mm)	0
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
 
-#define finish_arch_post_lock_switch()	do { } while (0)
+#endif	/* CONFIG_MMU */
+
+#define init_new_context(tsk,mm)	0
 
 #endif	/* CONFIG_CPU_HAS_ASID */
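The idiom this diff implements (set a thread flag instead of doing expensive work while interrupts are off, then perform the work from finish_arch_post_lock_switch() once the runqueue lock is dropped and interrupts are enabled again) is easy to miss in diff form. The user-space sketch below reproduces its shape only; every name in it (irqs_off, slow_switch_mm, hw_mm, task_mm, TIF_SWITCH_MM_BIT) is a hypothetical stand-in for irqs_disabled(), cpu_switch_mm(), the MMU state, current->mm and TIF_SWITCH_MM, not kernel API.

/*
 * Hedged sketch of the deferral idiom above; all names are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_SWITCH_MM_BIT	(1u << 0)	/* analogue of TIF_SWITCH_MM */

static unsigned int thread_flags;		/* analogue of thread_info->flags */
static const char *hw_mm = "init_mm";		/* mm the "hardware" is on */
static const char *task_mm = "init_mm";		/* analogue of current->mm */

/* Stand-in for cpu_switch_mm(): expensive on VIVT, implies a full flush. */
static void slow_switch_mm(const char *next)
{
	hw_mm = next;
	printf("cpu_switch_mm -> %s (full cache flush here)\n", next);
}

/* Analogue of check_and_switch_context(): may run with interrupts off. */
static void check_and_switch_context(const char *next, bool irqs_off)
{
	task_mm = next;
	if (irqs_off)
		/* Defer the flush and keep running on the old mm; safe on
		 * UP because the old mm stays valid until the hook runs. */
		thread_flags |= TIF_SWITCH_MM_BIT;
	else
		slow_switch_mm(next);
}

/* Analogue of finish_arch_post_lock_switch(): runs after the runqueue
 * lock is dropped and interrupts are enabled again. */
static void finish_arch_post_lock_switch(void)
{
	if (thread_flags & TIF_SWITCH_MM_BIT) {
		thread_flags &= ~TIF_SWITCH_MM_BIT;
		slow_switch_mm(task_mm);	/* the deferred switch */
	}
}

int main(void)
{
	check_and_switch_context("task_a_mm", true);	/* deferred */
	printf("still running on %s\n", hw_mm);
	finish_arch_post_lock_switch();			/* flush happens now */
	return 0;
}

As in the patch, the deferral is only correct because non-ASID ARM cores are uniprocessor-only, so the old mm cannot go away between the deferred request and the hook running.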