Commit 88485be5 authored by Will Deacon

scs: Move scs_overflow_check() out of architecture code

There is nothing architecture-specific about scs_overflow_check() as
it's just a trivial wrapper around scs_corrupted().

For parity with task_stack_end_corrupted(), rename scs_corrupted() to
task_scs_end_corrupted() and call it from schedule_debug() when
CONFIG_SCHED_STACK_END_CHECK is enabled, which better reflects its
purpose as a debug feature to catch inadvertent overflow of the SCS.
Finally, remove the unused scs_overflow_check() function entirely.

This has absolutely no impact on architectures that do not support SCS
(currently arm64 only).
Tested-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 711e8b0d
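
Background for the check being renamed here, summarised from the helper visible in the include/linux/scs.h hunk below: the kernel plants a magic value (SCS_END_MAGIC) in the last slot of each shadow call stack when the stack is set up, and the predicate reports overflow if that sentinel has been clobbered or the shadow stack pointer has consumed the whole region. The standalone user-space sketch below mirrors that sentinel pattern; all demo_* names and the constants are hypothetical illustrations, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative constants; the kernel's SCS_SIZE and SCS_END_MAGIC differ. */
#define DEMO_SCS_SIZE   128UL
#define DEMO_END_MAGIC  0x5ca5c0deUL

static unsigned long demo_scs[DEMO_SCS_SIZE / sizeof(unsigned long)];
static unsigned long *demo_scs_sp = demo_scs;   /* shadow stack grows upwards */

/* The last slot of the region holds the sentinel (cf. __scs_magic()). */
static unsigned long *demo_scs_magic(void)
{
        return demo_scs + DEMO_SCS_SIZE / sizeof(unsigned long) - 1;
}

/* Same shape as task_scs_end_corrupted(): flag overflow if the used portion
 * has reached the end of the region or the sentinel no longer matches. */
static bool demo_scs_end_corrupted(void)
{
        unsigned long sz = (unsigned long)((char *)demo_scs_sp - (char *)demo_scs);

        return sz >= DEMO_SCS_SIZE - 1 || *demo_scs_magic() != DEMO_END_MAGIC;
}

int main(void)
{
        *demo_scs_magic() = DEMO_END_MAGIC;     /* planted at allocation time */
        printf("fresh:     corrupted=%d\n", demo_scs_end_corrupted());

        *demo_scs_magic() = 0;                  /* simulate an overflowing push */
        printf("clobbered: corrupted=%d\n", demo_scs_end_corrupted());
        return 0;
}
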
arch/arm64/include/asm/scs.h
@@ -24,24 +24,6 @@
         .endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
-#else /* __ASSEMBLY__ */
-
-#include <linux/scs.h>
-
-#ifdef CONFIG_SHADOW_CALL_STACK
-
-static inline void scs_overflow_check(struct task_struct *tsk)
-{
-        if (unlikely(scs_corrupted(tsk)))
-                panic("corrupted shadow stack detected inside scheduler\n");
-}
-
-#else /* CONFIG_SHADOW_CALL_STACK */
-
-static inline void scs_overflow_check(struct task_struct *tsk) {}
-
-#endif /* CONFIG_SHADOW_CALL_STACK */
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_SCS_H */
arch/arm64/kernel/process.c
@@ -52,7 +52,6 @@
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
-#include <asm/scs.h>
 #include <asm/stacktrace.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
@@ -516,7 +515,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
         entry_task_switch(next);
         uao_thread_switch(next);
         ssbs_thread_switch(next);
-        scs_overflow_check(next);
 
         /*
          * Complete any pending TLB or cache maintenance on this CPU in case
arch/arm64/kernel/scs.c
@@ -6,7 +6,7 @@
  */
 
 #include <linux/percpu.h>
-#include <asm/scs.h>
+#include <linux/scs.h>
 
 /* Allocate a static per-CPU shadow stack */
 #define DEFINE_SCS(name)                        \
include/linux/scs.h
@@ -47,7 +47,7 @@ static inline unsigned long *__scs_magic(void *s)
         return (unsigned long *)(s + SCS_SIZE) - 1;
 }
 
-static inline bool scs_corrupted(struct task_struct *tsk)
+static inline bool task_scs_end_corrupted(struct task_struct *tsk)
 {
         unsigned long *magic = __scs_magic(task_scs(tsk));
         unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
@@ -60,8 +60,8 @@ static inline bool scs_corrupted(struct task_struct *tsk)
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
-static inline bool scs_corrupted(struct task_struct *tsk) { return false; }
 static inline void scs_release(struct task_struct *tsk) {}
+static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
 
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
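One detail worth noting in the hunk above: under !CONFIG_SHADOW_CALL_STACK the replacement is a constant-false static inline, so the new call site in schedule_debug() compiles away entirely on architectures without SCS, which is what lets the commit message claim zero impact on them. A minimal sketch of that stub idiom follows; the DEMO_* names are hypothetical, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Build with -DDEMO_SHADOW_CALL_STACK to take the "enabled" branch. */
#ifdef DEMO_SHADOW_CALL_STACK
static unsigned long demo_end_magic = 0x5ca5c0deUL;

static inline bool demo_scs_end_corrupted(void)
{
        /* Enabled: actually test the end sentinel. */
        return demo_end_magic != 0x5ca5c0deUL;
}
#else
/* Disabled: a constant-false inline stub. Callers' if-branches become
 * dead code the compiler removes, so the check costs nothing. */
static inline bool demo_scs_end_corrupted(void)
{
        return false;
}
#endif

int main(void)
{
        if (demo_scs_end_corrupted())
                puts("corrupted shadow stack");
        else
                puts("shadow stack ok (or checking disabled)");
        return 0;
}
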
kernel/sched/core.c
@@ -3878,6 +3878,9 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 #ifdef CONFIG_SCHED_STACK_END_CHECK
         if (task_stack_end_corrupted(prev))
                 panic("corrupted stack end detected inside scheduler\n");
+
+        if (task_scs_end_corrupted(prev))
+                panic("corrupted shadow stack detected inside scheduler\n");
 #endif
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
kernel/scs.c
@@ -98,7 +98,8 @@ void scs_release(struct task_struct *tsk)
         if (!s)
                 return;
 
-        WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
+        WARN(task_scs_end_corrupted(tsk),
+             "corrupted shadow stack detected when freeing task\n");
         scs_check_usage(tsk);
         scs_free(s);
 }