Commit 9772b7f0 authored by Will Deacon's avatar Will Deacon

Merge branch 'for-next/stacktrace' into for-next/core

* for-next/stacktrace:
  arm64: move PAC masks to <asm/pointer_auth.h>
  arm64: use XPACLRI to strip PAC
  arm64: avoid redundant PAC stripping in __builtin_return_address()
  arm64: stacktrace: always inline core stacktrace functions
  arm64: stacktrace: move dump functions to end of file
  arm64: stacktrace: recover return address for first entry
parents 9651f00e de1702f6
...@@ -366,6 +366,20 @@ config ARCH_PROC_KCORE_TEXT ...@@ -366,6 +366,20 @@ config ARCH_PROC_KCORE_TEXT
config BROKEN_GAS_INST config BROKEN_GAS_INST
def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n) def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
	bool
	# Clang's __builtin_return_address() strips the PAC since 12.0.0
	# https://reviews.llvm.org/D75044
	default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
	# GCC's __builtin_return_address() strips the PAC since 11.1.0,
	# and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
	default y if CC_IS_GCC && (GCC_VERSION >= 110100)
	default y if CC_IS_GCC && (GCC_VERSION >= 100200) && (GCC_VERSION < 110000)
	default y if CC_IS_GCC && (GCC_VERSION >= 90400) && (GCC_VERSION < 100000)
	default y if CC_IS_GCC && (GCC_VERSION >= 80500) && (GCC_VERSION < 90000)
	default n
config KASAN_SHADOW_OFFSET config KASAN_SHADOW_OFFSET
hex hex
depends on KASAN_GENERIC || KASAN_SW_TAGS depends on KASAN_GENERIC || KASAN_SW_TAGS
......
...@@ -8,19 +8,33 @@ ...@@ -8,19 +8,33 @@
#define ARM64_ASM_PREAMBLE #define ARM64_ASM_PREAMBLE
#endif #endif
/* #define xpaclri(ptr) \
* The EL0/EL1 pointer bits used by a pointer authentication code. ({ \
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply. register unsigned long __xpaclri_ptr asm("x30") = (ptr); \
*/ \
#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual) asm( \
#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual) ARM64_ASM_PREAMBLE \
" hint #7\n" \
: "+r" (__xpaclri_ptr)); \
\
__xpaclri_ptr; \
})
/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */ #ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
#define ptrauth_clear_pac(ptr) \ #define ptrauth_strip_kernel_insn_pac(ptr) xpaclri(ptr)
((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \ #else
(ptr & ~ptrauth_user_pac_mask())) #define ptrauth_strip_kernel_insn_pac(ptr) (ptr)
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
#define ptrauth_strip_user_insn_pac(ptr) xpaclri(ptr)
#else
#define ptrauth_strip_user_insn_pac(ptr) (ptr)
#endif
#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
#define __builtin_return_address(val) \ #define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val))) (void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
#endif
#endif /* __ASM_COMPILER_H */ #endif /* __ASM_COMPILER_H */
...@@ -10,6 +10,13 @@ ...@@ -10,6 +10,13 @@
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
*/
#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
#define PR_PAC_ENABLED_KEYS_MASK \ #define PR_PAC_ENABLED_KEYS_MASK \
(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY) (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
...@@ -97,11 +104,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys, ...@@ -97,11 +104,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
unsigned long enabled); unsigned long enabled);
extern int ptrauth_get_enabled_keys(struct task_struct *tsk); extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
{
return ptrauth_clear_pac(ptr);
}
static __always_inline void ptrauth_enable(void) static __always_inline void ptrauth_enable(void)
{ {
if (!system_supports_address_auth()) if (!system_supports_address_auth())
...@@ -133,7 +135,6 @@ static __always_inline void ptrauth_enable(void) ...@@ -133,7 +135,6 @@ static __always_inline void ptrauth_enable(void)
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL) #define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL)
#define ptrauth_get_enabled_keys(tsk) (-EINVAL) #define ptrauth_get_enabled_keys(tsk) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_suspend_exit() #define ptrauth_suspend_exit()
#define ptrauth_thread_init_user() #define ptrauth_thread_init_user()
#define ptrauth_thread_switch_user(tsk) #define ptrauth_thread_switch_user(tsk)
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/pgtable-hwdef.h> #include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
static inline u64 get_tcr_el1_t1sz(void); static inline u64 get_tcr_el1_t1sz(void);
......
...@@ -38,7 +38,7 @@ user_backtrace(struct frame_tail __user *tail, ...@@ -38,7 +38,7 @@ user_backtrace(struct frame_tail __user *tail,
if (err) if (err)
return NULL; return NULL;
lr = ptrauth_strip_insn_pac(buftail.lr); lr = ptrauth_strip_user_insn_pac(buftail.lr);
perf_callchain_store(entry, lr); perf_callchain_store(entry, lr);
......
...@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs) ...@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs)
if (!user_mode(regs)) { if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc); printk("pc : %pS\n", (void *)regs->pc);
printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr)); printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
} else { } else {
printk("pc : %016llx\n", regs->pc); printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr); printk("lr : %016llx\n", lr);
......
...@@ -25,8 +25,9 @@ ...@@ -25,8 +25,9 @@
* *
* The regs must be on a stack currently owned by the calling task. * The regs must be on a stack currently owned by the calling task.
*/ */
static __always_inline void unwind_init_from_regs(struct unwind_state *state, static __always_inline void
struct pt_regs *regs) unwind_init_from_regs(struct unwind_state *state,
struct pt_regs *regs)
{ {
unwind_init_common(state, current); unwind_init_common(state, current);
...@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state, ...@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
* *
* The function which invokes this must be noinline. * The function which invokes this must be noinline.
*/ */
static __always_inline void unwind_init_from_caller(struct unwind_state *state) static __always_inline void
unwind_init_from_caller(struct unwind_state *state)
{ {
unwind_init_common(state, current); unwind_init_common(state, current);
...@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state) ...@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
* duration of the unwind, or the unwind will be bogus. It is never valid to * duration of the unwind, or the unwind will be bogus. It is never valid to
* call this for the current task. * call this for the current task.
*/ */
static __always_inline void unwind_init_from_task(struct unwind_state *state, static __always_inline void
struct task_struct *task) unwind_init_from_task(struct unwind_state *state,
struct task_struct *task)
{ {
unwind_init_common(state, task); unwind_init_common(state, task);
...@@ -69,6 +72,32 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state, ...@@ -69,6 +72,32 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
state->pc = thread_saved_pc(task); state->pc = thread_saved_pc(task);
} }
/*
 * Recover the real return address when state->pc holds a trampoline
 * address patched in by the function graph tracer or kretprobes,
 * so the unwound trace shows the original caller.
 *
 * Returns 0 on success, -EINVAL if a return_to_handler entry cannot
 * be mapped back to its original return address.
 */
static __always_inline int
unwind_recover_return_address(struct unwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * The function graph tracer replaced the return address (LR)
		 * in this frame with return_to_handler; look up the original
		 * value on the task's ret_stack.
		 */
		orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc)) {
		/* Map the kretprobe trampoline back to the probed caller. */
		state->pc = kretprobe_find_ret_addr(state->task,
						    (void *)state->fp,
						    &state->kr_cur);
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
/* /*
* Unwind from one frame record (A) to the next frame record (B). * Unwind from one frame record (A) to the next frame record (B).
* *
...@@ -76,7 +105,8 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state, ...@@ -76,7 +105,8 @@ static __always_inline void unwind_init_from_task(struct unwind_state *state,
* records (e.g. a cycle), determined based on the location and fp value of A * records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B. * and the location (but not the fp value) of B.
*/ */
static int notrace unwind_next(struct unwind_state *state) static __always_inline int
unwind_next(struct unwind_state *state)
{ {
struct task_struct *tsk = state->task; struct task_struct *tsk = state->task;
unsigned long fp = state->fp; unsigned long fp = state->fp;
...@@ -90,37 +120,18 @@ static int notrace unwind_next(struct unwind_state *state) ...@@ -90,37 +120,18 @@ static int notrace unwind_next(struct unwind_state *state)
if (err) if (err)
return err; return err;
state->pc = ptrauth_strip_insn_pac(state->pc); state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(state->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
(void *)state->fp);
if (WARN_ON_ONCE(state->pc == orig_pc))
return -EINVAL;
state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
if (is_kretprobe_trampoline(state->pc))
state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif
return 0; return unwind_recover_return_address(state);
} }
NOKPROBE_SYMBOL(unwind_next);
static void notrace unwind(struct unwind_state *state, static __always_inline void
stack_trace_consume_fn consume_entry, void *cookie) unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
void *cookie)
{ {
if (unwind_recover_return_address(state))
return;
while (1) { while (1) {
int ret; int ret;
...@@ -131,40 +142,6 @@ static void notrace unwind(struct unwind_state *state, ...@@ -131,40 +142,6 @@ static void notrace unwind(struct unwind_state *state,
break; break;
} }
} }
NOKPROBE_SYMBOL(unwind);
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
char *loglvl = arg;
printk("%s %pSb\n", loglvl, (void *)where);
return true;
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl)
{
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (regs && user_mode(regs))
return;
if (!tsk)
tsk = current;
if (!try_get_task_stack(tsk))
return;
printk("%sCall trace:\n", loglvl);
arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
dump_backtrace(NULL, tsk, loglvl);
barrier();
}
/* /*
* Per-cpu stacks are only accessible when unwinding the current task in a * Per-cpu stacks are only accessible when unwinding the current task in a
...@@ -230,3 +207,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, ...@@ -230,3 +207,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
unwind(&state, consume_entry, cookie); unwind(&state, consume_entry, cookie);
} }
/*
 * arch_stack_walk() consume callback: print one backtrace entry.
 * @arg carries the printk log-level prefix string; always continues
 * the walk by returning true.
 */
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	printk("%s %pSb\n", (char *)arg, (void *)where);
	return true;
}
/*
 * Print a backtrace for @tsk (or current if NULL) at log level @loglvl,
 * optionally starting from the register state in @regs.
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	/* Only kernel-mode register state can be unwound here. */
	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	/* Pin the task's stack so it cannot be freed while we walk it. */
	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	/* Balances try_get_task_stack() above. */
	put_task_stack(tsk);
}
/*
 * Generic show_stack() entry point: dump a backtrace for @tsk.
 * @sp is unused on arm64; the walk starts from saved task state.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	/* presumably here to stop the call above becoming a tail call — TODO confirm */
	barrier();
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment