Commit 2a8db5ec authored by Conor Dooley, committed by Palmer Dabbelt

RISC-V: Don't check text_mutex during stop_machine

We're currently using stop_machine() to update ftrace & kprobes, which
means that the thread that takes text_mutex during setup may not be the same
as the thread that eventually patches the code.  This isn't actually a
race because the lock is still held (preventing any other concurrent
accesses) and there is only one thread running during stop_machine(),
but it does trigger a lockdep failure.

This patch just elides the lockdep check during stop_machine.

Fixes: c15ac4fd ("riscv/ftrace: Add dynamic function tracer support")
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Changbin Du <changbin.du@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20230303143754.4005217-1-conor.dooley@microchip.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 76950340
...@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); ...@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop #define ftrace_init_nop ftrace_init_nop
#endif #endif
#endif #endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* _ASM_RISCV_FTRACE_H */ #endif /* _ASM_RISCV_FTRACE_H */
...@@ -9,4 +9,6 @@ ...@@ -9,4 +9,6 @@
int patch_text_nosync(void *addr, const void *insns, size_t len); int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns); int patch_text(void *addr, u32 *insns, int ninsns);
extern int riscv_patch_in_stop_machine;
#endif /* _ASM_RISCV_PATCH_H */ #endif /* _ASM_RISCV_PATCH_H */
...@@ -15,10 +15,19 @@ ...@@ -15,10 +15,19 @@
/*
 * Take text_mutex on behalf of the upcoming ftrace patching pass.
 * Called by the ftrace core before any code modification begins.
 */
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nice with text_mutex, we use this flag
	 * to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}
/*
 * Counterpart of ftrace_arch_code_modify_prepare(): clear the
 * stop_machine flag before dropping text_mutex, so the lockdep
 * check in patch_insn_write() is re-enabled for normal callers.
 */
void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}
...@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) ...@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{ {
int out; int out;
ftrace_arch_code_modify_prepare(); mutex_lock(&text_mutex);
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
ftrace_arch_code_modify_post_process(); mutex_unlock(&text_mutex);
return out; return out;
} }
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <asm/kprobes.h> #include <asm/kprobes.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h> #include <asm/patch.h>
struct patch_insn { struct patch_insn {
...@@ -20,6 +21,8 @@ struct patch_insn { ...@@ -20,6 +21,8 @@ struct patch_insn {
atomic_t cpu_count; atomic_t cpu_count;
}; };
int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/* /*
* The fix_to_virt(, idx) needs a const value (not a dynamic variable of * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
...@@ -60,8 +63,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len) ...@@ -60,8 +63,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
* Before reaching here, it was expected to lock the text_mutex * Before reaching here, it was expected to lock the text_mutex
* already, so we don't need to give another lock here and could * already, so we don't need to give another lock here and could
* ensure that it was safe between each cores. * ensure that it was safe between each cores.
*
* We're currently using stop_machine() for ftrace & kprobes, and while
* that ensures text_mutex is held before installing the mappings it
* does not ensure text_mutex is held by the calling thread. That's
* safe but triggers a lockdep failure, so just elide it for that
* specific case.
*/ */
lockdep_assert_held(&text_mutex); if (!riscv_patch_in_stop_machine)
lockdep_assert_held(&text_mutex);
if (across_pages) if (across_pages)
patch_map(addr + len, FIX_TEXT_POKE1); patch_map(addr + len, FIX_TEXT_POKE1);
...@@ -125,6 +135,7 @@ NOKPROBE_SYMBOL(patch_text_cb); ...@@ -125,6 +135,7 @@ NOKPROBE_SYMBOL(patch_text_cb);
/*
 * Patch @ninsns instructions at @addr on all CPUs via stop_machine().
 * Caller must already hold text_mutex (asserted below).
 */
int patch_text(void *addr, u32 *insns, int ninsns)
{
	int ret;
	struct patch_insn patch = {
		.addr = addr,
		.insns = insns,
		.ninsns = ninsns,
		.cpu_count = ATOMIC_INIT(0),
	};

	/*
	 * kprobes takes text_mutex before calling patch_text(), but as this
	 * then calls stop_machine(), the lockdep assertion in
	 * patch_insn_write() gets confused by the context in which the lock
	 * is taken.
	 * Instead, ensure the lock is held before calling stop_machine(), and
	 * set riscv_patch_in_stop_machine to skip the check in
	 * patch_insn_write().
	 */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;

	return ret;
}
NOKPROBE_SYMBOL(patch_text);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment