Commit f1c6ece2 authored by Andrea Righi, committed by Ingo Molnar

kprobes: Fix potential deadlock in kprobe_optimizer()

lockdep reports the following deadlock scenario:

 WARNING: possible circular locking dependency detected

 kworker/1:1/48 is trying to acquire lock:
 000000008d7a62b2 (text_mutex){+.+.}, at: kprobe_optimizer+0x163/0x290

 but task is already holding lock:
 00000000850b5e2d (module_mutex){+.+.}, at: kprobe_optimizer+0x31/0x290

 which lock already depends on the new lock.

 the existing dependency chain (in reverse order) is:

 -> #1 (module_mutex){+.+.}:
        __mutex_lock+0xac/0x9f0
        mutex_lock_nested+0x1b/0x20
        set_all_modules_text_rw+0x22/0x90
        ftrace_arch_code_modify_prepare+0x1c/0x20
        ftrace_run_update_code+0xe/0x30
        ftrace_startup_enable+0x2e/0x50
        ftrace_startup+0xa7/0x100
        register_ftrace_function+0x27/0x70
        arm_kprobe+0xb3/0x130
        enable_kprobe+0x83/0xa0
        enable_trace_kprobe.part.0+0x2e/0x80
        kprobe_register+0x6f/0xc0
        perf_trace_event_init+0x16b/0x270
        perf_kprobe_init+0xa7/0xe0
        perf_kprobe_event_init+0x3e/0x70
        perf_try_init_event+0x4a/0x140
        perf_event_alloc+0x93a/0xde0
        __do_sys_perf_event_open+0x19f/0xf30
        __x64_sys_perf_event_open+0x20/0x30
        do_syscall_64+0x65/0x1d0
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

 -> #0 (text_mutex){+.+.}:
        __lock_acquire+0xfcb/0x1b60
        lock_acquire+0xca/0x1d0
        __mutex_lock+0xac/0x9f0
        mutex_lock_nested+0x1b/0x20
        kprobe_optimizer+0x163/0x290
        process_one_work+0x22b/0x560
        worker_thread+0x50/0x3c0
        kthread+0x112/0x150
        ret_from_fork+0x3a/0x50

 other info that might help us debug this:

  Possible unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(module_mutex);
                                lock(text_mutex);
                                lock(module_mutex);
   lock(text_mutex);

  *** DEADLOCK ***
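
Written out as code, the two conflicting orderings look roughly like this
(a simplified sketch of the call chains reported above, not the literal
kernel code):

 /* chain #1: ftrace code patching establishes text_mutex -> module_mutex */
 ftrace_arch_code_modify_prepare()
	mutex_lock(&text_mutex);
	set_all_modules_text_rw();	/* acquires module_mutex */

 /* chain #0: kprobe_optimizer() so far used module_mutex -> text_mutex */
 kprobe_optimizer()
	mutex_lock(&module_mutex);
	do_optimize_kprobes();		/* acquires text_mutex */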

As a reproducer I've been using bcc's funccount.py
(https://github.com/iovisor/bcc/blob/master/tools/funccount.py),
for example:

 # ./funccount.py '*interrupt*'

That immediately triggers the lockdep splat.

Fix by acquiring text_mutex before module_mutex in kprobe_optimizer().
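
With this change the nesting in kprobe_optimizer() becomes (a simplified
excerpt of the ordering established by the patch below, with the function
body elided):

 static void kprobe_optimizer(struct work_struct *work)
 {
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/* ... optimize/unoptimize/clean up kprobes ... */

	mutex_unlock(&module_mutex);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);
 }
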
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d5b844a2 ("ftrace/x86: Remove possible deadlock between register_kprobe() and ftrace_run_update_code()")
Link: http://lkml.kernel.org/r/20190812184302.GA7010@xps-13
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 77d76032
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+	lockdep_assert_held(&text_mutex);
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
 	    list_empty(&optimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_optimize_kprobes(&optimizing_list);
-	mutex_unlock(&text_mutex);
 }
 
 /*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	lockdep_assert_held(&text_mutex);
 	/* See comment in do_optimize_kprobes() */
 	lockdep_assert_cpus_held();
 
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
 	if (list_empty(&unoptimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
 		} else
 			list_del_init(&op->list);
 	}
-	mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
 	cpus_read_lock();
+	mutex_lock(&text_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);