Commit 8391aa4b authored by Linus Torvalds

Merge tag 'trace-v6.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes for 6.1-rc3:

 - Fixed NULL pointer dereference in the ring buffer wait-waiters code
   for machines that have fewer CPUs than what nr_cpu_ids returns.

   The buffer array is of size nr_cpu_ids, but only the online CPUs get
   initialized.

 - Fixed use after free call in ftrace_shutdown.

 - Fix accounting of if a kprobe is enabled

 - Fix NULL pointer dereference on error path of fprobe rethook_alloc().

 - Fix unregistering of fprobe_kprobe_handler

 - Fix memory leak in kprobe test module

* tag 'trace-v6.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: kprobe: Fix memory leak in test_gen_kprobe/kretprobe_cmd()
  tracing/fprobe: Fix to check whether fprobe is registered correctly
  fprobe: Check rethook_alloc() return in rethook initialization
  kprobe: reverse kp->flags when arm_kprobe failed
  ftrace: Fix use-after-free for dynamic ftrace_ops
  ring-buffer: Check for NULL cpu_buffer in ring_buffer_wake_waiters()
parents 2f5065a0 66f0919c
...@@ -2429,8 +2429,11 @@ int enable_kprobe(struct kprobe *kp) ...@@ -2429,8 +2429,11 @@ int enable_kprobe(struct kprobe *kp)
if (!kprobes_all_disarmed && kprobe_disabled(p)) { if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p->flags &= ~KPROBE_FLAG_DISABLED; p->flags &= ~KPROBE_FLAG_DISABLED;
ret = arm_kprobe(p); ret = arm_kprobe(p);
if (ret) if (ret) {
p->flags |= KPROBE_FLAG_DISABLED; p->flags |= KPROBE_FLAG_DISABLED;
if (p != kp)
kp->flags |= KPROBE_FLAG_DISABLED;
}
} }
out: out:
mutex_unlock(&kprobe_mutex); mutex_unlock(&kprobe_mutex);
......
...@@ -141,6 +141,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num) ...@@ -141,6 +141,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
return -E2BIG; return -E2BIG;
fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler); fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
if (!fp->rethook)
return -ENOMEM;
for (i = 0; i < size; i++) { for (i = 0; i < size; i++) {
struct fprobe_rethook_node *node; struct fprobe_rethook_node *node;
...@@ -301,7 +303,8 @@ int unregister_fprobe(struct fprobe *fp) ...@@ -301,7 +303,8 @@ int unregister_fprobe(struct fprobe *fp)
{ {
int ret; int ret;
if (!fp || fp->ops.func != fprobe_handler) if (!fp || (fp->ops.saved_func != fprobe_handler &&
fp->ops.saved_func != fprobe_kprobe_handler))
return -EINVAL; return -EINVAL;
/* /*
......
...@@ -3028,18 +3028,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) ...@@ -3028,18 +3028,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
command |= FTRACE_UPDATE_TRACE_FUNC; command |= FTRACE_UPDATE_TRACE_FUNC;
} }
if (!command || !ftrace_enabled) { if (!command || !ftrace_enabled)
/* goto out;
* If these are dynamic or per_cpu ops, they still
* need their data freed. Since, function tracing is
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
goto free_ops;
return 0;
}
/* /*
* If the ops uses a trampoline, then it needs to be * If the ops uses a trampoline, then it needs to be
...@@ -3076,6 +3066,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) ...@@ -3076,6 +3066,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
removed_ops = NULL; removed_ops = NULL;
ops->flags &= ~FTRACE_OPS_FL_REMOVING; ops->flags &= ~FTRACE_OPS_FL_REMOVING;
out:
/* /*
* Dynamic ops may be freed, we must make sure that all * Dynamic ops may be freed, we must make sure that all
* callers are done before leaving this function. * callers are done before leaving this function.
...@@ -3103,7 +3094,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) ...@@ -3103,7 +3094,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (IS_ENABLED(CONFIG_PREEMPTION)) if (IS_ENABLED(CONFIG_PREEMPTION))
synchronize_rcu_tasks(); synchronize_rcu_tasks();
free_ops:
ftrace_trampoline_free(ops); ftrace_trampoline_free(ops);
} }
......
...@@ -100,20 +100,20 @@ static int __init test_gen_kprobe_cmd(void) ...@@ -100,20 +100,20 @@ static int __init test_gen_kprobe_cmd(void)
KPROBE_GEN_TEST_FUNC, KPROBE_GEN_TEST_FUNC,
KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1); KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1);
if (ret) if (ret)
goto free; goto out;
/* Use kprobe_event_add_fields to add the rest of the fields */ /* Use kprobe_event_add_fields to add the rest of the fields */
ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3); ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3);
if (ret) if (ret)
goto free; goto out;
/* /*
* This actually creates the event. * This actually creates the event.
*/ */
ret = kprobe_event_gen_cmd_end(&cmd); ret = kprobe_event_gen_cmd_end(&cmd);
if (ret) if (ret)
goto free; goto out;
/* /*
* Now get the gen_kprobe_test event file. We need to prevent * Now get the gen_kprobe_test event file. We need to prevent
...@@ -136,13 +136,11 @@ static int __init test_gen_kprobe_cmd(void) ...@@ -136,13 +136,11 @@ static int __init test_gen_kprobe_cmd(void)
goto delete; goto delete;
} }
out: out:
kfree(buf);
return ret; return ret;
delete: delete:
/* We got an error after creating the event, delete it */ /* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kprobe_test"); ret = kprobe_event_delete("gen_kprobe_test");
free:
kfree(buf);
goto out; goto out;
} }
...@@ -170,14 +168,14 @@ static int __init test_gen_kretprobe_cmd(void) ...@@ -170,14 +168,14 @@ static int __init test_gen_kretprobe_cmd(void)
KPROBE_GEN_TEST_FUNC, KPROBE_GEN_TEST_FUNC,
"$retval"); "$retval");
if (ret) if (ret)
goto free; goto out;
/* /*
* This actually creates the event. * This actually creates the event.
*/ */
ret = kretprobe_event_gen_cmd_end(&cmd); ret = kretprobe_event_gen_cmd_end(&cmd);
if (ret) if (ret)
goto free; goto out;
/* /*
* Now get the gen_kretprobe_test event file. We need to * Now get the gen_kretprobe_test event file. We need to
...@@ -201,13 +199,11 @@ static int __init test_gen_kretprobe_cmd(void) ...@@ -201,13 +199,11 @@ static int __init test_gen_kretprobe_cmd(void)
goto delete; goto delete;
} }
out: out:
kfree(buf);
return ret; return ret;
delete: delete:
/* We got an error after creating the event, delete it */ /* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kretprobe_test"); ret = kprobe_event_delete("gen_kretprobe_test");
free:
kfree(buf);
goto out; goto out;
} }
......
...@@ -937,6 +937,9 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) ...@@ -937,6 +937,9 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *rbwork; struct rb_irq_work *rbwork;
if (!buffer)
return;
if (cpu == RING_BUFFER_ALL_CPUS) { if (cpu == RING_BUFFER_ALL_CPUS) {
/* Wake up individual ones too. One level recursion */ /* Wake up individual ones too. One level recursion */
...@@ -945,7 +948,15 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) ...@@ -945,7 +948,15 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
rbwork = &buffer->irq_work; rbwork = &buffer->irq_work;
} else { } else {
if (WARN_ON_ONCE(!buffer->buffers))
return;
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
return;
cpu_buffer = buffer->buffers[cpu]; cpu_buffer = buffer->buffers[cpu];
/* The CPU buffer may not have been initialized yet */
if (!cpu_buffer)
return;
rbwork = &cpu_buffer->irq_work; rbwork = &cpu_buffer->irq_work;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment