Commit c02c7e65 authored by Masami Hiramatsu's avatar Masami Hiramatsu Committed by Steven Rostedt

tracing/kprobes: Use rcu_dereference_raw for tp->files

Use rcu_dereference_raw() for accessing tp->files. Because the
write-side uses rcu_assign_pointer() for memory barrier,
the read-side also has to use rcu_dereference_raw() with
read memory barrier.

Link: http://lkml.kernel.org/r/20130513115834.6545.17022.stgit@mhiramat-M0-7522

Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tom Zanussi <tom.zanussi@intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 60705c89
...@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event, ...@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event,
static int trace_probe_nr_files(struct trace_probe *tp) static int trace_probe_nr_files(struct trace_probe *tp)
{ {
struct ftrace_event_file **file = tp->files; struct ftrace_event_file **file;
int ret = 0; int ret = 0;
/*
* Since all tp->files updater is protected by probe_enable_lock,
* we don't need to lock an rcu_read_lock.
*/
file = rcu_dereference_raw(tp->files);
if (file) if (file)
while (*(file++)) while (*(file++))
ret++; ret++;
...@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) ...@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
mutex_lock(&probe_enable_lock); mutex_lock(&probe_enable_lock);
if (file) { if (file) {
struct ftrace_event_file **new, **old = tp->files; struct ftrace_event_file **new, **old;
int n = trace_probe_nr_files(tp); int n = trace_probe_nr_files(tp);
old = rcu_dereference_raw(tp->files);
/* 1 is for new one and 1 is for stopper */ /* 1 is for new one and 1 is for stopper */
new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
GFP_KERNEL); GFP_KERNEL);
...@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) ...@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
static int static int
trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
{ {
struct ftrace_event_file **files;
int i; int i;
if (tp->files) { /*
for (i = 0; tp->files[i]; i++) * Since all tp->files updater is protected by probe_enable_lock,
if (tp->files[i] == file) * we don't need to lock an rcu_read_lock.
*/
files = rcu_dereference_raw(tp->files);
if (files) {
for (i = 0; files[i]; i++)
if (files[i] == file)
return i; return i;
} }
...@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) ...@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
mutex_lock(&probe_enable_lock); mutex_lock(&probe_enable_lock);
if (file) { if (file) {
struct ftrace_event_file **new, **old = tp->files; struct ftrace_event_file **new, **old;
int n = trace_probe_nr_files(tp); int n = trace_probe_nr_files(tp);
int i, j; int i, j;
old = rcu_dereference_raw(tp->files);
if (n == 0 || trace_probe_file_index(tp, file) < 0) { if (n == 0 || trace_probe_file_index(tp, file) < 0) {
ret = -EINVAL; ret = -EINVAL;
goto out_unlock; goto out_unlock;
...@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, ...@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
static __kprobes void static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{ {
struct ftrace_event_file **file = tp->files; /*
* Note: preempt is already disabled around the kprobe handler.
* However, we still need an smp_read_barrier_depends() corresponding
* to smp_wmb() in rcu_assign_pointer() to access the pointer.
*/
struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
if (unlikely(!file))
return;
/* Note: preempt is already disabled around the kprobe handler */
while (*file) { while (*file) {
__kprobe_trace_func(tp, regs, *file); __kprobe_trace_func(tp, regs, *file);
file++; file++;
...@@ -925,9 +945,16 @@ static __kprobes void ...@@ -925,9 +945,16 @@ static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct ftrace_event_file **file = tp->files; /*
* Note: preempt is already disabled around the kprobe handler.
* However, we still need an smp_read_barrier_depends() corresponding
* to smp_wmb() in rcu_assign_pointer() to access the pointer.
*/
struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
if (unlikely(!file))
return;
/* Note: preempt is already disabled around the kprobe handler */
while (*file) { while (*file) {
__kretprobe_trace_func(tp, ri, regs, *file); __kretprobe_trace_func(tp, ri, regs, *file);
file++; file++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment