Commit b0a81b94 authored by Thomas Gleixner, committed by Alexei Starovoitov

bpf/trace: Remove redundant preempt_disable from trace_call_bpf()

Similar to __bpf_trace_run(), this is redundant because trace_call_bpf() is
invoked from a trace point via __DO_TRACE(), which already disables
preemption _before_ invoking any of the functions attached to the trace
point.
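
For reference, this is the shape of __DO_TRACE() in include/linux/tracepoint.h.
The following is a condensed sketch from kernels of this era, not the verbatim
macro (SRCU/rcuidle handling and other details are elided). Every function
attached to a trace point, including the handler that ends up calling
trace_call_bpf(), runs inside the preempt_disable_notrace() /
preempt_enable_notrace() pair:

#define __DO_TRACE(tp, proto, args, cond, rcuidle)			\
	do {								\
		struct tracepoint_func *it_func_ptr;			\
		void *it_func;						\
		void *__data;						\
									\
		if (!(cond))						\
			return;						\
									\
		/* disabled _before_ any attached function runs */	\
		preempt_disable_notrace();				\
		it_func_ptr = rcu_dereference_raw((tp)->funcs);		\
		if (it_func_ptr) {					\
			do {						\
				it_func = (it_func_ptr)->func;		\
				__data = (it_func_ptr)->data;		\
				((void(*)(proto))(it_func))(args);	\
			} while ((++it_func_ptr)->func);		\
		}							\
		/* re-enabled only after all of them return */		\
		preempt_enable_notrace();				\
	} while (0)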

Remove it and add a cant_sleep() check.
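
cant_sleep() is a debug-only assertion (include/linux/kernel.h, with the
warning logic in __cant_sleep() in kernel/sched/core.c): with
CONFIG_DEBUG_ATOMIC_SLEEP enabled it warns when the annotated code is reached
from a context that could sleep, i.e. with preemption unexpectedly enabled,
and it compiles away entirely otherwise. Roughly, simplified from kernels of
this era:

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Warns (once) if reached from a preemptible, sleepable context. */
# define cant_sleep() \
	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
#else
# define cant_sleep()	do { } while (0)
#endif

The net effect of the patch is to replace a real preempt_disable() /
preempt_enable() pair with a zero-cost (in production builds) check that the
trace point entry path has already disabled preemption.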
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.059995527@linutronix.de
parent 70ed0706
@@ -83,7 +83,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 	if (in_nmi()) /* not supported yet */
 		return 1;
 
-	preempt_disable();
+	cant_sleep();
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 		/*
@@ -115,7 +115,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 out:
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
 	return ret;
 }