tracing: Have dynamic events have a ref counter

As dynamic events are not created by modules, if something is attached to
one, calling "try_module_get()" on its "mod" field, is not going to keep
the dynamic event from going away.

Since dynamic events do not need the "mod" pointer of the event structure,
make a union out of it in order to save memory (there's one structure for
each of the thousand+ events in the kernel), and have any event with the
DYNAMIC flag set to use a ref counter instead.

Link: https://lore.kernel.org/linux-trace-devel/20210813004448.51c7de69ce432d338f4d226b@kernel.org/
Link: https://lkml.kernel.org/r/20210817035027.174869074@goodmis.org
Suggested-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 8b0e6c74
...@@ -350,7 +350,14 @@ struct trace_event_call { ...@@ -350,7 +350,14 @@ struct trace_event_call {
struct trace_event event; struct trace_event event;
char *print_fmt; char *print_fmt;
struct event_filter *filter; struct event_filter *filter;
void *mod; /*
* Static events can disappear with modules,
* whereas dynamic ones need their own ref count.
*/
union {
void *module;
atomic_t refcnt;
};
void *data; void *data;
/* See the TRACE_EVENT_FL_* flags above */ /* See the TRACE_EVENT_FL_* flags above */
...@@ -366,6 +373,42 @@ struct trace_event_call { ...@@ -366,6 +373,42 @@ struct trace_event_call {
#endif #endif
}; };
/*
 * Dynamic events (kprobes, uprobes, synthetic events) are not backed by a
 * module, so they are pinned with a ref counter instead of try_module_get().
 */
#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif
/*
 * Pin an event so it cannot go away while in use: take a module
 * reference for static (module-provided) events, or bump the dynamic
 * event ref counter otherwise.  Returns false if the event is gone.
 */
static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (!(call->flags & TRACE_EVENT_FL_DYNAMIC))
		return try_module_get(call->module);

	return trace_event_dyn_try_get_ref(call);
}
/* Release a reference taken with trace_event_try_get_ref(). */
static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (!(call->flags & TRACE_EVENT_FL_DYNAMIC))
		module_put(call->module);
	else
		trace_event_dyn_put_ref(call);
}
#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call) static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{ {
......
...@@ -3697,11 +3697,11 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str) ...@@ -3697,11 +3697,11 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str)
return false; return false;
event = container_of(trace_event, struct trace_event_call, event); event = container_of(trace_event, struct trace_event_call, event);
if (!event->mod) if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
return false; return false;
/* Would rather have rodata, but this will suffice */ /* Would rather have rodata, but this will suffice */
if (within_module_core(addr, event->mod)) if (within_module_core(addr, event->module))
return true; return true;
return false; return false;
......
...@@ -13,11 +13,49 @@ ...@@ -13,11 +13,49 @@
#include <linux/tracefs.h> #include <linux/tracefs.h>
#include "trace.h" #include "trace.h"
#include "trace_output.h" /* for trace_event_sem */
#include "trace_dynevent.h" #include "trace_dynevent.h"
static DEFINE_MUTEX(dyn_event_ops_mutex); static DEFINE_MUTEX(dyn_event_ops_mutex);
static LIST_HEAD(dyn_event_ops_list); static LIST_HEAD(dyn_event_ops_list);
/**
 * trace_event_dyn_try_get_ref - safely take a reference on a dynamic event
 * @dyn_call: the trace_event_call of the dynamic event to pin
 *
 * Dynamic events are not created by modules, so try_module_get() cannot
 * keep them from going away.  Instead, verify under trace_event_sem that
 * the event is still on the ftrace_events list (i.e. has not been
 * removed), and if so bump its ref counter, which blocks removal until
 * the reference is dropped with trace_event_dyn_put_ref().
 *
 * Returns true if the reference was taken, false if the event no longer
 * exists (or is not a dynamic event, which triggers a WARN).
 */
bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call)
{
	struct trace_event_call *call;
	bool ret = false;

	if (WARN_ON_ONCE(!(dyn_call->flags & TRACE_EVENT_FL_DYNAMIC)))
		return false;

	down_read(&trace_event_sem);
	list_for_each_entry(call, &ftrace_events, list) {
		if (call == dyn_call) {
			atomic_inc(&dyn_call->refcnt);
			ret = true;
			/* Found it -- no need to walk the rest of the list */
			break;
		}
	}
	up_read(&trace_event_sem);

	return ret;
}
/*
 * trace_event_dyn_put_ref - drop a reference taken by
 * trace_event_dyn_try_get_ref() on a dynamic event.
 *
 * NOTE(review): the refcnt read and decrement below are not atomic as a
 * pair; presumably every put is balanced with a successful get so the
 * counter never underflows in practice -- confirm callers before relying
 * on the WARN/reset path for anything but diagnostics.
 */
void trace_event_dyn_put_ref(struct trace_event_call *call)
{
	/* Only dynamic events use the ref counter (static ones use "module") */
	if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC)))
		return;

	/* An unbalanced put: warn and reset to a sane state instead of going negative */
	if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {
		atomic_set(&call->refcnt, 0);
		return;
	}

	atomic_dec(&call->refcnt);
}
/*
 * Return true if the dynamic event still has outstanding references,
 * meaning it must not be removed yet.
 */
bool trace_event_dyn_busy(struct trace_event_call *call)
{
	int refs = atomic_read(&call->refcnt);

	return refs != 0;
}
int dyn_event_register(struct dyn_event_operations *ops) int dyn_event_register(struct dyn_event_operations *ops)
{ {
if (!ops || !ops->create || !ops->show || !ops->is_busy || if (!ops || !ops->create || !ops->show || !ops->is_busy ||
......
...@@ -177,7 +177,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event) ...@@ -177,7 +177,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
} }
} }
out: out:
module_put(tp_event->mod); trace_event_put_ref(tp_event);
} }
static int perf_trace_event_open(struct perf_event *p_event) static int perf_trace_event_open(struct perf_event *p_event)
...@@ -224,10 +224,10 @@ int perf_trace_init(struct perf_event *p_event) ...@@ -224,10 +224,10 @@ int perf_trace_init(struct perf_event *p_event)
list_for_each_entry(tp_event, &ftrace_events, list) { list_for_each_entry(tp_event, &ftrace_events, list) {
if (tp_event->event.type == event_id && if (tp_event->event.type == event_id &&
tp_event->class && tp_event->class->reg && tp_event->class && tp_event->class->reg &&
try_module_get(tp_event->mod)) { trace_event_try_get_ref(tp_event)) {
ret = perf_trace_event_init(tp_event, p_event); ret = perf_trace_event_init(tp_event, p_event);
if (ret) if (ret)
module_put(tp_event->mod); trace_event_put_ref(tp_event);
break; break;
} }
} }
......
...@@ -2525,7 +2525,10 @@ __register_event(struct trace_event_call *call, struct module *mod) ...@@ -2525,7 +2525,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
return ret; return ret;
list_add(&call->list, &ftrace_events); list_add(&call->list, &ftrace_events);
call->mod = mod; if (call->flags & TRACE_EVENT_FL_DYNAMIC)
atomic_set(&call->refcnt, 0);
else
call->module = mod;
return 0; return 0;
} }
...@@ -2839,7 +2842,9 @@ static void trace_module_remove_events(struct module *mod) ...@@ -2839,7 +2842,9 @@ static void trace_module_remove_events(struct module *mod)
down_write(&trace_event_sem); down_write(&trace_event_sem);
list_for_each_entry_safe(call, p, &ftrace_events, list) { list_for_each_entry_safe(call, p, &ftrace_events, list) {
if (call->mod == mod) if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
continue;
if (call->module == mod)
__trace_remove_event_call(call); __trace_remove_event_call(call);
} }
up_write(&trace_event_sem); up_write(&trace_event_sem);
...@@ -2982,7 +2987,7 @@ struct trace_event_file *trace_get_event_file(const char *instance, ...@@ -2982,7 +2987,7 @@ struct trace_event_file *trace_get_event_file(const char *instance,
} }
/* Don't let event modules unload while in use */ /* Don't let event modules unload while in use */
ret = try_module_get(file->event_call->mod); ret = trace_event_try_get_ref(file->event_call);
if (!ret) { if (!ret) {
trace_array_put(tr); trace_array_put(tr);
ret = -EBUSY; ret = -EBUSY;
...@@ -3012,7 +3017,7 @@ EXPORT_SYMBOL_GPL(trace_get_event_file); ...@@ -3012,7 +3017,7 @@ EXPORT_SYMBOL_GPL(trace_get_event_file);
void trace_put_event_file(struct trace_event_file *file) void trace_put_event_file(struct trace_event_file *file)
{ {
mutex_lock(&event_mutex); mutex_lock(&event_mutex);
module_put(file->event_call->mod); trace_event_put_ref(file->event_call);
mutex_unlock(&event_mutex); mutex_unlock(&event_mutex);
trace_array_put(file->tr); trace_array_put(file->tr);
...@@ -3147,7 +3152,7 @@ static int free_probe_data(void *data) ...@@ -3147,7 +3152,7 @@ static int free_probe_data(void *data)
if (!edata->ref) { if (!edata->ref) {
/* Remove the SOFT_MODE flag */ /* Remove the SOFT_MODE flag */
__ftrace_event_enable_disable(edata->file, 0, 1); __ftrace_event_enable_disable(edata->file, 0, 1);
module_put(edata->file->event_call->mod); trace_event_put_ref(edata->file->event_call);
kfree(edata); kfree(edata);
} }
return 0; return 0;
...@@ -3280,7 +3285,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, ...@@ -3280,7 +3285,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
out_reg: out_reg:
/* Don't let event modules unload while probe registered */ /* Don't let event modules unload while probe registered */
ret = try_module_get(file->event_call->mod); ret = trace_event_try_get_ref(file->event_call);
if (!ret) { if (!ret) {
ret = -EBUSY; ret = -EBUSY;
goto out_free; goto out_free;
...@@ -3310,7 +3315,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, ...@@ -3310,7 +3315,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
out_disable: out_disable:
__ftrace_event_enable_disable(file, 0, 1); __ftrace_event_enable_disable(file, 0, 1);
out_put: out_put:
module_put(file->event_call->mod); trace_event_put_ref(file->event_call);
out_free: out_free:
kfree(data); kfree(data);
goto out; goto out;
...@@ -3376,7 +3381,8 @@ void __trace_early_add_events(struct trace_array *tr) ...@@ -3376,7 +3381,8 @@ void __trace_early_add_events(struct trace_array *tr)
list_for_each_entry(call, &ftrace_events, list) { list_for_each_entry(call, &ftrace_events, list) {
/* Early boot up should not have any modules loaded */ /* Early boot up should not have any modules loaded */
if (WARN_ON_ONCE(call->mod)) if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
WARN_ON_ONCE(call->module))
continue; continue;
ret = __trace_early_add_new_event(call, tr); ret = __trace_early_add_new_event(call, tr);
......
...@@ -1369,14 +1369,16 @@ static int destroy_synth_event(struct synth_event *se) ...@@ -1369,14 +1369,16 @@ static int destroy_synth_event(struct synth_event *se)
int ret; int ret;
if (se->ref) if (se->ref)
ret = -EBUSY; return -EBUSY;
else {
if (trace_event_dyn_busy(&se->call))
return -EBUSY;
ret = unregister_synth_event(se); ret = unregister_synth_event(se);
if (!ret) { if (!ret) {
dyn_event_remove(&se->devent); dyn_event_remove(&se->devent);
free_synth_event(se); free_synth_event(se);
} }
}
return ret; return ret;
} }
...@@ -2102,6 +2104,9 @@ static int synth_event_release(struct dyn_event *ev) ...@@ -2102,6 +2104,9 @@ static int synth_event_release(struct dyn_event *ev)
if (event->ref) if (event->ref)
return -EBUSY; return -EBUSY;
if (trace_event_dyn_busy(&event->call))
return -EBUSY;
ret = unregister_synth_event(event); ret = unregister_synth_event(event);
if (ret) if (ret)
return ret; return ret;
......
...@@ -1334,7 +1334,7 @@ void event_enable_trigger_free(struct event_trigger_ops *ops, ...@@ -1334,7 +1334,7 @@ void event_enable_trigger_free(struct event_trigger_ops *ops,
if (!data->ref) { if (!data->ref) {
/* Remove the SOFT_MODE flag */ /* Remove the SOFT_MODE flag */
trace_event_enable_disable(enable_data->file, 0, 1); trace_event_enable_disable(enable_data->file, 0, 1);
module_put(enable_data->file->event_call->mod); trace_event_put_ref(enable_data->file->event_call);
trigger_data_free(data); trigger_data_free(data);
kfree(enable_data); kfree(enable_data);
} }
...@@ -1481,7 +1481,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, ...@@ -1481,7 +1481,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
out_reg: out_reg:
/* Don't let event modules unload while probe registered */ /* Don't let event modules unload while probe registered */
ret = try_module_get(event_enable_file->event_call->mod); ret = trace_event_try_get_ref(event_enable_file->event_call);
if (!ret) { if (!ret) {
ret = -EBUSY; ret = -EBUSY;
goto out_free; goto out_free;
...@@ -1510,7 +1510,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, ...@@ -1510,7 +1510,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
out_disable: out_disable:
trace_event_enable_disable(event_enable_file, 0, 1); trace_event_enable_disable(event_enable_file, 0, 1);
out_put: out_put:
module_put(event_enable_file->event_call->mod); trace_event_put_ref(event_enable_file->event_call);
out_free: out_free:
if (cmd_ops->set_filter) if (cmd_ops->set_filter)
cmd_ops->set_filter(NULL, trigger_data, NULL); cmd_ops->set_filter(NULL, trigger_data, NULL);
......
...@@ -543,6 +543,10 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk) ...@@ -543,6 +543,10 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk)
if (trace_probe_is_enabled(&tk->tp)) if (trace_probe_is_enabled(&tk->tp))
return -EBUSY; return -EBUSY;
/* If there's a reference to the dynamic event */
if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
return -EBUSY;
/* Will fail if probe is being used by ftrace or perf */ /* Will fail if probe is being used by ftrace or perf */
if (unregister_kprobe_event(tk)) if (unregister_kprobe_event(tk))
return -EBUSY; return -EBUSY;
......
...@@ -393,6 +393,10 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu) ...@@ -393,6 +393,10 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu)
if (trace_probe_has_sibling(&tu->tp)) if (trace_probe_has_sibling(&tu->tp))
goto unreg; goto unreg;
/* If there's a reference to the dynamic event */
if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
return -EBUSY;
ret = unregister_uprobe_event(tu); ret = unregister_uprobe_event(tu);
if (ret) if (ret)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment