ftrace: Dynamically create the probe ftrace_ops for the trace_array

In order to eventually have each trace_array instance have its own unique
set of function probes (triggers), the trace array needs to hold the ops and
the filters for the probes.

This is the first step to accomplish this. Instead of having the private
data of the probe ops point to the trace_array, create a separate list that
the trace_array holds. There's only one private_data pointer for a probe, but
we need one per trace_array. The probe ftrace_ops will be dynamically created
for each instance, instead of being static.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent b5f081b5
...@@ -1101,6 +1101,14 @@ struct ftrace_func_entry { ...@@ -1101,6 +1101,14 @@ struct ftrace_func_entry {
unsigned long ip; unsigned long ip;
}; };
struct ftrace_func_probe {
struct ftrace_probe_ops *probe_ops;
struct ftrace_ops ops;
struct trace_array *tr;
struct list_head list;
int ref;
};
/* /*
* We make these constant because no one should touch them, * We make these constant because no one should touch them,
* but they are used as the default "empty hash", to avoid allocating * but they are used as the default "empty hash", to avoid allocating
...@@ -3054,7 +3062,7 @@ struct ftrace_iterator { ...@@ -3054,7 +3062,7 @@ struct ftrace_iterator {
loff_t func_pos; loff_t func_pos;
struct ftrace_page *pg; struct ftrace_page *pg;
struct dyn_ftrace *func; struct dyn_ftrace *func;
struct ftrace_probe_ops *probe; struct ftrace_func_probe *probe;
struct ftrace_func_entry *probe_entry; struct ftrace_func_entry *probe_entry;
struct trace_parser parser; struct trace_parser parser;
struct ftrace_hash *hash; struct ftrace_hash *hash;
...@@ -3088,7 +3096,7 @@ t_probe_next(struct seq_file *m, loff_t *pos) ...@@ -3088,7 +3096,7 @@ t_probe_next(struct seq_file *m, loff_t *pos)
if (!iter->probe) { if (!iter->probe) {
next = func_probes->next; next = func_probes->next;
iter->probe = list_entry(next, struct ftrace_probe_ops, list); iter->probe = list_entry(next, struct ftrace_func_probe, list);
} }
if (iter->probe_entry) if (iter->probe_entry)
...@@ -3102,7 +3110,7 @@ t_probe_next(struct seq_file *m, loff_t *pos) ...@@ -3102,7 +3110,7 @@ t_probe_next(struct seq_file *m, loff_t *pos)
if (iter->probe->list.next == func_probes) if (iter->probe->list.next == func_probes)
return NULL; return NULL;
next = iter->probe->list.next; next = iter->probe->list.next;
iter->probe = list_entry(next, struct ftrace_probe_ops, list); iter->probe = list_entry(next, struct ftrace_func_probe, list);
hash = iter->probe->ops.func_hash->filter_hash; hash = iter->probe->ops.func_hash->filter_hash;
size = 1 << hash->size_bits; size = 1 << hash->size_bits;
iter->pidx = 0; iter->pidx = 0;
...@@ -3166,8 +3174,9 @@ static void *t_probe_start(struct seq_file *m, loff_t *pos) ...@@ -3166,8 +3174,9 @@ static void *t_probe_start(struct seq_file *m, loff_t *pos)
static int static int
t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
{ {
struct ftrace_probe_ops *probe;
struct ftrace_func_entry *probe_entry; struct ftrace_func_entry *probe_entry;
struct ftrace_probe_ops *probe_ops;
struct ftrace_func_probe *probe;
probe = iter->probe; probe = iter->probe;
probe_entry = iter->probe_entry; probe_entry = iter->probe_entry;
...@@ -3175,10 +3184,13 @@ t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) ...@@ -3175,10 +3184,13 @@ t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
if (WARN_ON_ONCE(!probe || !probe_entry)) if (WARN_ON_ONCE(!probe || !probe_entry))
return -EIO; return -EIO;
if (probe->print) probe_ops = probe->probe_ops;
return probe->print(m, probe_entry->ip, probe, NULL);
if (probe_ops->print)
return probe_ops->print(m, probe_entry->ip, probe_ops, NULL);
seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, (void *)probe->func); seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
(void *)probe_ops->func);
return 0; return 0;
} }
...@@ -3791,9 +3803,10 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, ...@@ -3791,9 +3803,10 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs) struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct ftrace_probe_ops *probe_ops; struct ftrace_probe_ops *probe_ops;
struct trace_array *tr = op->private; struct ftrace_func_probe *probe;
probe_ops = container_of(op, struct ftrace_probe_ops, ops); probe = container_of(op, struct ftrace_func_probe, ops);
probe_ops = probe->probe_ops;
/* /*
* Disable preemption for these calls to prevent a RCU grace * Disable preemption for these calls to prevent a RCU grace
...@@ -3801,7 +3814,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, ...@@ -3801,7 +3814,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
* on the hash. rcu_read_lock is too dangerous here. * on the hash. rcu_read_lock is too dangerous here.
*/ */
preempt_disable_notrace(); preempt_disable_notrace();
probe_ops->func(ip, parent_ip, tr, probe_ops, NULL); probe_ops->func(ip, parent_ip, probe->tr, probe_ops, NULL);
preempt_enable_notrace(); preempt_enable_notrace();
} }
...@@ -3946,11 +3959,41 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, ...@@ -3946,11 +3959,41 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
free_ftrace_hash(&mapper->hash); free_ftrace_hash(&mapper->hash);
} }
static void release_probe(struct ftrace_func_probe *probe)
{
struct ftrace_probe_ops *probe_ops;
mutex_lock(&ftrace_lock);
WARN_ON(probe->ref <= 0);
/* Subtract the ref that was used to protect this instance */
probe->ref--;
if (!probe->ref) {
probe_ops = probe->probe_ops;
list_del(&probe->list);
kfree(probe);
}
mutex_unlock(&ftrace_lock);
}
static void acquire_probe_locked(struct ftrace_func_probe *probe)
{
/*
* Add one ref to keep it from being freed when releasing the
* ftrace_lock mutex.
*/
probe->ref++;
}
int int
register_ftrace_function_probe(char *glob, struct trace_array *tr, register_ftrace_function_probe(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *ops, void *data) struct ftrace_probe_ops *probe_ops,
void *data)
{ {
struct ftrace_func_entry *entry; struct ftrace_func_entry *entry;
struct ftrace_func_probe *probe;
struct ftrace_hash **orig_hash; struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash; struct ftrace_hash *old_hash;
struct ftrace_hash *hash; struct ftrace_hash *hash;
...@@ -3966,16 +4009,33 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, ...@@ -3966,16 +4009,33 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
if (WARN_ON(glob[0] == '!')) if (WARN_ON(glob[0] == '!'))
return -EINVAL; return -EINVAL;
if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED)) {
ops->ops.func = function_trace_probe_call; mutex_lock(&ftrace_lock);
ftrace_ops_init(&ops->ops); /* Check if the probe_ops is already registered */
INIT_LIST_HEAD(&ops->list); list_for_each_entry(probe, &tr->func_probes, list) {
ops->ops.private = tr; if (probe->probe_ops == probe_ops)
break;
} }
if (&probe->list == &tr->func_probes) {
probe = kzalloc(sizeof(*probe), GFP_KERNEL);
if (!probe) {
mutex_unlock(&ftrace_lock);
return -ENOMEM;
}
probe->probe_ops = probe_ops;
probe->ops.func = function_trace_probe_call;
probe->tr = tr;
ftrace_ops_init(&probe->ops);
list_add(&probe->list, &tr->func_probes);
}
acquire_probe_locked(probe);
mutex_lock(&ops->ops.func_hash->regex_lock); mutex_unlock(&ftrace_lock);
mutex_lock(&probe->ops.func_hash->regex_lock);
orig_hash = &ops->ops.func_hash->filter_hash; orig_hash = &probe->ops.func_hash->filter_hash;
old_hash = *orig_hash; old_hash = *orig_hash;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
...@@ -3998,8 +4058,9 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, ...@@ -3998,8 +4058,9 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
* for each function we find. We call the callback * for each function we find. We call the callback
* to give the caller an opportunity to do so. * to give the caller an opportunity to do so.
*/ */
if (ops->init) { if (probe_ops->init) {
ret = ops->init(ops, tr, entry->ip, data); ret = probe_ops->init(probe_ops, tr,
entry->ip, data);
if (ret < 0) if (ret < 0)
goto out; goto out;
} }
...@@ -4009,16 +4070,22 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, ...@@ -4009,16 +4070,22 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash, if (!count) {
hash, 1); /* Nothing was added? */
ret = -EINVAL;
goto out_unlock;
}
ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
hash, 1);
if (ret < 0) if (ret < 0)
goto err_unlock; goto err_unlock;
if (list_empty(&ops->list)) /* One ref for each new function traced */
list_add(&ops->list, &tr->func_probes); probe->ref += count;
if (!(ops->ops.flags & FTRACE_OPS_FL_ENABLED)) if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
ret = ftrace_startup(&ops->ops, 0); ret = ftrace_startup(&probe->ops, 0);
out_unlock: out_unlock:
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
...@@ -4026,13 +4093,15 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, ...@@ -4026,13 +4093,15 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
if (!ret) if (!ret)
ret = count; ret = count;
out: out:
mutex_unlock(&ops->ops.func_hash->regex_lock); mutex_unlock(&probe->ops.func_hash->regex_lock);
free_ftrace_hash(hash); free_ftrace_hash(hash);
release_probe(probe);
return ret; return ret;
err_unlock: err_unlock:
if (!ops->free) if (!probe_ops->free || !count)
goto out_unlock; goto out_unlock;
/* Failed to do the move, need to call the free functions */ /* Failed to do the move, need to call the free functions */
...@@ -4040,33 +4109,30 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, ...@@ -4040,33 +4109,30 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
hlist_for_each_entry(entry, &hash->buckets[i], hlist) { hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
if (ftrace_lookup_ip(old_hash, entry->ip)) if (ftrace_lookup_ip(old_hash, entry->ip))
continue; continue;
ops->free(ops, tr, entry->ip, NULL); probe_ops->free(probe_ops, tr, entry->ip, NULL);
} }
} }
goto out_unlock; goto out_unlock;
} }
int int
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *probe_ops)
{ {
struct ftrace_ops_hash old_hash_ops; struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *entry; struct ftrace_func_entry *entry;
struct ftrace_func_probe *probe;
struct ftrace_glob func_g; struct ftrace_glob func_g;
struct ftrace_hash **orig_hash; struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash; struct ftrace_hash *old_hash;
struct ftrace_hash *hash = NULL; struct ftrace_hash *hash = NULL;
struct hlist_node *tmp; struct hlist_node *tmp;
struct hlist_head hhd; struct hlist_head hhd;
struct trace_array *tr;
char str[KSYM_SYMBOL_LEN]; char str[KSYM_SYMBOL_LEN];
int i, ret; int count = 0;
int i, ret = -ENODEV;
int size; int size;
if (!(ops->ops.flags & FTRACE_OPS_FL_INITIALIZED))
return -EINVAL;
tr = ops->ops.private;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
func_g.search = NULL; func_g.search = NULL;
else if (glob) { else if (glob) {
...@@ -4082,12 +4148,28 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) ...@@ -4082,12 +4148,28 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
return -EINVAL; return -EINVAL;
} }
mutex_lock(&ops->ops.func_hash->regex_lock); mutex_lock(&ftrace_lock);
/* Check if the probe_ops is already registered */
list_for_each_entry(probe, &tr->func_probes, list) {
if (probe->probe_ops == probe_ops)
break;
}
if (&probe->list == &tr->func_probes)
goto err_unlock_ftrace;
ret = -EINVAL;
if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
goto err_unlock_ftrace;
acquire_probe_locked(probe);
orig_hash = &ops->ops.func_hash->filter_hash; mutex_unlock(&ftrace_lock);
mutex_lock(&probe->ops.func_hash->regex_lock);
orig_hash = &probe->ops.func_hash->filter_hash;
old_hash = *orig_hash; old_hash = *orig_hash;
ret = -EINVAL;
if (ftrace_hash_empty(old_hash)) if (ftrace_hash_empty(old_hash))
goto out_unlock; goto out_unlock;
...@@ -4112,46 +4194,54 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) ...@@ -4112,46 +4194,54 @@ unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
if (!ftrace_match(str, &func_g)) if (!ftrace_match(str, &func_g))
continue; continue;
} }
count++;
remove_hash_entry(hash, entry); remove_hash_entry(hash, entry);
hlist_add_head(&entry->hlist, &hhd); hlist_add_head(&entry->hlist, &hhd);
} }
} }
/* Nothing found? */ /* Nothing found? */
if (hlist_empty(&hhd)) { if (!count) {
ret = -EINVAL; ret = -EINVAL;
goto out_unlock; goto out_unlock;
} }
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
if (ftrace_hash_empty(hash)) { WARN_ON(probe->ref < count);
ftrace_shutdown(&ops->ops, 0);
list_del_init(&ops->list);
}
probe->ref -= count;
ret = ftrace_hash_move_and_update_ops(&ops->ops, orig_hash, if (ftrace_hash_empty(hash))
ftrace_shutdown(&probe->ops, 0);
ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
hash, 1); hash, 1);
/* still need to update the function call sites */ /* still need to update the function call sites */
if (ftrace_enabled && !ftrace_hash_empty(hash)) if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&ops->ops, FTRACE_UPDATE_CALLS, ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops); &old_hash_ops);
synchronize_sched(); synchronize_sched();
hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
hlist_del(&entry->hlist); hlist_del(&entry->hlist);
if (ops->free) if (probe_ops->free)
ops->free(ops, tr, entry->ip, NULL); probe_ops->free(probe_ops, tr, entry->ip, NULL);
kfree(entry); kfree(entry);
} }
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
out_unlock: out_unlock:
mutex_unlock(&ops->ops.func_hash->regex_lock); mutex_unlock(&probe->ops.func_hash->regex_lock);
free_ftrace_hash(hash); free_ftrace_hash(hash);
release_probe(probe);
return ret;
err_unlock_ftrace:
mutex_unlock(&ftrace_lock);
return ret; return ret;
} }
......
...@@ -6832,7 +6832,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, ...@@ -6832,7 +6832,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
if (glob[0] == '!') if (glob[0] == '!')
return unregister_ftrace_function_probe_func(glob+1, ops); return unregister_ftrace_function_probe_func(glob+1, tr, ops);
if (!param) if (!param)
goto out_reg; goto out_reg;
......
...@@ -939,8 +939,6 @@ static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { ...@@ -939,8 +939,6 @@ static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) {
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
struct ftrace_probe_ops { struct ftrace_probe_ops {
struct ftrace_ops ops;
struct list_head list;
void (*func)(unsigned long ip, void (*func)(unsigned long ip,
unsigned long parent_ip, unsigned long parent_ip,
struct trace_array *tr, struct trace_array *tr,
...@@ -976,7 +974,8 @@ extern int ...@@ -976,7 +974,8 @@ extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr, register_ftrace_function_probe(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *ops, void *data); struct ftrace_probe_ops *ops, void *data);
extern int extern int
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *ops);
int register_ftrace_command(struct ftrace_func_command *cmd); int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd);
......
...@@ -2653,7 +2653,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, ...@@ -2653,7 +2653,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
if (glob[0] == '!') { if (glob[0] == '!') {
ret = unregister_ftrace_function_probe_func(glob+1, ops); ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
goto out; goto out;
} }
......
...@@ -597,7 +597,7 @@ ftrace_trace_probe_callback(struct trace_array *tr, ...@@ -597,7 +597,7 @@ ftrace_trace_probe_callback(struct trace_array *tr,
return -EINVAL; return -EINVAL;
if (glob[0] == '!') if (glob[0] == '!')
return unregister_ftrace_function_probe_func(glob+1, ops); return unregister_ftrace_function_probe_func(glob+1, tr, ops);
if (!param) if (!param)
goto out_reg; goto out_reg;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment