Commit 345ddcc8 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

ftrace: Have set_ftrace_pid use the bitmap like events do

Convert set_ftrace_pid to use the bitmap like set_event_pid does. This
allows instances to use pid filtering as well, and will allow a
function-fork option to control whether the children of a traced task
should be traced or not.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 76c813e2
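Before the diff, the shape of the mechanism the patch moves to: pids live in a bitmap, a sched_switch probe folds the membership test into one per-cpu boolean for the incoming task, and the per-function callback only reads that boolean. The following is a runnable user-space sketch, not kernel code — pid_bitmap, on_sched_switch(), and function_callback() are illustrative stand-ins, and the real code additionally treats "no list installed" as trace-everything:

	/* Minimal sketch of bitmap pid filtering (illustrative names only). */
	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define PID_MAX		32768	/* assume the default pid_max */
	#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

	static unsigned long pid_bitmap[PID_MAX / BITS_PER_LONG];
	static bool ignore_pid;	/* stands in for the per-cpu ftrace_ignore_pid */

	static void pid_set(int pid)
	{
		pid_bitmap[pid / BITS_PER_LONG] |= 1UL << (pid % BITS_PER_LONG);
	}

	static bool pid_test(int pid)
	{
		return pid_bitmap[pid / BITS_PER_LONG] & (1UL << (pid % BITS_PER_LONG));
	}

	/* analogue of ftrace_filter_pid_sched_switch_probe(): runs at sched_switch */
	static void on_sched_switch(int next_pid)
	{
		ignore_pid = !pid_test(next_pid);
	}

	/* analogue of ftrace_pid_func(): the hot path is a single flag test */
	static void function_callback(const char *func)
	{
		if (ignore_pid)
			return;
		printf("traced: %s\n", func);
	}

	int main(void)
	{
		pid_set(1234);		/* like: echo 1234 > set_ftrace_pid */

		on_sched_switch(1234);	/* filtered task scheduled in */
		function_callback("do_sys_open");	/* traced */

		on_sched_switch(99);	/* some other task scheduled in */
		function_callback("do_sys_open");	/* ignored */
		return 0;
	}

The payoff is that the expensive question "is this task in the filter?" is asked once per context switch instead of once per traced function call.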
...@@ -89,16 +89,16 @@ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; ...@@ -89,16 +89,16 @@ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */ /* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op; static struct ftrace_ops *set_function_trace_op;
/* List for set_ftrace_pid's pids. */ static bool ftrace_pids_enabled(struct ftrace_ops *ops)
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
struct list_head list;
struct pid *pid;
};
static bool ftrace_pids_enabled(void)
{ {
return !list_empty(&ftrace_pids); struct trace_array *tr;
if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
return false;
tr = ops->private;
return tr->function_pids != NULL;
} }
static void ftrace_update_trampoline(struct ftrace_ops *ops); static void ftrace_update_trampoline(struct ftrace_ops *ops);
@@ -179,7 +179,9 @@ int ftrace_nr_registered_ops(void)
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct pt_regs *regs)
 {
-	if (!test_tsk_trace_trace(current))
+	struct trace_array *tr = op->private;
+
+	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
 		return;
 
 	op->saved_func(ip, parent_ip, op, regs);
@@ -417,7 +419,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	/* Always save the function, and reset at unregistering */
 	ops->saved_func = ops->func;
 
-	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+	if (ftrace_pids_enabled(ops))
 		ops->func = ftrace_pid_func;
 
 	ftrace_update_trampoline(ops);
@@ -450,7 +452,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 static void ftrace_update_pid_func(void)
 {
-	bool enabled = ftrace_pids_enabled();
 	struct ftrace_ops *op;
 
 	/* Only do something if we are tracing something */
@@ -459,8 +460,8 @@ static void ftrace_update_pid_func(void)
 
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (op->flags & FTRACE_OPS_FL_PID) {
-			op->func = enabled ? ftrace_pid_func :
-				op->saved_func;
+			op->func = ftrace_pids_enabled(op) ?
+				ftrace_pid_func : op->saved_func;
 			ftrace_update_trampoline(op);
 		}
 	} while_for_each_ftrace_op(op);
@@ -5324,179 +5325,99 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 	return ops->func;
 }
 
-static void clear_ftrace_swapper(void)
+static void
+ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
+		    struct task_struct *prev, struct task_struct *next)
 {
-	struct task_struct *p;
-	int cpu;
+	struct trace_array *tr = data;
+	struct trace_pid_list *pid_list;
 
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		p = idle_task(cpu);
-		clear_tsk_trace_trace(p);
-	}
-	put_online_cpus();
-}
+	pid_list = rcu_dereference_sched(tr->function_pids);
 
-static void set_ftrace_swapper(void)
-{
-	struct task_struct *p;
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		p = idle_task(cpu);
-		set_tsk_trace_trace(p);
-	}
-	put_online_cpus();
+	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+		       trace_ignore_this_task(pid_list, next));
 }
 
-static void clear_ftrace_pid(struct pid *pid)
+static void clear_ftrace_pids(struct trace_array *tr)
 {
-	struct task_struct *p;
+	struct trace_pid_list *pid_list;
+	int cpu;
 
-	rcu_read_lock();
-	do_each_pid_task(pid, PIDTYPE_PID, p) {
-		clear_tsk_trace_trace(p);
-	} while_each_pid_task(pid, PIDTYPE_PID, p);
-	rcu_read_unlock();
+	pid_list = rcu_dereference_protected(tr->function_pids,
+					     lockdep_is_held(&ftrace_lock));
+	if (!pid_list)
+		return;
 
-	put_pid(pid);
-}
+	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
 
-static void set_ftrace_pid(struct pid *pid)
-{
-	struct task_struct *p;
+	for_each_possible_cpu(cpu)
+		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
 
-	rcu_read_lock();
-	do_each_pid_task(pid, PIDTYPE_PID, p) {
-		set_tsk_trace_trace(p);
-	} while_each_pid_task(pid, PIDTYPE_PID, p);
-	rcu_read_unlock();
-}
+	rcu_assign_pointer(tr->function_pids, NULL);
 
-static void clear_ftrace_pid_task(struct pid *pid)
-{
-	if (pid == ftrace_swapper_pid)
-		clear_ftrace_swapper();
-	else
-		clear_ftrace_pid(pid);
-}
+	/* Wait till all users are no longer using pid filtering */
+	synchronize_sched();
 
-static void set_ftrace_pid_task(struct pid *pid)
-{
-	if (pid == ftrace_swapper_pid)
-		set_ftrace_swapper();
-	else
-		set_ftrace_pid(pid);
+	trace_free_pid_list(pid_list);
 }
 
-static int ftrace_pid_add(int p)
+static void ftrace_pid_reset(struct trace_array *tr)
 {
-	struct pid *pid;
-	struct ftrace_pid *fpid;
-	int ret = -EINVAL;
-
 	mutex_lock(&ftrace_lock);
-
-	if (!p)
-		pid = ftrace_swapper_pid;
-	else
-		pid = find_get_pid(p);
-
-	if (!pid)
-		goto out;
-
-	ret = 0;
-
-	list_for_each_entry(fpid, &ftrace_pids, list)
-		if (fpid->pid == pid)
-			goto out_put;
-
-	ret = -ENOMEM;
-
-	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
-	if (!fpid)
-		goto out_put;
-
-	list_add(&fpid->list, &ftrace_pids);
-	fpid->pid = pid;
-
-	set_ftrace_pid_task(pid);
+	clear_ftrace_pids(tr);
 
 	ftrace_update_pid_func();
-
 	ftrace_startup_all(0);
 
 	mutex_unlock(&ftrace_lock);
-	return 0;
-
-out_put:
-	if (pid != ftrace_swapper_pid)
-		put_pid(pid);
-
-out:
-	mutex_unlock(&ftrace_lock);
-	return ret;
-}
-
-static void ftrace_pid_reset(void)
-{
-	struct ftrace_pid *fpid, *safe;
-
-	mutex_lock(&ftrace_lock);
-	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
-		struct pid *pid = fpid->pid;
-
-		clear_ftrace_pid_task(pid);
-
-		list_del(&fpid->list);
-		kfree(fpid);
-	}
-
-	ftrace_update_pid_func();
-	ftrace_startup_all(0);
-
-	mutex_unlock(&ftrace_lock);
 }
 
+/* Greater than any max PID */
+#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
+
 static void *fpid_start(struct seq_file *m, loff_t *pos)
+	__acquires(RCU)
 {
+	struct trace_pid_list *pid_list;
+	struct trace_array *tr = m->private;
+
 	mutex_lock(&ftrace_lock);
+	rcu_read_lock_sched();
+
+	pid_list = rcu_dereference_sched(tr->function_pids);
 
-	if (!ftrace_pids_enabled() && (!*pos))
-		return (void *) 1;
+	if (!pid_list)
+		return !(*pos) ? FTRACE_NO_PIDS : NULL;
 
-	return seq_list_start(&ftrace_pids, *pos);
+	return trace_pid_start(pid_list, pos);
 }
 
 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	if (v == (void *)1)
+	struct trace_array *tr = m->private;
+	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
+
+	if (v == FTRACE_NO_PIDS)
 		return NULL;
 
-	return seq_list_next(v, &ftrace_pids, pos);
+	return trace_pid_next(pid_list, v, pos);
 }
 
 static void fpid_stop(struct seq_file *m, void *p)
+	__releases(RCU)
 {
+	rcu_read_unlock_sched();
 	mutex_unlock(&ftrace_lock);
 }
 
 static int fpid_show(struct seq_file *m, void *v)
 {
-	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
-
-	if (v == (void *)1) {
+	if (v == FTRACE_NO_PIDS) {
 		seq_puts(m, "no pid\n");
 		return 0;
 	}
 
-	if (fpid->pid == ftrace_swapper_pid)
-		seq_puts(m, "swapper tasks\n");
-	else
-		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
-
-	return 0;
+	return trace_pid_show(m, v);
 }
 
 static const struct seq_operations ftrace_pid_sops = {
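A note on the FTRACE_NO_PIDS sentinel above: the shared pid iterator (trace_pid_start()/trace_pid_next(), the same helpers set_event_pid uses) hands each pid to the show callback encoded as the non-NULL pointer value pid + 1, so encoded entries never exceed PID_MAX_LIMIT; defining the "no pid" marker one past that range guarantees it can never collide with a real entry, hence the "greater than any max PID" comment.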
@@ -5509,58 +5430,103 @@ static const struct seq_operations ftrace_pid_sops = {
 static int
 ftrace_pid_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	struct seq_file *m;
 	int ret = 0;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
-		ftrace_pid_reset();
+		ftrace_pid_reset(tr);
 
-	if (file->f_mode & FMODE_READ)
-		ret = seq_open(file, &ftrace_pid_sops);
+	ret = seq_open(file, &ftrace_pid_sops);
+	if (ret < 0) {
+		trace_array_put(tr);
+	} else {
+		m = file->private_data;
+		/* copy tr over to seq ops */
+		m->private = tr;
+	}
 
 	return ret;
 }
 
+static void ignore_task_cpu(void *data)
+{
+	struct trace_array *tr = data;
+	struct trace_pid_list *pid_list;
+
+	/*
+	 * This function is called by on_each_cpu() while the
+	 * event_mutex is held.
+	 */
+	pid_list = rcu_dereference_protected(tr->function_pids,
+					     mutex_is_locked(&ftrace_lock));
+
+	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+		       trace_ignore_this_task(pid_list, current));
+}
+
 static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	char buf[64], *tmp;
-	long val;
-	int ret;
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
+	struct trace_pid_list *filtered_pids = NULL;
+	struct trace_pid_list *pid_list;
+	ssize_t ret;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
+	if (!cnt)
+		return 0;
+
+	mutex_lock(&ftrace_lock);
+
+	filtered_pids = rcu_dereference_protected(tr->function_pids,
+					     lockdep_is_held(&ftrace_lock));
+
+	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
+	if (ret < 0)
+		goto out;
 
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
+	rcu_assign_pointer(tr->function_pids, pid_list);
 
-	buf[cnt] = 0;
+	if (filtered_pids) {
+		synchronize_sched();
+		trace_free_pid_list(filtered_pids);
+	} else if (pid_list) {
+		/* Register a probe to set whether to ignore the tracing of a task */
+		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+	}
 
 	/*
-	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
-	 * to clean the filter quietly.
+	 * Ignoring of pids is done at task switch. But we have to
+	 * check for those tasks that are currently running.
+	 * Always do this in case a pid was appended or removed.
 	 */
-	tmp = strstrip(buf);
-	if (strlen(tmp) == 0)
-		return 1;
+	on_each_cpu(ignore_task_cpu, tr, 1);
 
-	ret = kstrtol(tmp, 10, &val);
-	if (ret < 0)
-		return ret;
+	ftrace_update_pid_func();
+	ftrace_startup_all(0);
+ out:
+	mutex_unlock(&ftrace_lock);
 
-	ret = ftrace_pid_add(val);
+	if (ret > 0)
+		*ppos += ret;
 
-	return ret ? ret : cnt;
+	return ret;
 }
 
 static int
 ftrace_pid_release(struct inode *inode, struct file *file)
 {
-	if (file->f_mode & FMODE_READ)
-		seq_release(inode, file);
+	struct trace_array *tr = inode->i_private;
 
-	return 0;
+	trace_array_put(tr);
+
+	return seq_release(inode, file);
 }
 
 static const struct file_operations ftrace_pid_fops = {
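The ftrace_pid_write() path above follows the classic RCU publish-then-reclaim sequence: publish the new list, and only free the old one after every reader is guaranteed to have finished with it. A compressed, runnable user-space sketch of just that ordering (publish(), wait_for_readers(), and update_pid_list() are illustrative stand-ins; the kernel primitives they model are noted in comments):

	#include <stdio.h>
	#include <stdlib.h>

	struct pid_list { int dummy; };

	static struct pid_list *function_pids;	/* models tr->function_pids */

	static void publish(struct pid_list **slot, struct pid_list *new_list)
	{
		*slot = new_list;	/* kernel: rcu_assign_pointer() */
	}

	static void wait_for_readers(void)
	{
		/* kernel: synchronize_sched() -- returns only after every
		 * callback that might hold the old pointer has finished */
	}

	/* the order of operations ftrace_pid_write() follows */
	static void update_pid_list(struct pid_list *new_list)
	{
		struct pid_list *old = function_pids;

		publish(&function_pids, new_list);

		if (old) {
			wait_for_readers();
			free(old);	/* kernel: trace_free_pid_list() */
		} else if (new_list) {
			/* first list installed: hook the sched_switch tracepoint */
			puts("register_trace_sched_switch(...)");
		}
		/* finally refresh every cpu's ignore flag: on_each_cpu(...) */
	}

	int main(void)
	{
		update_pid_list(calloc(1, sizeof(struct pid_list)));	/* install */
		update_pid_list(NULL);					/* clear */
		return 0;
	}

This ordering is why the function callbacks can read tr->function_pids with only rcu_dereference_sched() and no locking on the hot path.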
@@ -5571,24 +5537,17 @@ static const struct file_operations ftrace_pid_fops = {
 	.release	= ftrace_pid_release,
 };
 
-static __init int ftrace_init_tracefs(void)
+void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
+	/* Only the top level directory has the dyn_tracefs and profile */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		ftrace_init_dyn_tracefs(d_tracer);
+		ftrace_profile_tracefs(d_tracer);
+	}
 
-	d_tracer = tracing_init_dentry();
-	if (IS_ERR(d_tracer))
-		return 0;
-
-	ftrace_init_dyn_tracefs(d_tracer);
-
-	trace_create_file("set_ftrace_pid", 0644, d_tracer,
-			    NULL, &ftrace_pid_fops);
-
-	ftrace_profile_tracefs(d_tracer);
-
-	return 0;
+	trace_create_file("set_ftrace_pid", 0644, d_tracer,
+			    tr, &ftrace_pid_fops);
 }
-fs_initcall(ftrace_init_tracefs);
 
 /**
  * ftrace_kill - kill ftrace
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7233,6 +7233,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 	for_each_tracing_cpu(cpu)
 		tracing_init_tracefs_percpu(tr, cpu);
 
+	ftrace_init_tracefs(tr, d_tracer);
 }
 
 static struct vfsmount *trace_automount(void *ingore)
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -156,6 +156,9 @@ struct trace_array_cpu {
 	char comm[TASK_COMM_LEN];
 
 	bool ignore_pid;
+#ifdef CONFIG_FUNCTION_TRACER
+	bool ftrace_ignore_pid;
+#endif
 };
 
 struct tracer;
@@ -247,6 +250,7 @@ struct trace_array {
 	int			ref;
 #ifdef CONFIG_FUNCTION_TRACER
 	struct ftrace_ops	*ops;
+	struct trace_pid_list	__rcu *function_pids;
 	/* function tracing enabled */
 	int			function_enabled;
 #endif
@@ -840,12 +844,9 @@ extern struct list_head ftrace_pids;
 #ifdef CONFIG_FUNCTION_TRACER
 extern bool ftrace_filter_param __initdata;
 
-static inline int ftrace_trace_task(struct task_struct *task)
+static inline int ftrace_trace_task(struct trace_array *tr)
 {
-	if (list_empty(&ftrace_pids))
-		return 1;
-
-	return test_tsk_trace_trace(task);
+	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
@@ -855,8 +856,9 @@ void ftrace_init_global_array_ops(struct trace_array *tr);
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 void ftrace_reset_array_ops(struct trace_array *tr);
 int using_ftrace_ops_list_func(void);
+void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 #else
-static inline int ftrace_trace_task(struct task_struct *task)
+static inline int ftrace_trace_task(struct trace_array *tr)
 {
 	return 1;
 }
@@ -871,6 +873,7 @@ static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
 static inline __init void
 ftrace_init_global_array_ops(struct trace_array *tr) { }
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
+static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
 /* ftace_func_t type is not defined, use macro instead of static inline */
 #define ftrace_init_array_ops(tr, func)	do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -43,7 +43,7 @@ static int allocate_ftrace_ops(struct trace_array *tr)
 
 	/* Currently only the non stack verision is supported */
 	ops->func = function_trace_call;
-	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
 
 	tr->ops = ops;
 	ops->private = tr;
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -319,7 +319,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
-	if (!ftrace_trace_task(current))
+	if (!ftrace_trace_task(tr))
 		return 0;
 
 	/* trace it when it is-nested-in or is a function enabled. */
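Taken together: set_ftrace_pid is now created per trace_array (with tr stashed in the inode's i_private), so each instance directory gets its own independent copy of the file. Writing pids under an instance filters only that instance's function and function_graph tracers, while the top-level file keeps its old behavior; this per-instance plumbing is also what a later function-fork option can build on.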