Commit 6803f37e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-fixes-3.11-rc2' of...

Merge tag 'trace-fixes-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Oleg is working on fixing a very tight race between opening an event
  file and deleting that event at the same time (both must be done as
  root).

  I also found a bug while testing Oleg's patches which has to do with a
  race with kprobes using the function tracer.

  There's also a deadlock fix that was introduced with the previous
  fixes"

* tag 'trace-fixes-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Remove locking trace_types_lock from tracing_reset_all_online_cpus()
  ftrace: Add check for NULL regs if ops has SAVE_REGS set
  tracing: Kill trace_cpu struct/members
  tracing: Change tracing_fops/snapshot_fops to rely on tracing_get_cpu()
  tracing: Change tracing_entries_fops to rely on tracing_get_cpu()
  tracing: Change tracing_stats_fops to rely on tracing_get_cpu()
  tracing: Change tracing_buffers_fops to rely on tracing_get_cpu()
  tracing: Change tracing_pipe_fops() to rely on tracing_get_cpu()
  tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
parents 561d9e81 09d8091c
...@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, ...@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
* the hashes are freed with call_rcu_sched(). * the hashes are freed with call_rcu_sched().
*/ */
static int static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{ {
struct ftrace_hash *filter_hash; struct ftrace_hash *filter_hash;
struct ftrace_hash *notrace_hash; struct ftrace_hash *notrace_hash;
int ret; int ret;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
* There's a small race when adding ops that the ftrace handler
* that wants regs, may be called without them. We can not
* allow that handler to be called if regs is NULL.
*/
if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
return 0;
#endif
filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
...@@ -4218,7 +4228,7 @@ static inline void ftrace_startup_enable(int command) { } ...@@ -4218,7 +4228,7 @@ static inline void ftrace_startup_enable(int command) { }
# define ftrace_shutdown_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0)
static inline int static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{ {
return 1; return 1;
} }
...@@ -4241,7 +4251,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, ...@@ -4241,7 +4251,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
do_for_each_ftrace_op(op, ftrace_control_list) { do_for_each_ftrace_op(op, ftrace_control_list) {
if (!(op->flags & FTRACE_OPS_FL_STUB) && if (!(op->flags & FTRACE_OPS_FL_STUB) &&
!ftrace_function_local_disabled(op) && !ftrace_function_local_disabled(op) &&
ftrace_ops_test(op, ip)) ftrace_ops_test(op, ip, regs))
op->func(ip, parent_ip, op, regs); op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op); } while_for_each_ftrace_op(op);
trace_recursion_clear(TRACE_CONTROL_BIT); trace_recursion_clear(TRACE_CONTROL_BIT);
...@@ -4274,7 +4284,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, ...@@ -4274,7 +4284,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
*/ */
preempt_disable_notrace(); preempt_disable_notrace();
do_for_each_ftrace_op(op, ftrace_ops_list) { do_for_each_ftrace_op(op, ftrace_ops_list) {
if (ftrace_ops_test(op, ip)) if (ftrace_ops_test(op, ip, regs))
op->func(ip, parent_ip, op, regs); op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op); } while_for_each_ftrace_op(op);
preempt_enable_notrace(); preempt_enable_notrace();
......
This diff is collapsed.
...@@ -130,19 +130,12 @@ enum trace_flag_type { ...@@ -130,19 +130,12 @@ enum trace_flag_type {
struct trace_array; struct trace_array;
struct trace_cpu {
struct trace_array *tr;
struct dentry *dir;
int cpu;
};
/* /*
* The CPU trace array - it consists of thousands of trace entries * The CPU trace array - it consists of thousands of trace entries
* plus some other descriptor data: (for example which task started * plus some other descriptor data: (for example which task started
* the trace, etc.) * the trace, etc.)
*/ */
struct trace_array_cpu { struct trace_array_cpu {
struct trace_cpu trace_cpu;
atomic_t disabled; atomic_t disabled;
void *buffer_page; /* ring buffer spare */ void *buffer_page; /* ring buffer spare */
...@@ -196,7 +189,6 @@ struct trace_array { ...@@ -196,7 +189,6 @@ struct trace_array {
bool allocated_snapshot; bool allocated_snapshot;
#endif #endif
int buffer_disabled; int buffer_disabled;
struct trace_cpu trace_cpu; /* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS #ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter; int sys_refcount_enter;
int sys_refcount_exit; int sys_refcount_exit;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment