Commit b7bc9e7d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-fixes-3.11-rc3' of...

Merge tag 'trace-fixes-3.11-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Oleg Nesterov has been working hard in closing all the holes that can
  lead to race conditions between deleting an event and accessing an
  event debugfs file.  This included a fix to the debugfs system (acked
  by Greg Kroah-Hartman).  We think that all the holes have been patched
  and hopefully we don't find more.  I haven't marked all of them for
  stable because I need to examine them more to figure out how far back
  some of the changes need to go.

  Along the way, some other fixes have been made.  Alexander Z Lam fixed
  some logic where the wrong buffer was being modified.

  Andrew Vagin found a possible corruption for machines that actually
  allocate cpumask, as a reference to one was being zeroed out by
  mistake.

  Dhaval Giani found a bad prototype when tracing is not configured.

  And I not only had some changes to help Oleg, but also finally fixed a
  long standing bug that Dave Jones and others have been hitting, where
  a module unload and reload can cause the function tracing accounting
  to get screwed up"

* tag 'trace-fixes-3.11-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Fix reset of time stamps during trace_clock changes
  tracing: Make TRACE_ITER_STOP_ON_FREE stop the correct buffer
  tracing: Fix trace_dump_stack() proto when CONFIG_TRACING is not set
  tracing: Fix fields of struct trace_iterator that are zeroed by mistake
  tracing/uprobes: Fail to unregister if probe event files are in use
  tracing/kprobes: Fail to unregister if probe event files are in use
  tracing: Add comment to describe special break case in probe_remove_event_call()
  tracing: trace_remove_event_call() should fail if call/file is in use
  debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs)
  ftrace: Check module functions being traced on reload
  ftrace: Consolidate some duplicate code for updating ftrace ops
  tracing: Change remove_event_file_dir() to clear "d_subdirs"->i_private
  tracing: Introduce remove_event_file_dir()
  tracing: Change f_start() to take event_mutex and verify i_private != NULL
  tracing: Change event_filter_read/write to verify i_private != NULL
  tracing: Change event_enable/disable_read() to verify i_private != NULL
  tracing: Turn event/id->i_private into call->event.type
parents 8ef9c292 9457158b
...@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove); ...@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
*/ */
void debugfs_remove_recursive(struct dentry *dentry) void debugfs_remove_recursive(struct dentry *dentry)
{ {
struct dentry *child; struct dentry *child, *next, *parent;
struct dentry *parent;
if (IS_ERR_OR_NULL(dentry)) if (IS_ERR_OR_NULL(dentry))
return; return;
...@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry) ...@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
return; return;
parent = dentry; parent = dentry;
down:
mutex_lock(&parent->d_inode->i_mutex); mutex_lock(&parent->d_inode->i_mutex);
list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
if (!debugfs_positive(child))
continue;
while (1) { /* perhaps simple_empty(child) makes more sense */
/*
* When all dentries under "parent" has been removed,
* walk up the tree until we reach our starting point.
*/
if (list_empty(&parent->d_subdirs)) {
mutex_unlock(&parent->d_inode->i_mutex);
if (parent == dentry)
break;
parent = parent->d_parent;
mutex_lock(&parent->d_inode->i_mutex);
}
child = list_entry(parent->d_subdirs.next, struct dentry,
d_u.d_child);
next_sibling:
/*
* If "child" isn't empty, walk down the tree and
* remove all its descendants first.
*/
if (!list_empty(&child->d_subdirs)) { if (!list_empty(&child->d_subdirs)) {
mutex_unlock(&parent->d_inode->i_mutex); mutex_unlock(&parent->d_inode->i_mutex);
parent = child; parent = child;
mutex_lock(&parent->d_inode->i_mutex); goto down;
continue;
} }
__debugfs_remove(child, parent); up:
if (parent->d_subdirs.next == &child->d_u.d_child) { if (!__debugfs_remove(child, parent))
/* simple_release_fs(&debugfs_mount, &debugfs_mount_count);
* Try the next sibling.
*/
if (child->d_u.d_child.next != &parent->d_subdirs) {
child = list_entry(child->d_u.d_child.next,
struct dentry,
d_u.d_child);
goto next_sibling;
}
/*
* Avoid infinite loop if we fail to remove
* one dentry.
*/
mutex_unlock(&parent->d_inode->i_mutex);
break;
}
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
} }
parent = dentry->d_parent; mutex_unlock(&parent->d_inode->i_mutex);
child = parent;
parent = parent->d_parent;
mutex_lock(&parent->d_inode->i_mutex); mutex_lock(&parent->d_inode->i_mutex);
__debugfs_remove(dentry, parent);
if (child != dentry) {
next = list_entry(child->d_u.d_child.next, struct dentry,
d_u.d_child);
goto up;
}
if (!__debugfs_remove(child, parent))
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
mutex_unlock(&parent->d_inode->i_mutex); mutex_unlock(&parent->d_inode->i_mutex);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
} }
EXPORT_SYMBOL_GPL(debugfs_remove_recursive); EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
......
...@@ -78,6 +78,11 @@ struct trace_iterator { ...@@ -78,6 +78,11 @@ struct trace_iterator {
/* trace_seq for __print_flags() and __print_symbolic() etc. */ /* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq; struct trace_seq tmp_seq;
cpumask_var_t started;
/* it's true when current open file is snapshot */
bool snapshot;
/* The below is zeroed out in pipe_read */ /* The below is zeroed out in pipe_read */
struct trace_seq seq; struct trace_seq seq;
struct trace_entry *ent; struct trace_entry *ent;
...@@ -90,10 +95,7 @@ struct trace_iterator { ...@@ -90,10 +95,7 @@ struct trace_iterator {
loff_t pos; loff_t pos;
long idx; long idx;
cpumask_var_t started; /* All new field here will be zeroed out in pipe_read */
/* it's true when current open file is snapshot */
bool snapshot;
}; };
enum trace_iter_flags { enum trace_iter_flags {
...@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, ...@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size, const char *name, int offset, int size,
int is_signed, int filter_type); int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call); extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call); extern int trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1) #define is_signed_type(type) (((type)(-1)) < (type)1)
......
...@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); ...@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
static inline void tracing_start(void) { } static inline void tracing_start(void) { }
static inline void tracing_stop(void) { } static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { } static inline void ftrace_off_permanent(void) { }
static inline void trace_dump_stack(void) { } static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { } static inline void tracing_on(void) { }
static inline void tracing_off(void) { } static inline void tracing_off(void) { }
......
...@@ -2169,12 +2169,57 @@ static cycle_t ftrace_update_time; ...@@ -2169,12 +2169,57 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt; static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt; unsigned long ftrace_update_tot_cnt;
static int ops_traces_mod(struct ftrace_ops *ops) static inline int ops_traces_mod(struct ftrace_ops *ops)
{ {
struct ftrace_hash *hash; /*
* Filter_hash being empty will default to trace module.
* But notrace hash requires a test of individual module functions.
*/
return ftrace_hash_empty(ops->filter_hash) &&
ftrace_hash_empty(ops->notrace_hash);
}
/*
* Check if the current ops references the record.
*
* If the ops traces all functions, then it was already accounted for.
* If the ops does not trace the current record function, skip it.
* If the ops ignores the function via notrace filter, skip it.
*/
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
/* If ops isn't enabled, ignore it */
if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
return 0;
/* If ops traces all mods, we already accounted for it */
if (ops_traces_mod(ops))
return 0;
/* The function must be in the filter */
if (!ftrace_hash_empty(ops->filter_hash) &&
!ftrace_lookup_ip(ops->filter_hash, rec->ip))
return 0;
/* If in notrace hash, we ignore it too */
if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
return 0;
return 1;
}
static int referenced_filters(struct dyn_ftrace *rec)
{
struct ftrace_ops *ops;
int cnt = 0;
hash = ops->filter_hash; for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
return ftrace_hash_empty(hash); if (ops_references_rec(ops, rec))
cnt++;
}
return cnt;
} }
static int ftrace_update_code(struct module *mod) static int ftrace_update_code(struct module *mod)
...@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod) ...@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
struct dyn_ftrace *p; struct dyn_ftrace *p;
cycle_t start, stop; cycle_t start, stop;
unsigned long ref = 0; unsigned long ref = 0;
bool test = false;
int i; int i;
/* /*
...@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod) ...@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
for (ops = ftrace_ops_list; for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) { ops != &ftrace_list_end; ops = ops->next) {
if (ops->flags & FTRACE_OPS_FL_ENABLED && if (ops->flags & FTRACE_OPS_FL_ENABLED) {
ops_traces_mod(ops)) if (ops_traces_mod(ops))
ref++; ref++;
else
test = true;
}
} }
} }
...@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod) ...@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
for (pg = ftrace_new_pgs; pg; pg = pg->next) { for (pg = ftrace_new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) { for (i = 0; i < pg->index; i++) {
int cnt = ref;
/* If something went wrong, bail without enabling anything */ /* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled)) if (unlikely(ftrace_disabled))
return -1; return -1;
p = &pg->records[i]; p = &pg->records[i];
p->flags = ref; if (test)
cnt += referenced_filters(p);
p->flags = cnt;
/* /*
* Do the initial record conversion from mcount jump * Do the initial record conversion from mcount jump
...@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod) ...@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
* conversion puts the module to the correct state, thus * conversion puts the module to the correct state, thus
* passing the ftrace_make_call check. * passing the ftrace_make_call check.
*/ */
if (ftrace_start_up && ref) { if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(p, 1); int failed = __ftrace_replace_code(p, 1);
if (failed) if (failed)
ftrace_bug(failed, p->ip); ftrace_bug(failed, p->ip);
...@@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) ...@@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return add_hash_entry(hash, ip); return add_hash_entry(hash, ip);
} }
static void ftrace_ops_update_code(struct ftrace_ops *ops)
{
if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}
static int static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable) unsigned long ip, int remove, int reset, int enable)
...@@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ...@@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash); ret = ftrace_hash_move(ops, enable, orig_hash, hash);
if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED if (!ret)
&& ftrace_enabled) ftrace_ops_update_code(ops);
ftrace_run_update_code(FTRACE_UPDATE_CALLS);
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
...@@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) ...@@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(iter->ops, filter_hash, ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash); orig_hash, iter->hash);
if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED) if (!ret)
&& ftrace_enabled) ftrace_ops_update_code(iter->ops);
ftrace_run_update_code(FTRACE_UPDATE_CALLS);
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
} }
......
...@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer, ...@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
} }
EXPORT_SYMBOL_GPL(filter_current_check_discard); EXPORT_SYMBOL_GPL(filter_current_check_discard);
cycle_t ftrace_now(int cpu) cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{ {
u64 ts; u64 ts;
/* Early boot up does not have a buffer yet */ /* Early boot up does not have a buffer yet */
if (!global_trace.trace_buffer.buffer) if (!buf->buffer)
return trace_clock_local(); return trace_clock_local();
ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu); ts = ring_buffer_time_stamp(buf->buffer, cpu);
ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts; return ts;
} }
cycle_t ftrace_now(int cpu)
{
return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
/** /**
* tracing_is_enabled - Show if global_trace has been disabled * tracing_is_enabled - Show if global_trace has been disabled
* *
...@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) ...@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
/* Make sure all commits have finished */ /* Make sure all commits have finished */
synchronize_sched(); synchronize_sched();
buf->time_start = ftrace_now(buf->cpu); buf->time_start = buffer_ftrace_now(buf, buf->cpu);
for_each_online_cpu(cpu) for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu); ring_buffer_reset_cpu(buffer, cpu);
...@@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) ...@@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_enable(buffer); ring_buffer_record_enable(buffer);
} }
void tracing_reset_current(int cpu)
{
tracing_reset(&global_trace.trace_buffer, cpu);
}
/* Must have trace_types_lock held */ /* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void) void tracing_reset_all_online_cpus(void)
{ {
...@@ -4151,6 +4151,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -4151,6 +4151,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
memset(&iter->seq, 0, memset(&iter->seq, 0,
sizeof(struct trace_iterator) - sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq)); offsetof(struct trace_iterator, seq));
cpumask_clear(iter->started);
iter->pos = -1; iter->pos = -1;
trace_event_read_lock(); trace_event_read_lock();
...@@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp) ...@@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
/* disable tracing ? */ /* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE) if (trace_flags & TRACE_ITER_STOP_ON_FREE)
tracing_off(); tracer_tracing_off(tr);
/* resize the ring buffer to 0 */ /* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
...@@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, ...@@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
* New clock may not be consistent with the previous clock. * New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps. * Reset the buffer so that it doesn't have incomparable timestamps.
*/ */
tracing_reset_online_cpus(&global_trace.trace_buffer); tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
tracing_reset_online_cpus(&global_trace.max_buffer); tracing_reset_online_cpus(&tr->max_buffer);
#endif #endif
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
......
This diff is collapsed.
...@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps, ...@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
free_page((unsigned long) buf); free_page((unsigned long) buf);
} }
/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{ {
struct event_filter *filter; struct event_filter *filter = call->filter;
mutex_lock(&event_mutex);
filter = call->filter;
if (filter && filter->filter_string) if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string); trace_seq_printf(s, "%s\n", filter->filter_string);
else else
trace_seq_puts(s, "none\n"); trace_seq_puts(s, "none\n");
mutex_unlock(&event_mutex);
} }
void print_subsystem_event_filter(struct event_subsystem *system, void print_subsystem_event_filter(struct event_subsystem *system,
...@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system, ...@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
return err; return err;
} }
/* caller must hold event_mutex */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string) int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{ {
struct event_filter *filter; struct event_filter *filter;
int err = 0; int err;
mutex_lock(&event_mutex);
if (!strcmp(strstrip(filter_string), "0")) { if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(call); filter_disable(call);
filter = call->filter; filter = call->filter;
if (!filter) if (!filter)
goto out_unlock; return 0;
RCU_INIT_POINTER(call->filter, NULL); RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */ /* Make sure the filter is not being used */
synchronize_sched(); synchronize_sched();
__free_filter(filter); __free_filter(filter);
goto out_unlock; return 0;
} }
err = create_filter(call, filter_string, true, &filter); err = create_filter(call, filter_string, true, &filter);
...@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) ...@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
__free_filter(tmp); __free_filter(tmp);
} }
} }
out_unlock:
mutex_unlock(&event_mutex);
return err; return err;
} }
......
...@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) ...@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
} }
static int register_probe_event(struct trace_probe *tp); static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp); static int unregister_probe_event(struct trace_probe *tp);
static DEFINE_MUTEX(probe_lock); static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list); static LIST_HEAD(probe_list);
...@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp) ...@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
if (trace_probe_is_enabled(tp)) if (trace_probe_is_enabled(tp))
return -EBUSY; return -EBUSY;
/* Will fail if probe is being used by ftrace or perf */
if (unregister_probe_event(tp))
return -EBUSY;
__unregister_trace_probe(tp); __unregister_trace_probe(tp);
list_del(&tp->list); list_del(&tp->list);
unregister_probe_event(tp);
return 0; return 0;
} }
...@@ -632,7 +635,9 @@ static int release_all_trace_probes(void) ...@@ -632,7 +635,9 @@ static int release_all_trace_probes(void)
/* TODO: Use batch unregistration */ /* TODO: Use batch unregistration */
while (!list_empty(&probe_list)) { while (!list_empty(&probe_list)) {
tp = list_entry(probe_list.next, struct trace_probe, list); tp = list_entry(probe_list.next, struct trace_probe, list);
unregister_trace_probe(tp); ret = unregister_trace_probe(tp);
if (ret)
goto end;
free_trace_probe(tp); free_trace_probe(tp);
} }
...@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp) ...@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
return ret; return ret;
} }
static void unregister_probe_event(struct trace_probe *tp) static int unregister_probe_event(struct trace_probe *tp)
{ {
int ret;
/* tp->event is unregistered in trace_remove_event_call() */ /* tp->event is unregistered in trace_remove_event_call() */
trace_remove_event_call(&tp->call); ret = trace_remove_event_call(&tp->call);
kfree(tp->call.print_fmt); if (!ret)
kfree(tp->call.print_fmt);
return ret;
} }
/* Make a debugfs interface for controlling probe points */ /* Make a debugfs interface for controlling probe points */
......
...@@ -70,7 +70,7 @@ struct trace_uprobe { ...@@ -70,7 +70,7 @@ struct trace_uprobe {
(sizeof(struct probe_arg) * (n))) (sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu); static int register_uprobe_event(struct trace_uprobe *tu);
static void unregister_uprobe_event(struct trace_uprobe *tu); static int unregister_uprobe_event(struct trace_uprobe *tu);
static DEFINE_MUTEX(uprobe_lock); static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list); static LIST_HEAD(uprobe_list);
...@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou ...@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
} }
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
static void unregister_trace_uprobe(struct trace_uprobe *tu) static int unregister_trace_uprobe(struct trace_uprobe *tu)
{ {
int ret;
ret = unregister_uprobe_event(tu);
if (ret)
return ret;
list_del(&tu->list); list_del(&tu->list);
unregister_uprobe_event(tu);
free_trace_uprobe(tu); free_trace_uprobe(tu);
return 0;
} }
/* Register a trace_uprobe and probe_event */ /* Register a trace_uprobe and probe_event */
...@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu) ...@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
/* register as an event */ /* register as an event */
old_tp = find_probe_event(tu->call.name, tu->call.class->system); old_tp = find_probe_event(tu->call.name, tu->call.class->system);
if (old_tp) if (old_tp) {
/* delete old event */ /* delete old event */
unregister_trace_uprobe(old_tp); ret = unregister_trace_uprobe(old_tp);
if (ret)
goto end;
}
ret = register_uprobe_event(tu); ret = register_uprobe_event(tu);
if (ret) { if (ret) {
...@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv) ...@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
group = UPROBE_EVENT_SYSTEM; group = UPROBE_EVENT_SYSTEM;
if (is_delete) { if (is_delete) {
int ret;
if (!event) { if (!event) {
pr_info("Delete command needs an event name.\n"); pr_info("Delete command needs an event name.\n");
return -EINVAL; return -EINVAL;
...@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv) ...@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
return -ENOENT; return -ENOENT;
} }
/* delete an event */ /* delete an event */
unregister_trace_uprobe(tu); ret = unregister_trace_uprobe(tu);
mutex_unlock(&uprobe_lock); mutex_unlock(&uprobe_lock);
return 0; return ret;
} }
if (argc < 2) { if (argc < 2) {
...@@ -408,16 +419,20 @@ static int create_trace_uprobe(int argc, char **argv) ...@@ -408,16 +419,20 @@ static int create_trace_uprobe(int argc, char **argv)
return ret; return ret;
} }
static void cleanup_all_probes(void) static int cleanup_all_probes(void)
{ {
struct trace_uprobe *tu; struct trace_uprobe *tu;
int ret = 0;
mutex_lock(&uprobe_lock); mutex_lock(&uprobe_lock);
while (!list_empty(&uprobe_list)) { while (!list_empty(&uprobe_list)) {
tu = list_entry(uprobe_list.next, struct trace_uprobe, list); tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
unregister_trace_uprobe(tu); ret = unregister_trace_uprobe(tu);
if (ret)
break;
} }
mutex_unlock(&uprobe_lock); mutex_unlock(&uprobe_lock);
return ret;
} }
/* Probes listing interfaces */ /* Probes listing interfaces */
...@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = { ...@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
static int probes_open(struct inode *inode, struct file *file) static int probes_open(struct inode *inode, struct file *file)
{ {
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) int ret;
cleanup_all_probes();
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
ret = cleanup_all_probes();
if (ret)
return ret;
}
return seq_open(file, &probes_seq_op); return seq_open(file, &probes_seq_op);
} }
...@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu) ...@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
return ret; return ret;
} }
static void unregister_uprobe_event(struct trace_uprobe *tu) static int unregister_uprobe_event(struct trace_uprobe *tu)
{ {
int ret;
/* tu->event is unregistered in trace_remove_event_call() */ /* tu->event is unregistered in trace_remove_event_call() */
trace_remove_event_call(&tu->call); ret = trace_remove_event_call(&tu->call);
if (ret)
return ret;
kfree(tu->call.print_fmt); kfree(tu->call.print_fmt);
tu->call.print_fmt = NULL; tu->call.print_fmt = NULL;
return 0;
} }
/* Make a trace interface for controlling probe points */ /* Make a trace interface for controlling probe points */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment