Commit 3ebb6fb0 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-v4.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Various fixes to the tracing infrastructure:

   - Fix double free when the reg() call fails in
     event_trigger_callback()

   - Fix anomaly of snapshot causing tracing_on flag to change

   - Add selftest to test snapshot and tracing_on affecting each other

   - Fix setting of tracepoint flag on error that prevents probes from
     being deleted.

   - Fix another possible double free that is similar to
     event_trigger_callback()

   - Quiet a gcc warning of a false positive unused variable

   - Fix crash of partially exposed task->comm to trace events"

* tag 'trace-v4.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  kthread, tracing: Don't expose half-written comm when creating kthreads
  tracing: Quiet gcc warning about maybe unused link variable
  tracing: Fix possible double free in event_enable_trigger_func()
  tracing/kprobes: Fix trace_probe flags on enable_trace_kprobe() failure
  selftests/ftrace: Add snapshot and tracing_on test case
  ring_buffer: tracing: Inherit the tracing setting to next ring buffer
  tracing: Fix double free of event_trigger_data
parents f636d300 3e536e22
...@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer); ...@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer); void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer); void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer); int ring_buffer_record_is_on(struct ring_buffer *buffer);
int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
......
...@@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), ...@@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
task = create->result; task = create->result;
if (!IS_ERR(task)) { if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 }; static const struct sched_param param = { .sched_priority = 0 };
char name[TASK_COMM_LEN];
vsnprintf(task->comm, sizeof(task->comm), namefmt, args); /*
* task is already visible to other tasks, so updating
* COMM must be protected.
*/
vsnprintf(name, sizeof(name), namefmt, args);
set_task_comm(task, name);
/* /*
* root may have changed our (kthreadd's) priority or CPU mask. * root may have changed our (kthreadd's) priority or CPU mask.
* The kernel thread should not inherit these properties. * The kernel thread should not inherit these properties.
......
...@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer) ...@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
return !atomic_read(&buffer->record_disabled); return !atomic_read(&buffer->record_disabled);
} }
/**
 * ring_buffer_record_is_set_on - check whether writing is enabled by setting
 * @buffer: The ring buffer to query
 *
 * Returns true when the ring buffer has been explicitly enabled via
 * ring_buffer_record_on() (i.e. the RB_BUFFER_OFF bit is clear).
 *
 * Note this does not guarantee the buffer is currently writable: the
 * buffer may still be temporarily disabled through the reference count
 * taken by ring_buffer_record_disable(), which this check ignores.
 */
int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
{
	int disabled = atomic_read(&buffer->record_disabled);

	return (disabled & RB_BUFFER_OFF) == 0;
}
/** /**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
* @buffer: The ring buffer to stop writes to. * @buffer: The ring buffer to stop writes to.
......
...@@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
/* Inherit the recordable setting from trace_buffer */
if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
ring_buffer_record_on(tr->max_buffer.buffer);
else
ring_buffer_record_off(tr->max_buffer.buffer);
swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu); __update_max_tr(tr, tsk, cpu);
......
...@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops, ...@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
goto out_free; goto out_free;
out_reg: out_reg:
/* Up the trigger_data count to make sure reg doesn't free it on failure */
event_trigger_init(trigger_ops, trigger_data);
ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
/* /*
* The above returns on success the # of functions enabled, * The above returns on success the # of functions enabled,
...@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops, ...@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
* Consider no functions a failure too. * Consider no functions a failure too.
*/ */
if (!ret) { if (!ret) {
cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
ret = -ENOENT; ret = -ENOENT;
goto out_free; } else if (ret > 0)
} else if (ret < 0) ret = 0;
goto out_free;
ret = 0; /* Down the counter of trigger_data or free it if not used anymore */
event_trigger_free(trigger_ops, trigger_data);
out: out:
return ret; return ret;
...@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops, ...@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
goto out; goto out;
} }
/* Up the trigger_data count to make sure nothing frees it on failure */
event_trigger_init(trigger_ops, trigger_data);
if (trigger) { if (trigger) {
number = strsep(&trigger, ":"); number = strsep(&trigger, ":");
...@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, ...@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
goto out_disable; goto out_disable;
/* Just return zero, not the number of enabled functions */ /* Just return zero, not the number of enabled functions */
ret = 0; ret = 0;
event_trigger_free(trigger_ops, trigger_data);
out: out:
return ret; return ret;
...@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, ...@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
out_free: out_free:
if (cmd_ops->set_filter) if (cmd_ops->set_filter)
cmd_ops->set_filter(NULL, trigger_data, NULL); cmd_ops->set_filter(NULL, trigger_data, NULL);
kfree(trigger_data); event_trigger_free(trigger_ops, trigger_data);
kfree(enable_data); kfree(enable_data);
goto out; goto out;
} }
......
...@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, ...@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
static int static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{ {
struct event_file_link *link = NULL;
int ret = 0; int ret = 0;
if (file) { if (file) {
struct event_file_link *link;
link = kmalloc(sizeof(*link), GFP_KERNEL); link = kmalloc(sizeof(*link), GFP_KERNEL);
if (!link) { if (!link) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) ...@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
else else
ret = enable_kprobe(&tk->rp.kp); ret = enable_kprobe(&tk->rp.kp);
} }
if (ret) {
if (file) {
/* Notice the if is true on not WARN() */
if (!WARN_ON_ONCE(!link))
list_del_rcu(&link->list);
kfree(link);
tk->tp.flags &= ~TP_FLAG_TRACE;
} else {
tk->tp.flags &= ~TP_FLAG_PROFILE;
}
}
out: out:
return ret; return ret;
} }
......
#!/bin/sh
# description: Snapshot and tracing setting
# flags: instance

# Snapshot support is optional; bail out if this kernel lacks it.
[ ! -f snapshot ] && exit_unsupported

echo "Set tracing off"
echo 0 > tracing_on

echo "Allocate and take a snapshot"
echo 1 > snapshot

# The trace buffer was empty, so the snapshot holds no data, but the
# snapshot buffer itself must have been allocated.
grep -q "Snapshot is allocated" snapshot

echo "Ensure keep tracing off"
# Taking a snapshot must not flip tracing back on.
[ $(cat tracing_on) -eq 0 ]

echo "Set tracing on"
echo 1 > tracing_on

echo "Take a snapshot again"
echo 1 > snapshot

echo "Ensure keep tracing on"
# Likewise, a snapshot must not turn tracing off.
[ $(cat tracing_on) -eq 1 ]

exit 0
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment