Commit debdd57f authored by Hiraku Toyooka, committed by Steven Rostedt

tracing: Make a snapshot feature available from userspace

Ftrace has a snapshot feature available from kernel space and
latency tracers (e.g. irqsoff) are using it. This patch enables
user applications to take a snapshot via debugfs.

Add "snapshot" debugfs file in "tracing" directory.

  snapshot:
    This is used to take a snapshot and to read the output of the
    snapshot.

     # echo 1 > snapshot

    This will allocate the spare buffer for snapshot (if it is
    not allocated), and take a snapshot.

     # cat snapshot

    This will show contents of the snapshot.

     # echo 0 > snapshot

    This will free the snapshot if it is allocated.

    Any other positive values will clear the snapshot contents if
    the snapshot is allocated, or return EINVAL if it is not allocated.

Link: http://lkml.kernel.org/r/20121226025300.3252.86850.stgit@liselsia

Cc: Jiri Olsa <jolsa@redhat.com>
Cc: David Sharp <dhsharp@google.com>
Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
[
   Fixed irqsoff selftest and also a conflict with a change
   that fixes the update_max_tr.
]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 2fd196ec
...@@ -83,6 +83,9 @@ struct trace_iterator { ...@@ -83,6 +83,9 @@ struct trace_iterator {
long idx; long idx;
cpumask_var_t started; cpumask_var_t started;
/* it's true when current open file is snapshot */
bool snapshot;
}; };
enum trace_iter_flags { enum trace_iter_flags {
......
...@@ -253,6 +253,16 @@ config FTRACE_SYSCALLS ...@@ -253,6 +253,16 @@ config FTRACE_SYSCALLS
help help
Basic tracer to catch the syscall entry and exit events. Basic tracer to catch the syscall entry and exit events.
config TRACER_SNAPSHOT
bool "Create a snapshot trace buffer"
select TRACER_MAX_TRACE
help
Allow tracing users to take snapshot of the current buffer using the
ftrace interface, e.g.:
echo 1 > /sys/kernel/debug/tracing/snapshot
cat snapshot
config TRACE_BRANCH_PROFILING config TRACE_BRANCH_PROFILING
bool bool
select GENERIC_TRACER select GENERIC_TRACER
......
...@@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
/* If we disabled the tracer, stop now */ if (!current_trace->allocated_snapshot) {
if (current_trace == &nop_trace) /* Only the nop tracer should hit this when disabling */
return; WARN_ON_ONCE(current_trace != &nop_trace);
if (WARN_ON_ONCE(!current_trace->use_max_tr))
return; return;
}
arch_spin_lock(&ftrace_max_lock); arch_spin_lock(&ftrace_max_lock);
...@@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return; return;
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) { if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
WARN_ON_ONCE(1);
return; return;
}
arch_spin_lock(&ftrace_max_lock); arch_spin_lock(&ftrace_max_lock);
...@@ -866,10 +863,13 @@ int register_tracer(struct tracer *type) ...@@ -866,10 +863,13 @@ int register_tracer(struct tracer *type)
current_trace = type; current_trace = type;
if (type->use_max_tr) {
/* If we expanded the buffers, make sure the max is expanded too */ /* If we expanded the buffers, make sure the max is expanded too */
if (ring_buffer_expanded && type->use_max_tr) if (ring_buffer_expanded)
ring_buffer_resize(max_tr.buffer, trace_buf_size, ring_buffer_resize(max_tr.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
type->allocated_snapshot = true;
}
/* the test is responsible for initializing and enabling */ /* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name); pr_info("Testing tracer %s: ", type->name);
...@@ -885,10 +885,14 @@ int register_tracer(struct tracer *type) ...@@ -885,10 +885,14 @@ int register_tracer(struct tracer *type)
/* Only reset on passing, to avoid touching corrupted buffers */ /* Only reset on passing, to avoid touching corrupted buffers */
tracing_reset_online_cpus(tr); tracing_reset_online_cpus(tr);
if (type->use_max_tr) {
type->allocated_snapshot = false;
/* Shrink the max buffer again */ /* Shrink the max buffer again */
if (ring_buffer_expanded && type->use_max_tr) if (ring_buffer_expanded)
ring_buffer_resize(max_tr.buffer, 1, ring_buffer_resize(max_tr.buffer, 1,
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
}
printk(KERN_CONT "PASSED\n"); printk(KERN_CONT "PASSED\n");
} }
...@@ -1964,6 +1968,10 @@ static void *s_start(struct seq_file *m, loff_t *pos) ...@@ -1964,6 +1968,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
*iter->trace = *current_trace; *iter->trace = *current_trace;
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
if (iter->snapshot && iter->trace->use_max_tr)
return ERR_PTR(-EBUSY);
if (!iter->snapshot)
atomic_inc(&trace_record_cmdline_disabled); atomic_inc(&trace_record_cmdline_disabled);
if (*pos != iter->pos) { if (*pos != iter->pos) {
...@@ -2003,6 +2011,10 @@ static void s_stop(struct seq_file *m, void *p) ...@@ -2003,6 +2011,10 @@ static void s_stop(struct seq_file *m, void *p)
{ {
struct trace_iterator *iter = m->private; struct trace_iterator *iter = m->private;
if (iter->snapshot && iter->trace->use_max_tr)
return;
if (!iter->snapshot)
atomic_dec(&trace_record_cmdline_disabled); atomic_dec(&trace_record_cmdline_disabled);
trace_access_unlock(iter->cpu_file); trace_access_unlock(iter->cpu_file);
trace_event_read_unlock(); trace_event_read_unlock();
...@@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = { ...@@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
}; };
static struct trace_iterator * static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file) __tracing_open(struct inode *inode, struct file *file, bool snapshot)
{ {
long cpu_file = (long) inode->i_private; long cpu_file = (long) inode->i_private;
struct trace_iterator *iter; struct trace_iterator *iter;
...@@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file) ...@@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file)
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail; goto fail;
if (current_trace && current_trace->print_max) if ((current_trace && current_trace->print_max) || snapshot)
iter->tr = &max_tr; iter->tr = &max_tr;
else else
iter->tr = &global_trace; iter->tr = &global_trace;
iter->snapshot = snapshot;
iter->pos = -1; iter->pos = -1;
mutex_init(&iter->mutex); mutex_init(&iter->mutex);
iter->cpu_file = cpu_file; iter->cpu_file = cpu_file;
...@@ -2491,7 +2504,8 @@ __tracing_open(struct inode *inode, struct file *file) ...@@ -2491,7 +2504,8 @@ __tracing_open(struct inode *inode, struct file *file)
if (trace_clocks[trace_clock_id].in_ns) if (trace_clocks[trace_clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
/* stop the trace while dumping */ /* stop the trace while dumping if we are not opening "snapshot" */
if (!iter->snapshot)
tracing_stop(); tracing_stop();
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
...@@ -2555,6 +2569,7 @@ static int tracing_release(struct inode *inode, struct file *file) ...@@ -2555,6 +2569,7 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close) if (iter->trace && iter->trace->close)
iter->trace->close(iter); iter->trace->close(iter);
if (!iter->snapshot)
/* reenable tracing if it was previously enabled */ /* reenable tracing if it was previously enabled */
tracing_start(); tracing_start();
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
...@@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file) ...@@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file)
} }
if (file->f_mode & FMODE_READ) { if (file->f_mode & FMODE_READ) {
iter = __tracing_open(inode, file); iter = __tracing_open(inode, file, false);
if (IS_ERR(iter)) if (IS_ERR(iter))
ret = PTR_ERR(iter); ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT) else if (trace_flags & TRACE_ITER_LATENCY_FMT)
...@@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf) ...@@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf)
if (current_trace && current_trace->reset) if (current_trace && current_trace->reset)
current_trace->reset(tr); current_trace->reset(tr);
had_max_tr = current_trace && current_trace->use_max_tr; had_max_tr = current_trace && current_trace->allocated_snapshot;
current_trace = &nop_trace; current_trace = &nop_trace;
if (had_max_tr && !t->use_max_tr) { if (had_max_tr && !t->use_max_tr) {
...@@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf) ...@@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf)
*/ */
ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&max_tr, 1); set_buffer_entries(&max_tr, 1);
tracing_reset_online_cpus(&max_tr);
current_trace->allocated_snapshot = false;
} }
destroy_trace_option_files(topts); destroy_trace_option_files(topts);
...@@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf) ...@@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf)
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
if (ret < 0) if (ret < 0)
goto out; goto out;
t->allocated_snapshot = true;
} }
if (t->init) { if (t->init) {
...@@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file) ...@@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
return single_open(file, tracing_clock_show, NULL); return single_open(file, tracing_clock_show, NULL);
} }
#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the "snapshot" debugfs file.
 *
 * When opened for reading, build a trace iterator over the snapshot
 * (max_tr) buffer by calling __tracing_open() with snapshot=true.
 * A write-only open needs no iterator, so it succeeds with no setup.
 *
 * Returns 0 on success or the PTR_ERR() of a failed __tracing_open().
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		/* true => iterate the spare (snapshot) buffer, not the live one */
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	}
	return ret;
}
/*
 * Write handler for the "snapshot" debugfs file.
 *
 * Semantics of the value written by userspace:
 *   0  - free the spare (snapshot) buffer if it is allocated
 *   1  - allocate the spare buffer if needed, then take a snapshot by
 *        swapping it with the live buffer (update_max_tr())
 *   >1 - clear the snapshot contents if allocated, else fail with -EINVAL
 *
 * Returns the number of bytes consumed on success, or a negative errno.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	unsigned long val;
	int ret;

	/* Make sure the live trace buffer is fully allocated first */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/*
	 * Latency tracers (use_max_tr) own the spare buffer themselves;
	 * refuse user-driven snapshots while one is active.
	 */
	if (current_trace && current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (current_trace->allocated_snapshot) {
			/* free spare buffer (shrink to minimum size) */
			ring_buffer_resize(max_tr.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
			set_buffer_entries(&max_tr, 1);
			tracing_reset_online_cpus(&max_tr);
			current_trace->allocated_snapshot = false;
		}
		break;
	case 1:
		if (!current_trace->allocated_snapshot) {
			/* allocate spare buffer, sized like the live buffer */
			ret = resize_buffer_duplicate_size(&max_tr,
					&global_trace, RING_BUFFER_ALL_CPUS);
			if (ret < 0)
				break;
			current_trace->allocated_snapshot = true;
		}

		/* irqs off: update_max_tr() requires it for the swap */
		local_irq_disable();
		/* Now, we're going to swap */
		update_max_tr(&global_trace, current, smp_processor_id());
		local_irq_enable();
		break;
	default:
		/* any other positive value just clears the snapshot */
		if (current_trace->allocated_snapshot)
			tracing_reset_online_cpus(&max_tr);
		else
			ret = -EINVAL;
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
#endif /* CONFIG_TRACER_SNAPSHOT */
static const struct file_operations tracing_max_lat_fops = { static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic, .open = tracing_open_generic,
.read = tracing_max_lat_read, .read = tracing_max_lat_read,
...@@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = { ...@@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = {
.write = tracing_clock_write, .write = tracing_clock_write,
}; };
#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * File operations for the "snapshot" debugfs file: reads go through the
 * seq_file iterator built in tracing_snapshot_open(); writes control
 * allocation/taking/clearing of the snapshot via tracing_snapshot_write().
 */
static const struct file_operations snapshot_fops = {
	.open = tracing_snapshot_open,
	.read = seq_read,
	.write = tracing_snapshot_write,
	.llseek = tracing_seek,
	.release = tracing_release,
};
#endif /* CONFIG_TRACER_SNAPSHOT */
struct ftrace_buffer_info { struct ftrace_buffer_info {
struct trace_array *tr; struct trace_array *tr;
void *spare; void *spare;
...@@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void) ...@@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void)
&ftrace_update_tot_cnt, &tracing_dyn_info_fops); &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif #endif
#ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file("snapshot", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
#endif
create_trace_options_dir(); create_trace_options_dir();
for_each_tracing_cpu(cpu) for_each_tracing_cpu(cpu)
......
...@@ -287,6 +287,7 @@ struct tracer { ...@@ -287,6 +287,7 @@ struct tracer {
struct tracer_flags *flags; struct tracer_flags *flags;
bool print_max; bool print_max;
bool use_max_tr; bool use_max_tr;
bool allocated_snapshot;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment