Commit b91facc3 authored by Frederic Weisbecker; committed by Ingo Molnar

tracing/function-graph-tracer: handle the leaf functions from trace_pipe

When one cats the trace file, leaf functions are printed without braces:

 function();

whereas in the trace_pipe file we'll see the following:

 function() {
 }

This is because the ring_buffer handling is not the same for those two files.
On the trace file, once an entry is printed the iterator is advanced, so we can
then check the next entry.

There is no iterator with trace_pipe: the current entry to print has only been
peeked, not consumed. So checking the next entry still returns the current one
for as long as we haven't consumed it.
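
To make the difference concrete, here is a minimal userspace sketch of the two
access patterns; the toy_* names are made up for illustration and are not the
kernel ring_buffer API:

#include <stdio.h>

/* Toy per-cpu buffer standing in for the ring buffer (illustrative only). */
struct toy_buffer {
	const char *events[2];
	int pos;	/* iterator position (trace file style) */
	int head;	/* next entry to consume (trace_pipe style) */
};

/* trace file style: reading returns the current entry and advances. */
static const char *toy_iter_read(struct toy_buffer *b)
{
	return b->pos < 2 ? b->events[b->pos++] : NULL;
}

/* trace_pipe style: peek never advances, only an explicit consume does. */
static const char *toy_peek(struct toy_buffer *b)
{
	return b->head < 2 ? b->events[b->head] : NULL;
}

static void toy_consume(struct toy_buffer *b)
{
	if (b->head < 2)
		b->head++;
}

int main(void)
{
	struct toy_buffer b = { .events = { "func_entry", "func_return" } };

	/* Iterator: the second read already sees the next event. */
	printf("iter read: %s\n", toy_iter_read(&b));
	printf("iter read: %s\n", toy_iter_read(&b));

	/* Pipe: peeking again still returns the same, unconsumed event. */
	printf("pipe peek: %s\n", toy_peek(&b));
	printf("pipe peek: %s\n", toy_peek(&b));
	toy_consume(&b);
	printf("pipe peek: %s\n", toy_peek(&b));

	return 0;
}

The iterator sees both events in one pass, while the pipe-style reader keeps
seeing "func_entry" until something consumes it.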

This patch introduces a new return value for the output callbacks, which asks the
tracing core not to consume the current entry after printing it.

We need it because we will have to consume the current entry ourselves in order
to check the next one.
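
The resulting contract can be sketched as follows; this is only an illustration
with hypothetical toy_* names, not the kernel code (the real change is the
tracing_read_pipe hunk below): when the print callback had to consume the
current entry itself, it returns the new value and the read loop skips its own
consume.

#include <stdio.h>

/* Hypothetical mirror of enum print_line_t, for illustration only. */
enum toy_print_line {
	TOY_TYPE_HANDLED,	/* printed, entry still in the buffer */
	TOY_TYPE_NO_CONSUME,	/* printed, entry consumed by the callback */
};

struct toy_queue {
	const char *entries[3];
	int head;
};

static const char *toy_peek(struct toy_queue *q)
{
	return q->head < 3 ? q->entries[q->head] : NULL;
}

static void toy_consume(struct toy_queue *q)
{
	if (q->head < 3)
		q->head++;
}

/*
 * This callback needs to look at the entry following the current one, so it
 * consumes the current entry itself and reports that back to the read loop.
 */
static enum toy_print_line toy_print(struct toy_queue *q)
{
	const char *curr = toy_peek(q);

	toy_consume(q);
	printf("%s (next: %s)\n", curr, toy_peek(q) ? toy_peek(q) : "none");
	return TOY_TYPE_NO_CONSUME;
}

int main(void)
{
	struct toy_queue q = { .entries = { "A", "B", "C" } };

	/*
	 * The loop mirrors the tracing_read_pipe change: consume only when
	 * the callback did not already do it.
	 */
	while (toy_peek(&q)) {
		if (toy_print(&q) != TOY_TYPE_NO_CONSUME)
			toy_consume(&q);
	}
	return 0;
}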

Now trace_pipe handles leaf functions properly.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1dfba05d
@@ -2468,8 +2468,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 			iter->seq.len = len;
 			break;
 		}
-
-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 
 		if (iter->seq.len >= cnt)
 			break;
@@ -63,13 +63,13 @@ struct ftrace_entry {
 
 /* Function call entry */
 struct ftrace_graph_ent_entry {
-	struct trace_entry	ent;
+	struct trace_entry		ent;
 	struct ftrace_graph_ent		graph_ent;
 };
 
 /* Function return entry */
 struct ftrace_graph_ret_entry {
-	struct trace_entry	ent;
+	struct trace_entry		ent;
 	struct ftrace_graph_ret		ret;
 };
 extern struct tracer boot_tracer;
@@ -309,7 +309,8 @@ extern void __ftrace_bad_type(void);
 enum print_line_t {
 	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
 	TRACE_TYPE_HANDLED	= 1,
-	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
+	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
+	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
@@ -212,8 +212,8 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 	return ret;
 }
 
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
 	struct ring_buffer_iter *ring_iter;
@@ -222,24 +222,33 @@ trace_branch_is_leaf(struct trace_iterator *iter,
 
 	ring_iter = iter->buffer_iter[iter->cpu];
-	if (!ring_iter)
-		return false;
-
-	event = ring_buffer_iter_peek(ring_iter, NULL);
+
+	/* First peek to compare current entry and the next one */
+	if (ring_iter)
+		event = ring_buffer_iter_peek(ring_iter, NULL);
+	else {
+		/* We need to consume the current entry to see the next one */
+		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+					 NULL);
+	}
 
 	if (!event)
-		return false;
+		return NULL;
 
 	next = ring_buffer_event_data(event);
 
 	if (next->ent.type != TRACE_GRAPH_RET)
-		return false;
+		return NULL;
 
 	if (curr->ent.pid != next->ent.pid ||
 			curr->graph_ent.func != next->ret.func)
-		return false;
+		return NULL;
 
-	return true;
+	/* this is a leaf, now advance the iterator */
+	if (ring_iter)
+		ring_buffer_read(ring_iter, NULL);
+
+	return next;
 }
 
 /* Signal a overhead of time execution to the output */
@@ -376,18 +385,15 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
 /* Case of a leaf function on its call entry */
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
-		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+		struct ftrace_graph_ent_entry *entry,
+		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
-	struct ftrace_graph_ret_entry *ret_entry;
 	struct ftrace_graph_ret *graph_ret;
-	struct ring_buffer_event *event;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
 	int ret;
 	int i;
 
-	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-	ret_entry = ring_buffer_event_data(event);
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
@@ -457,7 +463,11 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	return TRACE_TYPE_HANDLED;
+	/*
+	 * we already consumed the current entry to check the next one
+	 * and see if this is a leaf.
+	 */
+	return TRACE_TYPE_NO_CONSUME;
 }
 
 static enum print_line_t
@@ -469,6 +479,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	pid_t *last_entry = iter->private;
 	struct trace_entry *ent = iter->ent;
 	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
 
 	/* Pid */
 	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
@@ -504,8 +515,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 		return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	if (trace_branch_is_leaf(iter, field))
-		return print_graph_entry_leaf(iter, field, s);
+	leaf_ret = get_return_for_leaf(iter, field);
+	if (leaf_ret)
+		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
 		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
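
As a closing illustration, the leaf detection added above boils down to: the
very next event after a call entry is the matching return, i.e. same pid and
same function. The following self-contained userspace model (all toy_* names
are hypothetical, not kernel code) applies that rule to a peeked/consumed event
stream and prints leaves as function(); while keeping the braces for nested
calls:

#include <stdio.h>
#include <string.h>

enum toy_type { TOY_GRAPH_ENT, TOY_GRAPH_RET };

struct toy_event {
	enum toy_type type;
	int pid;
	const char *func;
};

static const struct toy_event stream[] = {
	{ TOY_GRAPH_ENT, 1, "do_work" },	/* nested: a child entry follows */
	{ TOY_GRAPH_ENT, 1, "kmalloc" },	/* leaf: its return follows directly */
	{ TOY_GRAPH_RET, 1, "kmalloc" },
	{ TOY_GRAPH_RET, 1, "do_work" },
};

static int head;	/* index of the current, not yet consumed event */

static const struct toy_event *toy_peek(void)
{
	return head < (int)(sizeof(stream) / sizeof(stream[0])) ?
		&stream[head] : NULL;
}

static void toy_consume(void)
{
	head++;
}

int main(void)
{
	const struct toy_event *curr, *next;
	int depth = 0;

	while ((curr = toy_peek()) != NULL) {
		if (curr->type == TOY_GRAPH_RET) {
			depth--;
			printf("%*s}\n", 2 * depth, "");
			toy_consume();
			continue;
		}
		/* Consume the entry so that peeking shows the next event. */
		toy_consume();
		next = toy_peek();
		if (next && next->type == TOY_GRAPH_RET &&
		    next->pid == curr->pid && !strcmp(next->func, curr->func)) {
			/* Leaf: fold entry + return into a single line. */
			printf("%*s%s();\n", 2 * depth, "", curr->func);
			toy_consume();	/* eat the matching return too */
		} else {
			printf("%*s%s() {\n", 2 * depth, "", curr->func);
			depth++;
		}
	}
	return 0;
}

With the sample stream above this prints do_work() {, then kmalloc();, then },
which is the collapsed leaf form the trace file already shows.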