Commit 712406a6 authored by Steven Rostedt

tracing/function-graph-tracer: make arch generic push pop functions

There is nothing arch-specific about the push and pop functions
used by the function graph tracer. This patch moves them to generic
code.
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
parent d2f8d7ee
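
The move is possible because both helpers touch only generic per-task state,
referenced through current, with nothing arch-specific in sight. For
orientation, a simplified model of that state, with struct and field names
taken from the diff below (the container shown as task_struct_excerpt is a
hypothetical stand-in; the real fields live in struct task_struct):

/*
 * Simplified model of the per-task state that
 * ftrace_push_return_trace()/ftrace_pop_return_trace() operate on.
 */
struct ftrace_ret_stack {
	unsigned long ret;		/* original return address */
	unsigned long func;		/* address of the traced function */
	unsigned long long calltime;	/* timestamp taken at function entry */
};

struct task_struct_excerpt {		/* hypothetical name; see linux/sched.h */
	int curr_ret_stack;		/* top-of-stack index, -1 when empty */
	struct ftrace_ret_stack *ret_stack; /* FTRACE_RETFUNC_DEPTH entries */
	atomic_t trace_overrun;		/* pushes dropped: stack was full */
};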
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relies on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #endif /* _ASM_X86_FTRACE_H */
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
...
@@ -389,79 +389,6 @@ void ftrace_nmi_exit(void)
 #endif /* !CONFIG_DYNAMIC_FTRACE */
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address from the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have nowhere to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
@@ -521,7 +448,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
...
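
With the rename in place, the x86 prepare_ftrace_return() reads roughly as
follows. This is a condensed sketch for orientation, not the verbatim file:
the real code in arch/x86/kernel/ftrace.c swaps the on-stack return address
with fault-protected accesses, which is reduced here to plain assignments.

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old = *parent;	/* original return address */
	unsigned long long calltime;
	struct ftrace_graph_ent trace;

	/* Divert the function's return to the arch trampoline */
	*parent = (unsigned long)&return_to_handler;

	calltime = cpu_clock(raw_smp_processor_id());

	/* Generic push; on failure, restore the original return address */
	if (ftrace_push_return_trace(old, calltime,
				     self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* The entry callback may veto tracing this function */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}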
@@ -379,6 +379,30 @@ struct ftrace_graph_ret {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
 /*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want it to keep being traced by the usual function
...
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
+
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;
...
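
The arch half of the round trip stays put: return_to_handler, still defined
in entry_32/64.S, is what every diverted return address points at. As an
illustration only, its effect can be modeled in C like this; the real
trampoline is assembly because it must save and restore the return-value
registers around the call and end with a jump to the popped address rather
than an indirect call through a pointer:

/* Illustrative C model of the entry_32/64.S trampoline; not real kernel code. */
static void return_to_handler_model(void)
{
	/*
	 * Pops the top ftrace_ret_stack entry, hands the completed
	 * ftrace_graph_ret to the tracer, and returns the original
	 * return address that was saved at function entry.
	 */
	unsigned long orig_ret = ftrace_return_to_handler();

	/* Resume the interrupted caller (the asm jumps to this address) */
	((void (*)(void))orig_ret)();
}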