Kirill Smelkov / linux

Commit 35f88e6b authored Feb 23, 2009 by Benjamin Herrenschmidt

Merge commit 'ftrace/function-graph' into next

Parents: 3b7faeb4 712406a6
Showing 5 changed files with 101 additions and 99 deletions (+101, -99)
arch/x86/include/asm/ftrace.h          +0   -25
arch/x86/kernel/dumpstack.c            +1   -0
arch/x86/kernel/ftrace.c               +1   -74
include/linux/ftrace.h                 +24  -0
kernel/trace/trace_functions_graph.c   +75  -0
arch/x86/include/asm/ftrace.h
...
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-        unsigned long ret;
-        unsigned long func;
-        unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
arch/x86/kernel/dumpstack.c
...
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
...
arch/x86/kernel/ftrace.c
...
@@ -389,79 +389,6 @@ void ftrace_nmi_exit(void)
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                        unsigned long func, int *depth)
-{
-        int index;
-
-        if (!current->ret_stack)
-                return -EBUSY;
-
-        /* The return trace stack is full */
-        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-                atomic_inc(&current->trace_overrun);
-                return -EBUSY;
-        }
-
-        index = ++current->curr_ret_stack;
-        barrier();
-        current->ret_stack[index].ret = ret;
-        current->ret_stack[index].func = func;
-        current->ret_stack[index].calltime = time;
-        *depth = index;
-
-        return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-        int index;
-
-        index = current->curr_ret_stack;
-
-        if (unlikely(index < 0)) {
-                ftrace_graph_stop();
-                WARN_ON(1);
-                /* Might as well panic, otherwise we have no where to go */
-                *ret = (unsigned long)panic;
-                return;
-        }
-
-        *ret = current->ret_stack[index].ret;
-        trace->func = current->ret_stack[index].func;
-        trace->calltime = current->ret_stack[index].calltime;
-        trace->overrun = atomic_read(&current->trace_overrun);
-        trace->depth = index;
-        barrier();
-        current->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-        struct ftrace_graph_ret trace;
-        unsigned long ret;
-
-        pop_return_trace(&trace, &ret);
-        trace.rettime = cpu_clock(raw_smp_processor_id());
-        ftrace_graph_return(&trace);
-
-        if (unlikely(!ret)) {
-                ftrace_graph_stop();
-                WARN_ON(1);
-                /* Might as well panic. What else to do? */
-                ret = (unsigned long)panic;
-        }
-
-        return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
...
@@ -521,7 +448,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
...
include/linux/ftrace.h
...
@@ -379,6 +379,30 @@ struct ftrace_graph_ret {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+        unsigned long ret;
+        unsigned long func;
+        unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relays on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                         unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
 /*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want them to keep traced by the usual function
...
kernel/trace/trace_functions_graph.c
...
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info.*/
+int ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                        unsigned long func, int *depth)
+{
+        int index;
+
+        if (!current->ret_stack)
+                return -EBUSY;
+
+        /* The return trace stack is full */
+        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+                atomic_inc(&current->trace_overrun);
+                return -EBUSY;
+        }
+
+        index = ++current->curr_ret_stack;
+        barrier();
+        current->ret_stack[index].ret = ret;
+        current->ret_stack[index].func = func;
+        current->ret_stack[index].calltime = time;
+        *depth = index;
+
+        return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+void ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+        int index;
+
+        index = current->curr_ret_stack;
+
+        if (unlikely(index < 0)) {
+                ftrace_graph_stop();
+                WARN_ON(1);
+                /* Might as well panic, otherwise we have no where to go */
+                *ret = (unsigned long)panic;
+                return;
+        }
+
+        *ret = current->ret_stack[index].ret;
+        trace->func = current->ret_stack[index].func;
+        trace->calltime = current->ret_stack[index].calltime;
+        trace->overrun = atomic_read(&current->trace_overrun);
+        trace->depth = index;
+        barrier();
+        current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+        struct ftrace_graph_ret trace;
+        unsigned long ret;
+
+        ftrace_pop_return_trace(&trace, &ret);
+        trace.rettime = cpu_clock(raw_smp_processor_id());
+        ftrace_graph_return(&trace);
+
+        if (unlikely(!ret)) {
+                ftrace_graph_stop();
+                WARN_ON(1);
+                /* Might as well panic. What else to do? */
+                ret = (unsigned long)panic;
+        }
+
+        return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int cpu, ret;
...
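The functions consolidated by this merge implement a small, bounded per-task stack of pending return addresses: prepare_ftrace_return() pushes an entry when a traced function is entered, and ftrace_return_to_handler() pops it when the function returns, recovering the original return address. Below is a minimal, self-contained userspace sketch of that push/pop pattern; the names (ret_record, RET_STACK_DEPTH, push_return, pop_return) are hypothetical stand-ins for current->ret_stack, FTRACE_RETFUNC_DEPTH and the ftrace_push_return_trace()/ftrace_pop_return_trace() helpers, not kernel code.

/* Illustrative sketch only: a fixed-depth stack of pending return records,
 * mirroring the push/pop logic moved into trace_functions_graph.c.
 * All names here are hypothetical. */
#include <errno.h>
#include <stdio.h>

#define RET_STACK_DEPTH 8               /* stand-in for FTRACE_RETFUNC_DEPTH */

struct ret_record {
        unsigned long ret;              /* saved return address */
        unsigned long func;             /* traced function */
        unsigned long long calltime;    /* entry timestamp */
};

static struct ret_record ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;         /* index of top entry, -1 = empty */
static unsigned long overruns;          /* stand-in for trace_overrun */

/* Push one record; fail with -EBUSY when the stack is full,
 * as ftrace_push_return_trace() does. */
static int push_return(unsigned long ret, unsigned long long time,
                       unsigned long func, int *depth)
{
        if (curr_ret_stack == RET_STACK_DEPTH - 1) {
                overruns++;
                return -EBUSY;
        }
        curr_ret_stack++;
        ret_stack[curr_ret_stack].ret = ret;
        ret_stack[curr_ret_stack].func = func;
        ret_stack[curr_ret_stack].calltime = time;
        *depth = curr_ret_stack;
        return 0;
}

/* Pop the top record and hand back the original return address. */
static int pop_return(struct ret_record *out, unsigned long *ret)
{
        if (curr_ret_stack < 0)
                return -EINVAL;         /* nothing pushed: caller must bail out */
        *out = ret_stack[curr_ret_stack];
        *ret = out->ret;
        curr_ret_stack--;
        return 0;
}

int main(void)
{
        struct ret_record rec;
        unsigned long orig_ret;
        int depth;

        push_return(0x1000, 111, 0x2000, &depth);
        push_return(0x1004, 222, 0x3000, &depth);

        while (pop_return(&rec, &orig_ret) == 0)
                printf("return to %#lx (func %#lx, depth now %d)\n",
                       orig_ret, rec.func, curr_ret_stack + 1);
        printf("overruns: %lu\n", overruns);
        return 0;
}

The kernel version additionally places barrier() calls around the curr_ret_stack update so the index change and the slot contents cannot be reordered, and it falls back to the address of panic() when a popped return address is unusable; this single-threaded sketch omits both concerns.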