Commit 40999096
Authored Feb 19, 2009 by Ingo Molnar

Merge branches 'tracing/blktrace', 'tracing/ftrace' and 'tracing/urgent' into tracing/core

Parents: 72c26c9a fa7c7f6e ed4a2f37
Showing 13 changed files with 183 additions and 107 deletions (+183, -107):

  include/linux/ftrace.h                  +6   -6
  kernel/trace/Kconfig                    +2   -0
  kernel/trace/ftrace.c                  +52  -50
  kernel/trace/trace.c                   +44  -25
  kernel/trace/trace.h                   +22   -3
  kernel/trace/trace_functions.c         +17  -15
  kernel/trace/trace_functions_graph.c    +1   -0
  kernel/trace/trace_irqsoff.c            +1   -1
  kernel/trace/trace_sched_switch.c       +2   -1
  kernel/trace/trace_sched_wakeup.c       +2   -1
  kernel/trace/trace_selftest.c          +32   -3
  kernel/trace/trace_stat.c               +1   -1
  kernel/trace/trace_sysprof.c            +1   -1
include/linux/ftrace.h

@@ -108,7 +108,7 @@ struct ftrace_func_command {
 struct seq_file;
 
-struct ftrace_hook_ops {
+struct ftrace_probe_ops {
 	void			(*func)(unsigned long ip,
 					unsigned long parent_ip,
 					void **data);
@@ -116,19 +116,19 @@ struct ftrace_hook_ops {
 	void			(*free)(void **data);
 	int			(*print)(struct seq_file *m,
 					 unsigned long ip,
-					 struct ftrace_hook_ops *ops,
+					 struct ftrace_probe_ops *ops,
 					 void *data);
 };
 
 extern int
-register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
-			      void *data);
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+			      void *data);
 extern void
-unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
-				void *data);
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+				void *data);
 extern void
-unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops);
-extern void unregister_ftrace_function_hook_all(char *glob);
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
 
 enum {
 	FTRACE_FL_FREE		= (1 << 0),
...
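As context for the rename above: the probe API attaches a callback to every function matched by a glob in set_ftrace_filter. A minimal sketch of an in-kernel caller, based only on the declarations shown here (the my_probe_* names and the "sched_*" glob are hypothetical; the real in-tree user is the traceon/traceoff command in kernel/trace/trace_functions.c further down this commit):

	#include <linux/ftrace.h>
	#include <linux/kallsyms.h>
	#include <linux/seq_file.h>

	/* invoked by the function tracer each time a matched function is hit */
	static void my_probe_func(unsigned long ip, unsigned long parent_ip, void **data)
	{
		/* per-probe private state, if any, arrives through *data */
	}

	/* optional: controls how the probe is listed when reading set_ftrace_filter */
	static int my_probe_print(struct seq_file *m, unsigned long ip,
				  struct ftrace_probe_ops *ops, void *data)
	{
		char str[KSYM_SYMBOL_LEN];

		kallsyms_lookup(ip, NULL, NULL, NULL, str);
		seq_printf(m, "%s:my_probe\n", str);
		return 0;
	}

	static struct ftrace_probe_ops my_probe_ops = {
		.func	= my_probe_func,
		.print	= my_probe_print,
	};

	/* attach the probe to every function matching the glob ... */
	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);

	/* ... and later detach that same probe again */
	unregister_ftrace_function_probe_func("sched_*", &my_probe_ops);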
kernel/trace/Kconfig

@@ -60,6 +60,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
...
@@ -246,6 +247,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
...
kernel/trace/ftrace.c

@@ -255,9 +255,9 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
 
-struct ftrace_func_hook {
+struct ftrace_func_probe {
 	struct hlist_node	node;
-	struct ftrace_hook_ops	*ops;
+	struct ftrace_probe_ops	*ops;
 	unsigned long		flags;
 	unsigned long		ip;
 	void			*data;
...
@@ -460,8 +460,8 @@ static void ftrace_bug(int failed, unsigned long ip)
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
-	unsigned long ip, fl;
 	unsigned long ftrace_addr;
+	unsigned long ip, fl;
 
 	ftrace_addr = (unsigned long)FTRACE_ADDR;
...
@@ -530,9 +530,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 static void ftrace_replace_code(int enable)
 {
-	int failed;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
+	int failed;
 
 	do_for_each_ftrace_rec(pg, rec) {
 		/*
...
@@ -830,11 +830,11 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 static int t_hash_show(struct seq_file *m, void *v)
 {
-	struct ftrace_func_hook *rec;
+	struct ftrace_func_probe *rec;
 	struct hlist_node *hnd = v;
 	char str[KSYM_SYMBOL_LEN];
 
-	rec = hlist_entry(hnd, struct ftrace_func_hook, node);
+	rec = hlist_entry(hnd, struct ftrace_func_probe, node);
 
 	if (rec->ops->print)
 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
...
@@ -1208,14 +1208,15 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
 static void ftrace_match_records(char *buff, int len, int enable)
 {
-	char *search;
+	unsigned int search_len;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
+	unsigned long flag;
+	char *search;
 	int type;
-	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-	unsigned search_len;
 	int not;
 
+	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	type = ftrace_setup_glob(buff, len, &search, &not);
 
 	search_len = strlen(search);
...
@@ -1263,14 +1264,16 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
 static void ftrace_match_module_records(char *buff, char *mod, int enable)
 {
-	char *search = buff;
+	unsigned search_len = 0;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
 	int type = MATCH_FULL;
-	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-	unsigned search_len = 0;
+	char *search = buff;
+	unsigned long flag;
 	int not = 0;
 
+	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+
 	/* blank or '*' mean the same */
 	if (strcmp(buff, "*") == 0)
 		buff[0] = 0;
...
@@ -1348,9 +1351,9 @@ static int __init ftrace_mod_cmd_init(void)
 device_initcall(ftrace_mod_cmd_init);
 
 static void
-function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_func_hook *entry;
+	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
 	struct hlist_node *n;
 	unsigned long key;
...
@@ -1376,18 +1379,18 @@ function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
 	ftrace_preempt_enable(resched);
 }
 
-static struct ftrace_ops trace_hook_ops __read_mostly =
+static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-	.func			= function_trace_hook_call,
+	.func			= function_trace_probe_call,
 };
 
-static int ftrace_hook_registered;
+static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_hook(void)
+static void __enable_ftrace_function_probe(void)
 {
 	int i;
 
-	if (ftrace_hook_registered)
+	if (ftrace_probe_registered)
 		return;
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
...
@@ -1399,16 +1402,16 @@ static void __enable_ftrace_function_hook(void)
 	if (i == FTRACE_FUNC_HASHSIZE)
 		return;
 
-	__register_ftrace_function(&trace_hook_ops);
+	__register_ftrace_function(&trace_probe_ops);
 	ftrace_startup(0);
-	ftrace_hook_registered = 1;
+	ftrace_probe_registered = 1;
 }
 
-static void __disable_ftrace_function_hook(void)
+static void __disable_ftrace_function_probe(void)
 {
 	int i;
 
-	if (!ftrace_hook_registered)
+	if (!ftrace_probe_registered)
 		return;
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
...
@@ -1418,16 +1421,16 @@ static void __disable_ftrace_function_hook(void)
 	}
 
 	/* no more funcs left */
-	__unregister_ftrace_function(&trace_hook_ops);
+	__unregister_ftrace_function(&trace_probe_ops);
 	ftrace_shutdown(0);
-	ftrace_hook_registered = 0;
+	ftrace_probe_registered = 0;
 }
 
 
 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
 {
-	struct ftrace_func_hook *entry =
-		container_of(rhp, struct ftrace_func_hook, rcu);
+	struct ftrace_func_probe *entry =
+		container_of(rhp, struct ftrace_func_probe, rcu);
 
 	if (entry->ops->free)
 		entry->ops->free(&entry->data);
...
@@ -1436,21 +1439,21 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
 int
-register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			      void *data)
 {
-	struct ftrace_func_hook *entry;
+	struct ftrace_func_probe *entry;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
-	unsigned long key;
 	int type, len, not;
+	unsigned long key;
 	int count = 0;
 	char *search;
 
 	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
 	len = strlen(search);
 
-	/* we do not support '!' for function hooks */
+	/* we do not support '!' for function probes */
 	if (WARN_ON(not))
 		return -EINVAL;
...
@@ -1465,7 +1468,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 		if (!entry) {
-			/* If we did not hook to any, then return error */
+			/* If we did not process any, then return error */
 			if (!count)
 				count = -ENOMEM;
 			goto out_unlock;
...
@@ -1495,7 +1498,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
 
 	} while_for_each_ftrace_rec();
-	__enable_ftrace_function_hook();
+	__enable_ftrace_function_probe();
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
...
@@ -1504,15 +1507,15 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 }
 
 enum {
-	HOOK_TEST_FUNC		= 1,
-	HOOK_TEST_DATA		= 2
+	PROBE_TEST_FUNC		= 1,
+	PROBE_TEST_DATA		= 2
 };
 
 static void
-__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
-				  void *data, int flags)
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+				  void *data, int flags)
 {
-	struct ftrace_func_hook *entry;
+	struct ftrace_func_probe *entry;
 	struct hlist_node *n, *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int type = MATCH_FULL;
...
@@ -1527,7 +1530,7 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
 		len = strlen(search);
 
-		/* we do not support '!' for function hooks */
+		/* we do not support '!' for function probes */
 		if (WARN_ON(not))
 			return;
 	}
...
@@ -1539,10 +1542,10 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
 
 			/* break up if statements for readability */
-			if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
+			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
 				continue;
 
-			if ((flags & HOOK_TEST_DATA) && entry->data != data)
+			if ((flags & PROBE_TEST_DATA) && entry->data != data)
 				continue;
 
 			/* do this last, since it is the most expensive */
...
@@ -1557,27 +1560,27 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
 			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
 		}
 	}
-	__disable_ftrace_function_hook();
+	__disable_ftrace_function_probe();
 	mutex_unlock(&ftrace_lock);
 }
 
 void
-unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				void *data)
 {
-	__unregister_ftrace_function_hook(glob, ops, data,
-					  HOOK_TEST_FUNC | HOOK_TEST_DATA);
+	__unregister_ftrace_function_probe(glob, ops, data,
+					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
 }
 
 void
-unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
 {
-	__unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
+	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
 }
 
-void unregister_ftrace_function_hook_all(char *glob)
+void unregister_ftrace_function_probe_all(char *glob)
 {
-	__unregister_ftrace_function_hook(glob, NULL, NULL, 0);
+	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
 }
 
 static LIST_HEAD(ftrace_commands);
...
@@ -1623,8 +1626,8 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd)
 static int ftrace_process_regex(char *buff, int len, int enable)
 {
-	struct ftrace_func_command *p;
 	char *func, *command, *next = buff;
+	struct ftrace_func_command *p;
 	int ret = -EINVAL;
 
 	func = strsep(&next, ":");
...
@@ -2392,7 +2395,6 @@ static __init int ftrace_init_debugfs(void)
 			   "'set_ftrace_pid' entry\n");
 	return 0;
 }
-
 fs_initcall(ftrace_init_debugfs);
 
 /**
...
kernel/trace/trace.c

@@ -336,7 +336,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	data->rt_priority = tsk->rt_priority;
 
 	/* record this tasks comm */
-	tracing_record_cmdline(current);
+	tracing_record_cmdline(tsk);
 }
 
 static void
...
@@ -499,6 +499,9 @@ __acquires(kernel_lock)
 	else
 		if (!type->flags->opts)
 			type->flags->opts = dummy_tracer_opt;
+	if (!type->wait_pipe)
+		type->wait_pipe = default_wait_pipe;
+
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest && !tracing_selftest_disabled) {
...
@@ -1064,7 +1067,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_prio		= wakee->prio;
 	entry->next_state		= wakee->state;
 	entry->next_cpu			= task_cpu(wakee);
-	trace_buffer_unlock_commit(tr, event, flags, pc);
+
+	ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 }
 
 void
...
@@ -2392,6 +2398,38 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
+
+void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
+/*
+ * This is a make-shift waitqueue.
+ * A tracer might use this callback on some rare cases:
+ *
+ *  1) the current tracer might hold the runqueue lock when it wakes up
+ *     a reader, hence a deadlock (sched, function, and function graph tracers)
+ *  2) the function tracers, trace all functions, we don't want
+ *     the overhead of calling wake_up and friends
+ *     (and tracing them too)
+ *
+ * Anyway, this is really very primitive wakeup.
+ */
+void poll_wait_pipe(struct trace_iterator *iter)
+{
+	set_current_state(TASK_INTERRUPTIBLE);
+	/* sleep for 100 msecs, and try again. */
+	schedule_timeout(HZ / 10);
+}
+
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
...
@@ -2403,30 +2441,14 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EAGAIN;
 		}
 
-		/*
-		 * This is a make-shift waitqueue. The reason we don't use
-		 * an actual wait queue is because:
-		 *  1) we only ever have one waiter
-		 *  2) the tracing, traces all functions, we don't want
-		 *     the overhead of calling wake_up and friends
-		 *     (and tracing them too)
-		 * Anyway, this is really very primitive wakeup.
-		 */
-		set_current_state(TASK_INTERRUPTIBLE);
-		iter->tr->waiter = current;
-
 		mutex_unlock(&trace_types_lock);
 
-		/* sleep for 100 msecs, and try again. */
-		schedule_timeout(HZ/10);
+		iter->trace->wait_pipe(iter);
 
 		mutex_lock(&trace_types_lock);
 
-		iter->tr->waiter = NULL;
-
-		if (signal_pending(current)) {
+		if (signal_pending(current))
 			return -EINTR;
-		}
 
 		if (iter->trace != current_trace)
 			return 0;
...
@@ -2442,8 +2464,6 @@ static int tracing_wait_pipe(struct file *filp)
 		 */
 		if (!tracer_enabled && iter->pos)
 			break;
-
-		continue;
 	}
 
 	return 1;
...
@@ -2551,8 +2571,7 @@ static struct pipe_buf_operations tracing_pipe_buf_ops = {
 };
 
 static size_t
-tracing_fill_pipe_page(struct page *pages, size_t rem,
-			struct trace_iterator *iter)
+tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 {
 	size_t count;
 	int ret;
...
@@ -2629,7 +2648,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		if (!pages[i])
 			break;
 
-		rem = tracing_fill_pipe_page(pages[i], rem, iter);
+		rem = tracing_fill_pipe_page(rem, iter);
 
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
...
kernel/trace/trace.h

@@ -337,18 +337,34 @@ struct tracer_flags {
 #define TRACER_OPT(s, b)	.name = #s, .bit = b
 
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
  */
 struct tracer {
 	const char		*name;
-	/* Your tracer should raise a warning if init fails */
 	int			(*init)(struct trace_array *tr);
 	void			(*reset)(struct trace_array *tr);
 	void			(*start)(struct trace_array *tr);
 	void			(*stop)(struct trace_array *tr);
 	void			(*open)(struct trace_iterator *iter);
 	void			(*pipe_open)(struct trace_iterator *iter);
+	void			(*wait_pipe)(struct trace_iterator *iter);
 	void			(*close)(struct trace_iterator *iter);
 	ssize_t			(*read)(struct trace_iterator *iter,
 					struct file *filp, char __user *ubuf,
...
@@ -432,6 +448,9 @@ void tracing_generic_entry_update(struct trace_entry *entry,
 				       unsigned long flags,
 				       int pc);
 
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
+
 void ftrace(struct trace_array *tr,
 	    struct trace_array_cpu *data,
 	    unsigned long ip,
...
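The @wait_pipe callback documented above is what the trace.c change wires up: a tracer can leave it NULL to get default_wait_pipe(), or point it at poll_wait_pipe() as several tracers in this commit do. As a rough illustration only (the my_tracer_* names are made up; the shape follows function_trace and graph_trace below), a tracer definition would look something like:

	static int my_tracer_init(struct trace_array *tr)
	{
		/* a tracer should raise a warning and return nonzero if init fails */
		return 0;
	}

	static void my_tracer_reset(struct trace_array *tr)
	{
	}

	static struct tracer my_tracer __read_mostly = {
		.name		= "my_tracer",		/* listed in available_tracers */
		.init		= my_tracer_init,
		.reset		= my_tracer_reset,
		.wait_pipe	= poll_wait_pipe,	/* avoid waking trace_pipe readers from tracing context */
	};

	/* typically registered from an initcall */
	register_tracer(&my_tracer);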
kernel/trace/trace_functions.c

@@ -225,6 +225,7 @@ static struct tracer function_trace __read_mostly =
 	.init		= function_trace_init,
 	.reset		= function_trace_reset,
 	.start		= function_trace_start,
+	.wait_pipe	= poll_wait_pipe,
 	.flags		= &func_flags,
 	.set_flag	= func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
...
@@ -269,21 +270,21 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 static int
 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-			 struct ftrace_hook_ops *ops, void *data);
+			 struct ftrace_probe_ops *ops, void *data);
 
-static struct ftrace_hook_ops traceon_hook_ops = {
+static struct ftrace_probe_ops traceon_probe_ops = {
 	.func			= ftrace_traceon,
 	.print			= ftrace_trace_onoff_print,
 };
 
-static struct ftrace_hook_ops traceoff_hook_ops = {
+static struct ftrace_probe_ops traceoff_probe_ops = {
 	.func			= ftrace_traceoff,
 	.print			= ftrace_trace_onoff_print,
 };
 
 static int
 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-			 struct ftrace_hook_ops *ops, void *data)
+			 struct ftrace_probe_ops *ops, void *data)
 {
 	char str[KSYM_SYMBOL_LEN];
 	long count = (long)data;
...
@@ -291,12 +292,14 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 	kallsyms_lookup(ip, NULL, NULL, NULL, str);
 	seq_printf(m, "%s:", str);
 
-	if (ops == &traceon_hook_ops)
+	if (ops == &traceon_probe_ops)
 		seq_printf(m, "traceon");
 	else
 		seq_printf(m, "traceoff");
 
-	if (count != -1)
+	if (count == -1)
+		seq_printf(m, ":unlimited\n");
+	else
 		seq_printf(m, ":count=%ld", count);
 	seq_putc(m, '\n');
...
@@ -306,15 +309,15 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 static int
 ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
 {
-	struct ftrace_hook_ops *ops;
+	struct ftrace_probe_ops *ops;
 
 	/* we register both traceon and traceoff to this callback */
 	if (strcmp(cmd, "traceon") == 0)
-		ops = &traceon_hook_ops;
+		ops = &traceon_probe_ops;
 	else
-		ops = &traceoff_hook_ops;
+		ops = &traceoff_probe_ops;
 
-	unregister_ftrace_function_hook_func(glob, ops);
+	unregister_ftrace_function_probe_func(glob, ops);
 
 	return 0;
 }
...
@@ -322,7 +325,7 @@ ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
 static int
 ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
 {
-	struct ftrace_hook_ops *ops;
+	struct ftrace_probe_ops *ops;
 	void *count = (void *)-1;
 	char *number;
 	int ret;
...
@@ -336,9 +339,9 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
 	/* we register both traceon and traceoff to this callback */
 	if (strcmp(cmd, "traceon") == 0)
-		ops = &traceon_hook_ops;
+		ops = &traceon_probe_ops;
 	else
-		ops = &traceoff_hook_ops;
+		ops = &traceoff_probe_ops;
 
 	if (!param)
 		goto out_reg;
...
@@ -357,7 +360,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
 	return ret;
 
  out_reg:
-	ret = register_ftrace_function_hook(glob, ops, count);
+	ret = register_ftrace_function_probe(glob, ops, count);
 
 	return ret;
 }
...
@@ -397,6 +400,5 @@ static __init int init_function_trace(void)
 	init_func_cmd_traceon();
 
 	return register_tracer(&function_trace);
 }
-
 device_initcall(init_function_trace);
kernel/trace/trace_functions_graph.c

@@ -757,6 +757,7 @@ static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
 	.close		= graph_trace_close,
+	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,
 	.print_line	= print_graph_function,
...
kernel/trace/trace_irqsoff.c

 /*
- * trace irqs off criticall timings
+ * trace irqs off critical timings
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
...
kernel/trace/trace_sched_switch.c

@@ -93,7 +93,7 @@ static int tracing_sched_register(void)
 	ret = register_trace_sched_switch(probe_sched_switch);
 	if (ret) {
 		pr_info("sched trace: Couldn't activate tracepoint"
-			" probe to kernel_sched_schedule\n");
+			" probe to kernel_sched_switch\n");
 		goto fail_deprobe_wake_new;
 	}
...
@@ -221,6 +221,7 @@ static struct tracer sched_switch_trace __read_mostly =
 	.reset		= sched_switch_trace_reset,
 	.start		= sched_switch_trace_start,
 	.stop		= sched_switch_trace_stop,
+	.wait_pipe	= poll_wait_pipe,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_sched_switch,
 #endif
...
kernel/trace/trace_sched_wakeup.c

@@ -284,7 +284,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
 	if (ret) {
 		pr_info("sched trace: Couldn't activate tracepoint"
-			" probe to kernel_sched_schedule\n");
+			" probe to kernel_sched_switch\n");
 		goto fail_deprobe_wake_new;
 	}
...
@@ -380,6 +380,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.reset		= wakeup_tracer_reset,
 	.start		= wakeup_tracer_start,
 	.stop		= wakeup_tracer_stop,
+	.wait_pipe	= poll_wait_pipe,
 	.print_max	= 1,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
...
kernel/trace/trace_selftest.c

@@ -24,10 +24,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
...
@@ -58,11 +68,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
...
@@ -107,9 +126,9 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	func();
 
 	/*
-	 * Some archs *cough*PowerPC*cough* add charachters to the
+	 * Some archs *cough*PowerPC*cough* add characters to the
 	 * start of the function names. We simply put a '*' to
-	 * accomodate them.
+	 * accommodate them.
 	 */
 	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
...
@@ -622,7 +641,7 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
-		return 0;
+		return ret;
 	}
 
 	/* Sleep for a 1/10 of a second */
...
@@ -634,6 +653,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();
 
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
 	return ret;
 }
 #endif /* CONFIG_SYSPROF_TRACER */
...
@@ -661,6 +685,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();
 
+	if (!ret && !count) {
+		printk(KERN_CONT ".. no entries found ..");
+		ret = -1;
+	}
+
 	return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */
kernel/trace/trace_stat.c

@@ -30,7 +30,7 @@ struct tracer_stat_session {
 	struct dentry		*file;
 };
 
-/* All of the sessions currently in use. Each stat file embeed one session */
+/* All of the sessions currently in use. Each stat file embed one session */
 static LIST_HEAD(all_stat_sessions);
 static DEFINE_MUTEX(all_stat_sessions_mutex);
...
kernel/trace/trace_sysprof.c

@@ -327,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
 				    d_tracer, NULL, &sysprof_sample_fops);
 	if (entry)
 		return;
-	pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");
+	pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
 }