Commit e0d27242 authored by Linus Torvalds

Merge branch 'tracing-core-for-linus' of...

Merge branch 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
  ftrace: Add function names to dangling } in function graph tracer
  tracing: Simplify memory recycle of trace_define_field
  tracing: Remove unnecessary variable in print_graph_return
  tracing: Fix typo of info text in trace_kprobe.c
  tracing: Fix typo in prof_sysexit_enable()
  tracing: Remove CONFIG_TRACE_POWER from kernel config
  tracing: Fix ftrace_event_call alignment for use with gcc 4.5
  ftrace: Remove memory barriers from NMI code when not needed
  tracing/kprobes: Add short documentation for HAVE_REGS_AND_STACK_ACCESS_API
  s390: Add pt_regs register and stack access API
  tracing/kprobes: Make Kconfig dependencies generic
  tracing: Unify arch_syscall_addr() implementations
  tracing: Add notrace to TRACE_EVENT implementation functions
  ftrace: Allow to remove a single function from function graph filter
  tracing: Add correct/incorrect to sort keys for branch annotation output
  tracing: Simplify test for function_graph tracing start point
  tracing: Drop the tr check from the graph tracing path
  tracing: Add stack dump to trace_printk if stacktrace option is set
  tracing: Use appropriate perl constructs in recordmcount.pl
  tracing: optimize recordmcount.pl for offsets-handling
  ...
parents d25e8dbd 48091742
@@ -238,11 +238,10 @@ HAVE_SYSCALL_TRACEPOINTS

You need very few things to get the syscalls tracing in an arch.

- Support HAVE_ARCH_TRACEHOOK (see arch/Kconfig).
- Have a NR_syscalls variable in <asm/unistd.h> that provides the number
  of syscalls supported by the arch.
-- Implement arch_syscall_addr() that resolves a syscall address from a
-  syscall number.
-- Support the TIF_SYSCALL_TRACEPOINT thread flags
+- Support the TIF_SYSCALL_TRACEPOINT thread flags.
- Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
  in the ptrace syscalls tracing path.
- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
......
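The per-arch arch_syscall_addr() copies that this series removes (see the s390, sh, sparc and x86 hunks below) all had the same body, which is why one generic definition can replace them. A sketch of that unified fallback, assuming the arch exports sys_call_table as the <asm/syscall.h> hunks below declare it:

/* Sketch of the generic helper; the per-arch versions removed below were
 * all equivalent to this, differing only in sys_call_table's element type. */
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
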
@@ -105,6 +105,14 @@ config HAVE_DMA_ATTRS
config USE_GENERIC_SMP_HELPERS
	bool

config HAVE_REGS_AND_STACK_ACCESS_API
	bool
	help
	  This symbol should be selected by an architecure if it supports
	  the API needed to access registers and stack entries from pt_regs,
	  declared in asm/ptrace.h
	  For example the kprobes-based event tracer needs this API.

config HAVE_CLK
	bool
	help
......
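A minimal sketch of how a consumer such as the kprobes-based event tracer can use this API to turn a register name into a value; the helper name is illustrative, while the two accessors are the ones declared in asm/ptrace.h by the hunks below:

#include <asm/ptrace.h>

/* Illustrative helper: resolve "r2", "ip", ... to the value it held in the
 * probed context; returns 0 for names the architecture does not know. */
static unsigned long fetch_named_register(struct pt_regs *regs,
					  const char *name)
{
	int offs = regs_query_register_offset(name);	/* -EINVAL if unknown */

	return (offs < 0) ? 0 : regs_get_register(regs, offs);
}
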
@@ -90,6 +90,7 @@ config S390
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_DEFAULT_NO_SPIN_MUTEXES
	select HAVE_OPROFILE
	select HAVE_KPROBES
......
@@ -492,13 +492,24 @@ struct user_regs_struct
struct task_struct;
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
-extern void show_regs(struct pt_regs * regs);

#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
#define user_stack_pointer(regs)((regs)->gprs[15])
#define regs_return_value(regs)((regs)->gprs[2])
#define profile_pc(regs) instruction_pointer(regs)
+extern void show_regs(struct pt_regs * regs);

int regs_query_register_offset(const char *name);
const char *regs_query_register_name(unsigned int offset);
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);

static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->gprs[15] & PSW_ADDR_INSN;
}

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
......
@@ -15,6 +15,13 @@
#include <linux/sched.h>
#include <asm/ptrace.h>

/*
 * The syscall table always contains 32 bit pointers since we know that the
 * address of the function to be called is (way) below 4GB. So the "int"
 * type here is what we want [need] for both 32 bit and 64 bit systems.
 */
extern const unsigned int sys_call_table[];

static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
......
@@ -200,13 +200,3 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned int sys_call_table[];
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
}
#endif
@@ -992,3 +992,61 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
	return &user_s390_view;
}
static const char *gpr_names[NUM_GPRS] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
if (offset >= NUM_GPRS)
return 0;
return regs->gprs[offset];
}
int regs_query_register_offset(const char *name)
{
unsigned long offset;
if (!name || *name != 'r')
return -EINVAL;
if (strict_strtoul(name + 1, 10, &offset))
return -EINVAL;
if (offset >= NUM_GPRS)
return -EINVAL;
return offset;
}
const char *regs_query_register_name(unsigned int offset)
{
if (offset >= NUM_GPRS)
return NULL;
return gpr_names[offset];
}
static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
unsigned long ksp = kernel_stack_pointer(regs);
return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs:pt_regs which contains kernel stack pointer.
* @n:stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specifined by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long addr;
addr = kernel_stack_pointer(regs) + n * sizeof(long);
if (!regs_within_kernel_stack(regs, addr))
return 0;
return *(unsigned long *)addr;
}
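
A short illustration of the stack accessor defined above (the caller is hypothetical): regs_get_kernel_stack_nth() bounds-checks the requested slot against the current kernel stack, so out-of-range reads simply yield 0.

/* Hypothetical caller: peek at the third word on the probed kernel stack,
 * e.g. for a kprobe fetch argument; 0 means the slot was off the stack. */
static unsigned long peek_third_stack_slot(struct pt_regs *regs)
{
	return regs_get_kernel_stack_nth(regs, 3);
}
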
#ifndef __ASM_SH_SYSCALL_H
#define __ASM_SH_SYSCALL_H

extern const unsigned long sys_call_table[];

#ifdef CONFIG_SUPERH32
# include "syscall_32.h"
#else
......
@@ -399,12 +399,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long *sys_call_table;
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
}
#endif /* CONFIG_FTRACE_SYSCALLS */
@@ -5,6 +5,13 @@
#include <linux/sched.h>
#include <asm/ptrace.h>

/*
 * The syscall table always contains 32 bit pointers since we know that the
 * address of the function to be called is (way) below 4GB. So the "int"
 * type here is what we want [need] for both 32 bit and 64 bit systems.
 */
extern const unsigned int sys_call_table[];

/* The system call number is given by the user in %g1 */
static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
......
@@ -91,14 +91,3 @@ int __init ftrace_dyn_arch_init(void *data)
	return 0;
}
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned int sys_call_table[];
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
}
#endif
@@ -45,6 +45,7 @@ config X86
	select HAVE_GENERIC_DMA_COHERENT if X86_32
	select HAVE_EFFICIENT_UNALIGNED_ACCESS
	select USER_STACKTRACE_SUPPORT
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_DMA_API_DEBUG
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_BZIP2
......
@@ -16,6 +16,8 @@
#include <linux/sched.h>
#include <linux/err.h>

extern const unsigned long sys_call_table[];

/*
 * Only the low 32 bits of orig_ax are meaningful, so we return int.
 * This importantly ignores the high bits on 64-bit, so comparisons
......
@@ -30,14 +30,32 @@
#ifdef CONFIG_DYNAMIC_FTRACE

/*
* modifying_code is set to notify NMIs that they need to use
* memory barriers when entering or exiting. But we don't want
* to burden NMIs with unnecessary memory barriers when code
* modification is not being done (which is most of the time).
*
* A mutex is already held when ftrace_arch_code_modify_prepare
* and post_process are called. No locks need to be taken here.
*
* Stop machine will make sure currently running NMIs are done
* and new NMIs will see the updated variable before we need
* to worry about NMIs doing memory barriers.
*/
static int modifying_code __read_mostly;
static DEFINE_PER_CPU(int, save_modifying_code);
int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	modifying_code = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	modifying_code = 0;
	set_kernel_text_ro();
	return 0;
}
@@ -149,6 +167,11 @@ static void ftrace_mod_code(void)

void ftrace_nmi_enter(void)
{
__get_cpu_var(save_modifying_code) = modifying_code;
if (!__get_cpu_var(save_modifying_code))
return;
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
@@ -160,6 +183,9 @@ void ftrace_nmi_enter(void)

void ftrace_nmi_exit(void)
{
if (!__get_cpu_var(save_modifying_code))
return;
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
@@ -484,13 +510,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long *sys_call_table;
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)(&sys_call_table)[nr];
}
#endif
@@ -511,4 +511,10 @@ static inline void trace_hw_branch_oops(void) {}
#endif /* CONFIG_HW_BRANCH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
#endif /* CONFIG_FTRACE_SYSCALLS */
#endif /* _LINUX_FTRACE_H */
@@ -121,9 +121,8 @@ struct ftrace_event_call {
	int (*regfunc)(struct ftrace_event_call *);
	void (*unregfunc)(struct ftrace_event_call *);
	int id;
+	const char *print_fmt;
	int (*raw_init)(struct ftrace_event_call *);
-	int (*show_format)(struct ftrace_event_call *,
-			   struct trace_seq *);
	int (*define_fields)(struct ftrace_event_call *);
	struct list_head fields;
	int filter_active;
......
@@ -132,7 +132,8 @@ struct perf_event_attr;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static const struct syscall_metadata __syscall_meta_##sname; \
-static struct ftrace_event_call event_enter_##sname; \
+static struct ftrace_event_call \
+__attribute__((__aligned__(4))) event_enter_##sname; \
static struct trace_event enter_syscall_print_##sname = { \
	.trace = print_syscall_enter, \
}; \
@@ -143,8 +144,7 @@ struct perf_event_attr;
	.name = "sys_enter"#sname, \
	.system = "syscalls", \
	.event = &enter_syscall_print_##sname, \
-	.raw_init = trace_event_raw_init, \
+	.raw_init = init_syscall_trace, \
-	.show_format = syscall_enter_format, \
	.define_fields = syscall_enter_define_fields, \
	.regfunc = reg_event_syscall_enter, \
	.unregfunc = unreg_event_syscall_enter, \
@@ -154,7 +154,8 @@ struct perf_event_attr;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static const struct syscall_metadata __syscall_meta_##sname; \
-static struct ftrace_event_call event_exit_##sname; \
+static struct ftrace_event_call \
+__attribute__((__aligned__(4))) event_exit_##sname; \
static struct trace_event exit_syscall_print_##sname = { \
	.trace = print_syscall_exit, \
}; \
@@ -165,8 +166,7 @@ struct perf_event_attr;
	.name = "sys_exit"#sname, \
	.system = "syscalls", \
	.event = &exit_syscall_print_##sname, \
-	.raw_init = trace_event_raw_init, \
+	.raw_init = init_syscall_trace, \
-	.show_format = syscall_exit_format, \
	.define_fields = syscall_exit_define_fields, \
	.regfunc = reg_event_syscall_exit, \
	.unregfunc = unreg_event_syscall_exit, \
......
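The __attribute__((__aligned__(4))) forced here (and on the FTRACE_ENTRY externs in kernel/trace/trace.h below) matters because ftrace_event_call objects from many translation units land in the _ftrace_events section and are then walked as a flat array; gcc 4.5 may over-align individual objects, leaving holes that break that walk. A self-contained illustration of the pattern, with made-up names:

/* Every object in the section must share one alignment, or walking the
 * section as an array of demo_event goes wrong. */
struct demo_event {
	const char *name;
	int id;
} __attribute__((__aligned__(4)));

#define DEFINE_DEMO_EVENT(ev)						\
	static struct demo_event					\
	__attribute__((used, section("_demo_events"))) demo_##ev = {	\
		.name = #ev,						\
	}

/* Provided by the linker for any section whose name is a C identifier. */
extern struct demo_event __start__demo_events[];
extern struct demo_event __stop__demo_events[];
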
@@ -34,10 +34,6 @@ struct syscall_metadata {
extern unsigned long arch_syscall_addr(int nr);
extern int init_syscall_trace(struct ftrace_event_call *call);
extern int syscall_enter_format(struct ftrace_event_call *call,
struct trace_seq *s);
extern int syscall_exit_format(struct ftrace_event_call *call,
struct trace_seq *s);
extern int syscall_enter_define_fields(struct ftrace_event_call *call);
extern int syscall_exit_define_fields(struct ftrace_event_call *call);
extern int reg_event_syscall_enter(struct ftrace_event_call *call);
......
@@ -328,15 +328,6 @@ config BRANCH_TRACER
	  Say N if unsure.
config POWER_TRACER
bool "Trace power consumption behavior"
depends on X86
select GENERIC_TRACER
help
This tracer helps developers to analyze and optimize the kernel's
power management decisions, specifically the C-state and P-state
behavior.
config KSYM_TRACER
	bool "Trace read and write access on kernel memory locations"
	depends on HAVE_HW_BREAKPOINT
@@ -449,7 +440,7 @@ config BLK_DEV_IO_TRACE
config KPROBE_EVENT
	depends on KPROBES
-	depends on X86
+	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	default y
......
@@ -2426,6 +2426,7 @@ static const struct file_operations ftrace_notrace_fops = {
static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
int ftrace_graph_filter_enabled;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
@@ -2448,7 +2449,7 @@ static void *g_start(struct seq_file *m, loff_t *pos)
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
-	if (!ftrace_graph_count && !*pos)
+	if (!ftrace_graph_filter_enabled && !*pos)
		return (void *)1;

	return __g_next(m, pos);
@@ -2494,6 +2495,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
	mutex_lock(&graph_lock);

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
@@ -2519,7 +2521,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
-	int found = 0;
+	int fail = 1;
	int type, not;
	char *search;
	bool exists;
@@ -2530,37 +2532,51 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-	if (not)
-		return -EINVAL;
+	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

-		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
-			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
-			/* ensure it is not already in the array */
+			/* if it is in the array */
			exists = false;
-			for (i = 0; i < *idx; i++)
+			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
-			if (!exists)
-				array[(*idx)++] = rec->ip;
-			found = 1;
+			}
+
+			if (!not) {
+				fail = 0;
+				if (!exists) {
+					array[(*idx)++] = rec->ip;
+					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+						goto out;
+				}
+			} else {
+				if (exists) {
+					array[i] = array[--(*idx)];
+					array[*idx] = 0;
+					fail = 0;
+				}
+			}
		}
	} while_for_each_ftrace_rec();
+out:
	mutex_unlock(&ftrace_lock);

-	return found ? 0 : -EINVAL;
+	if (fail)
+		return -EINVAL;
+
+	ftrace_graph_filter_enabled = 1;
+	return 0;
}
static ssize_t
@@ -2570,16 +2586,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
	struct trace_parser parser;
	ssize_t read, ret;

-	if (!cnt || cnt < 0)
+	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);
if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
ret = -EBUSY;
goto out_unlock;
}
	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
......
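In practice the rewritten ftrace_set_func() makes writes to set_graph_function additive and lets a single entry be removed again with a '!' prefix, e.g. echo sys_open > set_graph_function, echo sys_close >> set_graph_function, then echo '!sys_close' >> set_graph_function to take it back out (illustrative shell usage, relative to the debugfs tracing directory).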
@@ -32,6 +32,7 @@
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
@@ -102,9 +103,6 @@ static inline void ftrace_enable_cpu(void)
static cpumask_var_t __read_mostly tracing_buffer_mask;

-/* Define which cpu buffers are currently read in trace_pipe */
-static cpumask_var_t tracing_reader_cpumask;
-
#define for_each_tracing_cpu(cpu) \
	for_each_cpu(cpu, tracing_buffer_mask)
@@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly;
/*
 * trace_types_lock is used to protect the trace_types list.
- * This lock is also used to keep user access serialized.
- * Accesses from userspace will grab this lock while userspace
- * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);
/*
* serialize the access of the ring buffer
*
* ring buffer serializes readers, but it is low level protection.
* The validity of the events (which returns by ring_buffer_peek() ..etc)
* are not protected by ring buffer.
*
* The content of events may become garbage if we allow other process consumes
* these events concurrently:
* A) the page of the consumed events may become a normal page
* (not reader page) in ring buffer, and this page will be rewrited
* by events producer.
* B) The page of the consumed events may become a page for splice_read,
* and this page will be returned to system.
*
* These primitives allow multi process access to different cpu ring buffer
* concurrently.
*
* These primitives don't distinguish read-only and read-consume access.
* Multi read-only access are also serialized.
*/
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
if (cpu == TRACE_PIPE_ALL_CPU) {
/* gain it for accessing the whole ring buffer. */
down_write(&all_cpu_access_lock);
} else {
/* gain it for accessing a cpu ring buffer. */
/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
down_read(&all_cpu_access_lock);
/* Secondly block other access to this @cpu ring buffer. */
mutex_lock(&per_cpu(cpu_access_lock, cpu));
}
}
static inline void trace_access_unlock(int cpu)
{
if (cpu == TRACE_PIPE_ALL_CPU) {
up_write(&all_cpu_access_lock);
} else {
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
up_read(&all_cpu_access_lock);
}
}
static inline void trace_access_lock_init(void)
{
int cpu;
for_each_possible_cpu(cpu)
mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else
static DEFINE_MUTEX(access_lock);
static inline void trace_access_lock(int cpu)
{
(void)cpu;
mutex_lock(&access_lock);
}
static inline void trace_access_unlock(int cpu)
{
(void)cpu;
mutex_unlock(&access_lock);
}
static inline void trace_access_lock_init(void)
{
}
#endif
/* trace_wait is a waitqueue for tasks blocked on trace_poll */ /* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait); static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
@@ -1320,8 +1397,10 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
	entry->fmt = fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, flags, 6, pc);
+	}

out_unlock:
	arch_spin_unlock(&trace_buf_lock);
@@ -1394,8 +1473,10 @@ int trace_array_vprintk(struct trace_array *tr,
	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+	}

out_unlock:
	arch_spin_unlock(&trace_buf_lock);
...@@ -1585,12 +1666,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu) ...@@ -1585,12 +1666,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
} }
/* /*
* No necessary locking here. The worst thing which can
* happen is loosing events consumed at the same time
* by a trace_pipe reader.
* Other than that, we don't risk to crash the ring buffer
* because it serializes the readers.
*
* The current tracer is copied to avoid a global locking * The current tracer is copied to avoid a global locking
* all around. * all around.
*/ */
@@ -1645,12 +1720,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
...@@ -2841,22 +2920,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) ...@@ -2841,22 +2920,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
mutex_lock(&trace_types_lock); mutex_lock(&trace_types_lock);
/* We only allow one reader per cpu */
if (cpu_file == TRACE_PIPE_ALL_CPU) {
if (!cpumask_empty(tracing_reader_cpumask)) {
ret = -EBUSY;
goto out;
}
cpumask_setall(tracing_reader_cpumask);
} else {
if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
else {
ret = -EBUSY;
goto out;
}
}
/* create a buffer to store the information to pass to userspace */ /* create a buffer to store the information to pass to userspace */
iter = kzalloc(sizeof(*iter), GFP_KERNEL); iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) { if (!iter) {
...@@ -2912,12 +2975,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) ...@@ -2912,12 +2975,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_lock(&trace_types_lock); mutex_lock(&trace_types_lock);
if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
cpumask_clear(tracing_reader_cpumask);
else
cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
if (iter->trace->pipe_close) if (iter->trace->pipe_close)
iter->trace->pipe_close(iter); iter->trace->pipe_close(iter);
...@@ -3079,6 +3136,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -3079,6 +3136,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
iter->pos = -1; iter->pos = -1;
trace_event_read_lock(); trace_event_read_lock();
trace_access_lock(iter->cpu_file);
while (find_next_entry_inc(iter) != NULL) { while (find_next_entry_inc(iter) != NULL) {
enum print_line_t ret; enum print_line_t ret;
int len = iter->seq.len; int len = iter->seq.len;
...@@ -3095,6 +3153,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -3095,6 +3153,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
if (iter->seq.len >= cnt) if (iter->seq.len >= cnt)
break; break;
} }
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock(); trace_event_read_unlock();
/* Now copy what we have to the user */ /* Now copy what we have to the user */
...@@ -3220,6 +3279,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, ...@@ -3220,6 +3279,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
} }
trace_event_read_lock(); trace_event_read_lock();
trace_access_lock(iter->cpu_file);
/* Fill as many pages as possible. */ /* Fill as many pages as possible. */
for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
...@@ -3243,6 +3303,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, ...@@ -3243,6 +3303,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
trace_seq_init(&iter->seq); trace_seq_init(&iter->seq);
} }
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock(); trace_event_read_unlock();
mutex_unlock(&iter->mutex); mutex_unlock(&iter->mutex);
...@@ -3544,10 +3605,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, ...@@ -3544,10 +3605,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
info->read = 0; info->read = 0;
trace_access_lock(info->cpu);
ret = ring_buffer_read_page(info->tr->buffer, ret = ring_buffer_read_page(info->tr->buffer,
&info->spare, &info->spare,
count, count,
info->cpu, 0); info->cpu, 0);
trace_access_unlock(info->cpu);
if (ret < 0) if (ret < 0)
return 0; return 0;
...@@ -3675,6 +3738,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, ...@@ -3675,6 +3738,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
len &= PAGE_MASK; len &= PAGE_MASK;
} }
trace_access_lock(info->cpu);
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
...@@ -3722,6 +3786,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, ...@@ -3722,6 +3786,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
} }
trace_access_unlock(info->cpu);
spd.nr_pages = i; spd.nr_pages = i;
/* did we read anything? */ /* did we read anything? */
...@@ -4158,6 +4223,8 @@ static __init int tracer_init_debugfs(void) ...@@ -4158,6 +4223,8 @@ static __init int tracer_init_debugfs(void)
struct dentry *d_tracer; struct dentry *d_tracer;
int cpu; int cpu;
trace_access_lock_init();
d_tracer = tracing_init_dentry(); d_tracer = tracing_init_dentry();
trace_create_file("tracing_enabled", 0644, d_tracer, trace_create_file("tracing_enabled", 0644, d_tracer,
...@@ -4392,9 +4459,6 @@ __init static int tracer_alloc_buffers(void) ...@@ -4392,9 +4459,6 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask; goto out_free_buffer_mask;
if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
/* To save memory, keep the ring buffer size to its minimum */ /* To save memory, keep the ring buffer size to its minimum */
if (ring_buffer_expanded) if (ring_buffer_expanded)
ring_buf_size = trace_buf_size; ring_buf_size = trace_buf_size;
...@@ -4452,8 +4516,6 @@ __init static int tracer_alloc_buffers(void) ...@@ -4452,8 +4516,6 @@ __init static int tracer_alloc_buffers(void)
return 0; return 0;
out_free_cpumask: out_free_cpumask:
free_cpumask_var(tracing_reader_cpumask);
out_free_tracing_cpumask:
free_cpumask_var(tracing_cpumask); free_cpumask_var(tracing_cpumask);
out_free_buffer_mask: out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask); free_cpumask_var(tracing_buffer_mask);
......
@@ -497,6 +497,7 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS 32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
@@ -504,7 +505,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

-	if (!ftrace_graph_count || test_tsk_trace_graph(current))
+	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
@@ -791,7 +792,8 @@ extern const char *__stop___trace_bprintk_fmt[];
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
-	extern struct ftrace_event_call event_##call;
+	extern struct ftrace_event_call \
+	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
......
@@ -307,7 +307,22 @@ static int annotated_branch_stat_cmp(void *p1, void *p2)
		return -1;
	if (percent_a > percent_b)
		return 1;
-	else
if (a->incorrect < b->incorrect)
return -1;
if (a->incorrect > b->incorrect)
return 1;
/*
* Since the above shows worse (incorrect) cases
* first, we continue that by showing best (correct)
* cases last.
*/
if (a->correct > b->correct)
return -1;
if (a->correct < b->correct)
return 1;
return 0; return 0;
} }
......
@@ -60,10 +60,8 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
	return 0;

err:
-	if (field) {
-		kfree(field->name);
-		kfree(field->type);
-	}
+	if (field)
+		kfree(field->name);
	kfree(field);

	return -ENOMEM;
@@ -520,41 +518,16 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
	return ret;
}
extern char *__bad_type_size(void);
#undef FIELD
#define FIELD(type, name) \
sizeof(type) != sizeof(field.name) ? __bad_type_size() : \
#type, "common_" #name, offsetof(typeof(field), name), \
sizeof(field.name), is_signed_type(type)
static int trace_write_header(struct trace_seq *s)
{
struct trace_entry field;
/* struct trace_entry */
return trace_seq_printf(s,
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
"\n",
FIELD(unsigned short, type),
FIELD(unsigned char, flags),
FIELD(unsigned char, preempt_count),
FIELD(int, pid),
FIELD(int, lock_depth));
}
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct ftrace_event_field *field;
	struct trace_seq *s;
	int common_field_count = 5;
	char *buf;
-	int r;
+	int r = 0;

	if (*ppos)
		return 0;
@@ -565,14 +538,48 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
	trace_seq_init(s);

-	/* If any of the first writes fail, so will the show_format. */
	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
-	trace_write_header(s);

-	r = call->show_format(call, s);
+	list_for_each_entry_reverse(field, &call->fields, link) {
/*
* Smartly shows the array type(except dynamic array).
* Normal:
* field:TYPE VAR
* If TYPE := TYPE[LEN], it is shown:
* field:TYPE VAR[LEN]
*/
const char *array_descriptor = strchr(field->type, '[');
if (!strncmp(field->type, "__data_loc", 10))
array_descriptor = NULL;
if (!array_descriptor) {
r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
"\tsize:%u;\tsigned:%d;\n",
field->type, field->name, field->offset,
field->size, !!field->is_signed);
} else {
r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
"\tsize:%u;\tsigned:%d;\n",
(int)(array_descriptor - field->type),
field->type, field->name,
array_descriptor, field->offset,
field->size, !!field->is_signed);
}
if (--common_field_count == 0)
r = trace_seq_printf(s, "\n");
if (!r)
break;
}
if (r)
r = trace_seq_printf(s, "\nprint fmt: %s\n",
call->print_fmt);
if (!r) { if (!r) {
/* /*
* ug! The format output is bigger than a PAGE!! * ug! The format output is bigger than a PAGE!!
...@@ -948,10 +955,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, ...@@ -948,10 +955,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
filter); filter);
} }
/* A trace may not want to export its format */
if (!call->show_format)
return 0;
trace_create_file("format", 0444, call->dir, call, trace_create_file("format", 0444, call->dir, call,
format); format);
......
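The array handling added above splits the stored type string at its '[' so the bound is printed after the field name. A standalone userspace illustration of that split, with hypothetical values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *type = "char[16]", *name = "comm";	/* hypothetical field */
	const char *array_descriptor = strchr(type, '[');

	/* prints: field:char comm[16]; */
	printf("field:%.*s %s%s;\n",
	       (int)(array_descriptor - type), type, name, array_descriptor);
	return 0;
}
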
...@@ -62,78 +62,6 @@ static void __always_unused ____ftrace_check_##name(void) \ ...@@ -62,78 +62,6 @@ static void __always_unused ____ftrace_check_##name(void) \
#include "trace_entries.h" #include "trace_entries.h"
#undef __field
#define __field(type, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), item), \
sizeof(field.item), is_signed_type(type)); \
if (!ret) \
return 0;
#undef __field_desc
#define __field_desc(type, container, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), container.item), \
sizeof(field.container.item), \
is_signed_type(type)); \
if (!ret) \
return 0;
#undef __array
#define __array(type, item, len) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), item), \
sizeof(field.item), is_signed_type(type)); \
if (!ret) \
return 0;
#undef __array_desc
#define __array_desc(type, container, item, len) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
"offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), container.item), \
sizeof(field.container.item), \
is_signed_type(type)); \
if (!ret) \
return 0;
#undef __dynamic_array
#define __dynamic_array(type, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
"offset:%zu;\tsize:0;\tsigned:%u;\n", \
offsetof(typeof(field), item), \
is_signed_type(type)); \
if (!ret) \
return 0;
#undef F_printk
#define F_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
#undef __entry
#define __entry REC
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
static int \
ftrace_format_##name(struct ftrace_event_call *unused, \
struct trace_seq *s) \
{ \
struct struct_name field __attribute__((unused)); \
int ret = 0; \
\
tstruct; \
\
trace_seq_printf(s, "\nprint fmt: " print); \
\
return ret; \
}
#include "trace_entries.h"
#undef __field
#define __field(type, item) \
	ret = trace_define_field(event_call, #type, #item, \
@@ -175,7 +103,12 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
	return ret;

#undef __dynamic_array
-#define __dynamic_array(type, item)
+#define __dynamic_array(type, item) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
0, is_signed_type(type), FILTER_OTHER);\
if (ret) \
return ret;
#undef FTRACE_ENTRY #undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
...@@ -198,6 +131,9 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call) ...@@ -198,6 +131,9 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
return 0; return 0;
} }
#undef __entry
#define __entry REC
#undef __field #undef __field
#define __field(type, item) #define __field(type, item)
...@@ -213,6 +149,9 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call) ...@@ -213,6 +149,9 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
#undef __dynamic_array #undef __dynamic_array
#define __dynamic_array(type, item) #define __dynamic_array(type, item)
#undef F_printk
#define F_printk(fmt, args...) #fmt ", " __stringify(args)
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
\
@@ -223,7 +162,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
	.id = type, \
	.system = __stringify(TRACE_SYSTEM), \
	.raw_init = ftrace_raw_init_event, \
-	.show_format = ftrace_format_##call, \
+	.print_fmt = print, \
	.define_fields = ftrace_define_fields_##call, \
}; \
......
...@@ -18,6 +18,7 @@ struct fgraph_cpu_data { ...@@ -18,6 +18,7 @@ struct fgraph_cpu_data {
pid_t last_pid; pid_t last_pid;
int depth; int depth;
int ignore; int ignore;
unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
}; };
struct fgraph_data { struct fgraph_data {
@@ -212,13 +213,11 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
	int cpu;
	int pc;

-	if (unlikely(!tr))
-		return 0;
-
	if (!ftrace_trace_task(current))
		return 0;

-	if (!ftrace_graph_addr(trace->func))
+	/* trace it when it is-nested-in or is a function enabled. */
+	if (!(trace->depth || ftrace_graph_addr(trace->func)))
		return 0;

	local_irq_save(flags);
...@@ -231,9 +230,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -231,9 +230,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
} else { } else {
ret = 0; ret = 0;
} }
/* Only do the atomic if it is not already set */
if (!test_tsk_trace_graph(current))
set_tsk_trace_graph(current);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
local_irq_restore(flags); local_irq_restore(flags);
...@@ -281,17 +277,24 @@ void trace_graph_return(struct ftrace_graph_ret *trace) ...@@ -281,17 +277,24 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
pc = preempt_count(); pc = preempt_count();
__trace_graph_return(tr, trace, flags, pc); __trace_graph_return(tr, trace, flags, pc);
} }
if (!trace->depth)
clear_tsk_trace_graph(current);
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
local_irq_restore(flags); local_irq_restore(flags);
} }
void set_graph_array(struct trace_array *tr)
{
graph_array = tr;
/* Make graph_array visible before we start tracing */
smp_mb();
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

-	graph_array = tr;
+	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
...@@ -301,11 +304,6 @@ static int graph_trace_init(struct trace_array *tr) ...@@ -301,11 +304,6 @@ static int graph_trace_init(struct trace_array *tr)
return 0; return 0;
} }
void set_graph_array(struct trace_array *tr)
{
graph_array = tr;
}
static void graph_trace_reset(struct trace_array *tr) static void graph_trace_reset(struct trace_array *tr)
{ {
tracing_stop_cmdline_record(); tracing_stop_cmdline_record();
@@ -673,15 +671,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
-		*depth = call->depth - 1;
+		cpu_data->depth = call->depth - 1;
/* No need to keep this function around for this depth */
if (call->depth < FTRACE_RETFUNC_DEPTH)
cpu_data->enter_funcs[call->depth] = 0;
} }
/* Overhead */ /* Overhead */
@@ -721,10 +725,15 @@ print_graph_entry_nested(struct trace_iterator *iter,
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
-
-		*depth = call->depth;
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+		cpu_data->depth = call->depth;
/* Save this function pointer to see if the exit matches */
if (call->depth < FTRACE_RETFUNC_DEPTH)
cpu_data->enter_funcs[call->depth] = call->func;
} }
/* No overhead */ /* No overhead */
@@ -854,19 +863,28 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+
+		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
-		*depth = trace->depth - 1;
+		cpu_data->depth = trace->depth - 1;
if (trace->depth < FTRACE_RETFUNC_DEPTH) {
if (cpu_data->enter_funcs[trace->depth] != trace->func)
func_match = 0;
cpu_data->enter_funcs[trace->depth] = 0;
}
} }
if (print_graph_prologue(iter, s, 0, 0)) if (print_graph_prologue(iter, s, 0, 0))
...@@ -891,9 +909,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, ...@@ -891,9 +909,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
return TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
} }
/*
* If the return function does not have a matching entry,
* then the entry was lost. Instead of just printing
* the '}' and letting the user guess what function this
* belongs to, write out the function name.
*/
if (func_match) {
ret = trace_seq_printf(s, "}\n"); ret = trace_seq_printf(s, "}\n");
if (!ret) if (!ret)
return TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
} else {
ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* Overrun */ /* Overrun */
if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
......
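With the enter_funcs[] bookkeeping above, a closing brace whose matching entry event was lost is printed with the resolved function name appended, along the lines of (illustrative output):

 1)   + 23.117 us   |  } (kmem_cache_free)

whereas a matched return still prints the bare "}".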
@@ -651,12 +651,12 @@ static int create_trace_probe(int argc, char **argv)
		event = strchr(group, '/') + 1;
		event[-1] = '\0';
		if (strlen(group) == 0) {
-			pr_info("Group name is not specifiled\n");
+			pr_info("Group name is not specified\n");
			return -EINVAL;
		}
	}
	if (strlen(event) == 0) {
-		pr_info("Event name is not specifiled\n");
+		pr_info("Event name is not specified\n");
		return -EINVAL;
	}
}
@@ -1174,80 +1174,60 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
	return 0;
}

-static int __probe_event_show_format(struct trace_seq *s,
-				     struct trace_probe *tp, const char *fmt,
-				     const char *arg)
-{
-	int i;
-
-	/* Show format */
-	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
-		return 0;
-
-	for (i = 0; i < tp->nr_args; i++)
-		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
-			return 0;
-
-	if (!trace_seq_printf(s, "\", %s", arg))
-		return 0;
-
-	for (i = 0; i < tp->nr_args; i++)
-		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
-			return 0;
-
-	return trace_seq_puts(s, "\n");
-}
-
-#undef SHOW_FIELD
-#define SHOW_FIELD(type, item, name) \
-	do { \
-		ret = trace_seq_printf(s, "\tfield:" #type " %s;\t" \
-				"offset:%u;\tsize:%u;\tsigned:%d;\n", name,\
-				(unsigned int)offsetof(typeof(field), item),\
-				(unsigned int)sizeof(type), \
-				is_signed_type(type)); \
-		if (!ret) \
-			return 0; \
-	} while (0)
-
-static int kprobe_event_show_format(struct ftrace_event_call *call,
-				    struct trace_seq *s)
-{
-	struct kprobe_trace_entry field __attribute__((unused));
-	int ret, i;
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
-	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
-
-	/* Show fields */
-	for (i = 0; i < tp->nr_args; i++)
-		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
-	trace_seq_puts(s, "\n");
-
-	return __probe_event_show_format(s, tp, "(%lx)",
-					 "REC->" FIELD_STRING_IP);
-}
-
-static int kretprobe_event_show_format(struct ftrace_event_call *call,
-				       struct trace_seq *s)
-{
-	struct kretprobe_trace_entry field __attribute__((unused));
-	int ret, i;
-	struct trace_probe *tp = (struct trace_probe *)call->data;
-
-	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
-	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
-	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
-
-	/* Show fields */
-	for (i = 0; i < tp->nr_args; i++)
-		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
-	trace_seq_puts(s, "\n");
-
-	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
-					 "REC->" FIELD_STRING_FUNC
-					 ", REC->" FIELD_STRING_RETIP);
-}
+static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
+{
+	int i;
+	int pos = 0;
+
+	const char *fmt, *arg;
+
+	if (!probe_is_return(tp)) {
+		fmt = "(%lx)";
+		arg = "REC->" FIELD_STRING_IP;
+	} else {
+		fmt = "(%lx <- %lx)";
+		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+	}
+
+	/* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
+
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
+
+	for (i = 0; i < tp->nr_args; i++) {
+		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx",
+				tp->args[i].name);
+	}
+
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
+
+	for (i = 0; i < tp->nr_args; i++) {
+		pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
+				tp->args[i].name);
+	}
+
+#undef LEN_OR_ZERO
+
+	/* return the length of print_fmt */
+	return pos;
+}
+
+static int set_print_fmt(struct trace_probe *tp)
+{
+	int len;
+	char *print_fmt;
+
+	/* First: called with 0 length to calculate the needed length */
+	len = __set_print_fmt(tp, NULL, 0);
+	print_fmt = kmalloc(len + 1, GFP_KERNEL);
+	if (!print_fmt)
+		return -ENOMEM;
+
+	/* Second: actually write the @print_fmt */
+	__set_print_fmt(tp, print_fmt, len + 1);
+	tp->call.print_fmt = print_fmt;
+
+	return 0;
+}
#ifdef CONFIG_EVENT_PROFILE
@@ -1448,18 +1428,20 @@ static int register_probe_event(struct trace_probe *tp)
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
-		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
-		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
+	if (set_print_fmt(tp) < 0)
+		return -ENOMEM;
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
-	if (!call->id)
+	if (!call->id) {
+		kfree(call->print_fmt);
		return -ENODEV;
+	}
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;
@@ -1472,6 +1454,7 @@ static int register_probe_event(struct trace_probe *tp)
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
...@@ -1481,6 +1464,7 @@ static void unregister_probe_event(struct trace_probe *tp) ...@@ -1481,6 +1464,7 @@ static void unregister_probe_event(struct trace_probe *tp)
{ {
/* tp->event is unregistered in trace_remove_event_call() */ /* tp->event is unregistered in trace_remove_event_call() */
trace_remove_event_call(&tp->call); trace_remove_event_call(&tp->call);
kfree(tp->call.print_fmt);
} }
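The two added kfree() calls above make the error and teardown paths symmetric with the new allocation: print_fmt is kmalloc()ed when the probe event is set up, so it must be freed both when registration fails and when the event is unregistered. A minimal sketch of that ownership rule follows; demo_event, demo_register() and core_register are invented stand-ins, not the kernel API.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct demo_event { char *print_fmt; };

/* core_register stands in for the real event-registration call. */
static int demo_register(struct demo_event *ev, const char *fmt,
			 int (*core_register)(struct demo_event *))
{
	int ret;

	ev->print_fmt = strdup(fmt);	/* allocated here, owned by the event */
	if (!ev->print_fmt)
		return -ENOMEM;

	ret = core_register(ev);
	if (ret) {
		free(ev->print_fmt);	/* failure path releases it ... */
		ev->print_fmt = NULL;
		return ret;
	}
	return 0;
}

static void demo_unregister(struct demo_event *ev)
{
	free(ev->print_fmt);		/* ... and so does normal teardown */
	ev->print_fmt = NULL;
}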
 
 /* Make a debugfs interface for controling probe points */
...
...
@@ -143,70 +143,65 @@ extern char *__bad_type_size(void);
 	#type, #name, offsetof(typeof(trace), name),			\
 	sizeof(trace.name), is_signed_type(type)
 
-int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
+static
+int  __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 {
 	int i;
-	int ret;
-	struct syscall_metadata *entry = call->data;
-	struct syscall_trace_enter trace;
-	int offset = offsetof(struct syscall_trace_enter, args);
+	int pos = 0;
 
-	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
-			       "\tsigned:%u;\n",
-			       SYSCALL_FIELD(int, nr));
-	if (!ret)
-		return 0;
-
-	for (i = 0; i < entry->nb_args; i++) {
-		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
-				       entry->args[i]);
-		if (!ret)
-			return 0;
-		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
-				       "\tsigned:%u;\n", offset,
-				       sizeof(unsigned long),
-				       is_signed_type(unsigned long));
-		if (!ret)
-			return 0;
-		offset += sizeof(unsigned long);
-	}
+	/* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
 
-	trace_seq_puts(s, "\nprint fmt: \"");
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
 	for (i = 0; i < entry->nb_args; i++) {
-		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
-				       sizeof(unsigned long),
-				       i == entry->nb_args - 1 ? "" : ", ");
-		if (!ret)
-			return 0;
+		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
+				entry->args[i], sizeof(unsigned long),
+				i == entry->nb_args - 1 ? "" : ", ");
 	}
-	trace_seq_putc(s, '"');
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
 
 	for (i = 0; i < entry->nb_args; i++) {
-		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
-				       entry->args[i]);
-		if (!ret)
-			return 0;
+		pos += snprintf(buf + pos, LEN_OR_ZERO,
+				", ((unsigned long)(REC->%s))", entry->args[i]);
 	}
 
-	return trace_seq_putc(s, '\n');
+#undef LEN_OR_ZERO
+
+	/* return the length of print_fmt */
+	return pos;
 }
-int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
+static int set_syscall_print_fmt(struct ftrace_event_call *call)
 {
-	int ret;
-	struct syscall_trace_exit trace;
+	char *print_fmt;
+	int len;
+	struct syscall_metadata *entry = call->data;
 
-	ret = trace_seq_printf(s,
-			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
-			       "\tsigned:%u;\n"
-			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
-			       "\tsigned:%u;\n",
-			       SYSCALL_FIELD(int, nr),
-			       SYSCALL_FIELD(long, ret));
-	if (!ret)
-		return 0;
+	if (entry->enter_event != call) {
+		call->print_fmt = "\"0x%lx\", REC->ret";
+		return 0;
+	}
+
+	/* First: called with 0 length to calculate the needed length */
+	len = __set_enter_print_fmt(entry, NULL, 0);
+
+	print_fmt = kmalloc(len + 1, GFP_KERNEL);
+	if (!print_fmt)
+		return -ENOMEM;
+
+	/* Second: actually write the @print_fmt */
+	__set_enter_print_fmt(entry, print_fmt, len + 1);
+	call->print_fmt = print_fmt;
+
+	return 0;
+}
+
+static void free_syscall_print_fmt(struct ftrace_event_call *call)
+{
+	struct syscall_metadata *entry = call->data;
 
-	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
+	if (entry->enter_event == call)
+		kfree(call->print_fmt);
 }
 
 int syscall_enter_define_fields(struct ftrace_event_call *call)
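As a worked example of what the two paths above leave in call->print_fmt (a hypothetical three-argument syscall, LP64 assumed, so sizeof(unsigned long) expands the width to 08; not taken from the kernel): an enter event gets a heap string built from its argument names, while every exit event shares one constant string, which is why free_syscall_print_fmt() only frees when entry->enter_event == call.

#include <stdio.h>

int main(void)
{
	/* Enter event: the kind of string __set_enter_print_fmt() builds. */
	const char *enter_fmt =
		"\"fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx\""
		", ((unsigned long)(REC->fd))"
		", ((unsigned long)(REC->buf))"
		", ((unsigned long)(REC->count))";

	/* Exit event: a shared constant, never kfree()d. */
	const char *exit_fmt = "\"0x%lx\", REC->ret";

	printf("%s\n%s\n", enter_fmt, exit_fmt);
	return 0;
}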
...
@@ -386,12 +381,22 @@ int init_syscall_trace(struct ftrace_event_call *call)
 {
 	int id;
 
-	id = register_ftrace_event(call->event);
-	if (!id)
-		return -ENODEV;
-	call->id = id;
-	INIT_LIST_HEAD(&call->fields);
-	return 0;
+	if (set_syscall_print_fmt(call) < 0)
+		return -ENOMEM;
+
+	id = trace_event_raw_init(call);
+	if (id < 0) {
+		free_syscall_print_fmt(call);
+		return id;
+	}
+
+	return id;
+}
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+	return (unsigned long)sys_call_table[nr];
 }
 
 int __init init_ftrace_syscalls(void)
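The newly added arch_syscall_addr() gives the core tracer a generic way to turn a syscall number into the handler address recorded in sys_call_table; the syscalls tracer's init path then uses that address to pair each number with its metadata. A rough userspace analog of that lookup-and-match step follows; every name and table here is invented for illustration.

#include <stdio.h>

typedef long (*sys_call_ptr)(void);

static long demo_sys_read(void)  { return 0; }
static long demo_sys_write(void) { return 0; }

/* Stand-in for the arch's sys_call_table. */
static const sys_call_ptr demo_sys_call_table[] = { demo_sys_read, demo_sys_write };

struct demo_meta { const char *name; sys_call_ptr addr; };
static const struct demo_meta demo_metadata[] = {
	{ "sys_write", demo_sys_write },
	{ "sys_read",  demo_sys_read  },
};

/* The arch-specific piece: syscall number -> handler address. */
static unsigned long demo_arch_syscall_addr(int nr)
{
	return (unsigned long)demo_sys_call_table[nr];
}

int main(void)
{
	for (int nr = 0; nr < 2; nr++) {
		unsigned long addr = demo_arch_syscall_addr(nr);

		/* Match the address against the metadata table. */
		for (int i = 0; i < 2; i++)
			if ((unsigned long)demo_metadata[i].addr == addr)
				printf("syscall %d is %s\n", nr, demo_metadata[i].name);
	}
	return 0;
}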
...
@@ -603,7 +608,7 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 		ret = register_trace_sys_exit(prof_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
-				"syscall entry trace point");
+				"syscall exit trace point");
 	} else {
 		set_bit(num, enabled_prof_exit_syscalls);
 		sys_prof_refcount_exit++;
...
...
@@ -136,13 +136,14 @@ my %text_sections = (
      ".text.unlikely" => 1,
 );
 
-$objdump = "objdump" if ((length $objdump) == 0);
-$objcopy = "objcopy" if ((length $objcopy) == 0);
-$cc = "gcc" if ((length $cc) == 0);
-$ld = "ld" if ((length $ld) == 0);
-$nm = "nm" if ((length $nm) == 0);
-$rm = "rm" if ((length $rm) == 0);
-$mv = "mv" if ((length $mv) == 0);
+# Note: we are nice to C-programmers here, thus we skip the '||='-idiom.
+$objdump = 'objdump' if (!$objdump);
+$objcopy = 'objcopy' if (!$objcopy);
+$cc = 'gcc' if (!$cc);
+$ld = 'ld' if (!$ld);
+$nm = 'nm' if (!$nm);
+$rm = 'rm' if (!$rm);
+$mv = 'mv' if (!$mv);
 
 #print STDERR "running: $P '$arch' '$objdump' '$objcopy' '$cc' '$ld' " .
 #	"'$nm' '$rm' '$mv' '$inputfile'\n";
...
@@ -432,14 +433,14 @@ sub update_funcs
     # Loop through all the mcount caller offsets and print a reference
     # to the caller based from the ref_func.
-    for (my $i=0; $i <= $#offsets; $i++) {
 	if (!$opened) {
 	    open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
 	    $opened = 1;
 	    print FILE "\t.section $mcount_section,\"a\",$section_type\n";
 	    print FILE "\t.align $alignment\n" if (defined($alignment));
 	}
-	printf FILE "\t%s %s + %d\n", $type, $ref_func, $offsets[$i] - $offset;
+    foreach my $cur_offset (@offsets) {
+	printf FILE "\t%s %s + %d\n", $type, $ref_func, $cur_offset - $offset;
     }
 }
...
@@ -476,11 +477,7 @@ while (<IN>) {
 	    $read_headers = 0;
 
 	    # Only record text sections that we know are safe
-	    if (defined($text_sections{$1})) {
-		$read_function = 1;
-	    } else {
-		$read_function = 0;
-	    }
+	    $read_function = defined($text_sections{$1});
 
 	    # print out any recorded offsets
 	    update_funcs();
...
@@ -514,7 +511,7 @@ while (<IN>) {
     }
 
     # is this a call site to mcount? If so, record it to print later
     if ($text_found && /$mcount_regex/) {
-	$offsets[$#offsets + 1] = hex $1;
+	push(@offsets, hex $1);
     }
 }
...