Commit 79b17ea7 authored by Linus Torvalds

Merge tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "This release has no new tracing features, just clean ups, minor fixes
  and small optimizations"

* tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (25 commits)
  tracing: Remove outdated ring buffer comment
  tracing/probes: Fix a warning message to show correct maximum length
  tracing: Fix return value check in trace_benchmark_reg()
  tracing: Use modern function declaration
  jump_label: Reduce the size of struct static_key
  tracing/probe: Show subsystem name in messages
  tracing/hwlat: Update old comment about migration
  timers: Make flags output in the timer_start tracepoint useful
  tracing: Have traceprobe_probes_write() not access userspace unnecessarily
  tracing: Have COMM event filter key be treated as a string
  ftrace: Have set_graph_function handle multiple functions in one write
  ftrace: Do not hold references of ftrace_graph_{notrace_}hash out of graph_lock
  tracing: Reset parser->buffer to allow multiple "puts"
  ftrace: Have set_graph_functions handle write with RDWR
  ftrace: Reset fgd->hash in ftrace_graph_write()
  ftrace: Replace (void *)1 with a meaningful macro name FTRACE_GRAPH_EMPTY
  ftrace: Create a slight optimization on searching the ftrace_hash
  tracing: Add ftrace_hash_key() helper function
  ftrace: Convert graph filter to use hash tables
  ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  ...
parents e5d56efc 67d04bb2
@@ -155,7 +155,9 @@ or:
 There are a few functions and macros that architectures must implement in order
 to take advantage of this optimization. If there is no architecture support, we
-simply fall back to a traditional, load, test, and jump sequence.
+simply fall back to a traditional, load, test, and jump sequence. Also, the
+struct jump_entry table must be at least 4-byte aligned because the
+static_key->entries field makes use of the two least significant bits.
 * select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
...
@@ -105,29 +105,36 @@ struct ftrace_branch_data {
     };
 };
+
+struct ftrace_likely_data {
+    struct ftrace_branch_data data;
+    unsigned long constant;
+};
 /*
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
     && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+                          int expect, int is_constant);
 #define likely_notrace(x) __builtin_expect(!!(x), 1)
 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
-#define __branch_check__(x, expect) ({ \
+#define __branch_check__(x, expect, is_constant) ({ \
     int ______r; \
-    static struct ftrace_branch_data \
+    static struct ftrace_likely_data \
         __attribute__((__aligned__(4))) \
         __attribute__((section("_ftrace_annotated_branch"))) \
         ______f = { \
-            .func = __func__, \
-            .file = __FILE__, \
-            .line = __LINE__, \
+            .data.func = __func__, \
+            .data.file = __FILE__, \
+            .data.line = __LINE__, \
        }; \
-    ______r = likely_notrace(x); \
-    ftrace_likely_update(&______f, ______r, expect); \
+    ______r = __builtin_expect(!!(x), expect); \
+    ftrace_likely_update(&______f, ______r, \
+                         expect, is_constant); \
    ______r; \
 })
@@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  * written by Daniel Walker.
  */
 # ifndef likely
-# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x)))
 # endif
 # ifndef unlikely
-# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
 # endif
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
...
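For context, the macros above are what give every likely()/unlikely() call site its own static ftrace_likely_data record in the _ftrace_annotated_branch section; ftrace_likely_update() then bumps a correct/incorrect (and now constant) counter on every evaluation. A minimal userspace sketch of the same counting idea follows; the names are hypothetical, GCC statement expressions are assumed, and this is not the kernel code:

/* Build with: gcc -o branch_demo branch_demo.c */
#include <stdio.h>

struct branch_site {
	const char *func, *file;
	unsigned line;
	unsigned long correct, incorrect;
};

/* the kernel emits one of these per call site into a dedicated section */
static struct branch_site demo_site = {
	.func = "main", .file = __FILE__, .line = __LINE__,
};

#define LIKELY_PROFILED(site, x) ({			\
	int __r = __builtin_expect(!!(x), 1);		\
	if (__r)					\
		(site)->correct++;			\
	else						\
		(site)->incorrect++;			\
	__r;						\
})

int main(void)
{
	for (int i = 0; i < 100; i++)
		if (LIKELY_PROFILED(&demo_site, i % 10 != 0))
			;	/* common path */

	printf("%s:%u correct=%lu incorrect=%lu\n",
	       demo_site.file, demo_site.line,
	       demo_site.correct, demo_site.incorrect);
	return 0;
}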
@@ -89,11 +89,17 @@ extern bool static_key_initialized;
 struct static_key {
     atomic_t enabled;
-/* Set lsb bit to 1 if branch is default true, 0 ot */
-    struct jump_entry *entries;
-#ifdef CONFIG_MODULES
-    struct static_key_mod *next;
-#endif
+/*
+ * bit 0 => 1 if key is initially true
+ *          0 if initially false
+ * bit 1 => 1 if points to struct static_key_mod
+ *          0 if points to struct jump_entry
+ */
+    union {
+        unsigned long type;
+        struct jump_entry *entries;
+        struct static_key_mod *next;
+    };
 };
 #else
@@ -118,9 +124,10 @@ struct module;
 #ifdef HAVE_JUMP_LABEL
 #define JUMP_TYPE_FALSE 0UL
 #define JUMP_TYPE_TRUE 1UL
-#define JUMP_TYPE_MASK 1UL
+#define JUMP_TYPE_LINKED 2UL
+#define JUMP_TYPE_MASK 3UL
 static __always_inline bool static_key_false(struct static_key *key)
 {
...
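The union above works because jump_entry tables are at least 4-byte aligned, so the two low bits of the stored pointer are always zero and can carry JUMP_TYPE_TRUE and JUMP_TYPE_LINKED instead. A rough userspace sketch of that tagging scheme, with hypothetical names and not the kernel implementation:

#include <assert.h>
#include <stdio.h>

#define TYPE_TRUE	1UL	/* bit 0: initial branch direction */
#define TYPE_LINKED	2UL	/* bit 1: pointer refers to a module list */
#define TYPE_MASK	3UL

struct entry { int dummy; } __attribute__((aligned(4)));

union tagged_key {
	unsigned long type;
	struct entry *entries;
};

static struct entry *key_entries(union tagged_key *k)
{
	return (struct entry *)(k->type & ~TYPE_MASK);
}

static void key_set_entries(union tagged_key *k, struct entry *e)
{
	unsigned long bits = k->type & TYPE_MASK;	/* preserve tag bits */

	assert(((unsigned long)e & TYPE_MASK) == 0);	/* alignment guarantee */
	k->entries = e;
	k->type |= bits;
}

int main(void)
{
	static struct entry table;
	union tagged_key key = { .type = TYPE_TRUE };	/* initially true */

	key_set_entries(&key, &table);
	printf("entries=%p true=%lu linked=%lu\n",
	       (void *)key_entries(&key),
	       key.type & TYPE_TRUE, key.type & TYPE_LINKED);
	return 0;
}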
@@ -61,6 +61,8 @@ struct timer_list {
 #define TIMER_ARRAYSHIFT 22
 #define TIMER_ARRAYMASK 0xFFC00000
+#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
     .entry = { .next = TIMER_ENTRY_STATIC }, \
     .function = (_function), \
...
@@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init,
     TP_ARGS(timer)
 );
+#define decode_timer_flags(flags) \
+    __print_flags(flags, "|", \
+        { TIMER_MIGRATING,  "M" }, \
+        { TIMER_DEFERRABLE, "D" }, \
+        { TIMER_PINNED,     "P" }, \
+        { TIMER_IRQSAFE,    "I" })
 /**
  * timer_start - called when the timer is started
  * @timer: pointer to struct timer_list
@@ -65,9 +72,12 @@ TRACE_EVENT(timer_start,
         __entry->flags = flags;
     ),
-    TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
+    TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
           __entry->timer, __entry->function, __entry->expires,
-          (long)__entry->expires - __entry->now, __entry->flags)
+          (long)__entry->expires - __entry->now,
+          __entry->flags & TIMER_CPUMASK,
+          __entry->flags >> TIMER_ARRAYSHIFT,
+          decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
 );
 /**
...
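The new TP_printk above unpacks timer->flags instead of dumping a raw hex value: the CPU number lives in the low bits, the wheel array index in the bits above TIMER_ARRAYSHIFT, and a few single-bit properties in between. A small standalone decoder sketch; the constants follow include/linux/timer.h of this kernel version, but the program itself is illustrative only:

#include <stdio.h>

#define TIMER_CPUMASK		0x0003FFFFU
#define TIMER_MIGRATING		0x00040000U
#define TIMER_DEFERRABLE	0x00080000U
#define TIMER_PINNED		0x00100000U
#define TIMER_IRQSAFE		0x00200000U
#define TIMER_ARRAYSHIFT	22

static void decode(unsigned int flags)
{
	printf("cpu=%u idx=%u flags=%s%s%s%s\n",
	       flags & TIMER_CPUMASK,
	       flags >> TIMER_ARRAYSHIFT,
	       flags & TIMER_MIGRATING  ? "M" : "",
	       flags & TIMER_DEFERRABLE ? "D" : "",
	       flags & TIMER_PINNED     ? "P" : "",
	       flags & TIMER_IRQSAFE    ? "I" : "");
}

int main(void)
{
	/* CPU 3, wheel index 5, deferrable + pinned */
	decode(3 | (5U << TIMER_ARRAYSHIFT) | TIMER_DEFERRABLE | TIMER_PINNED);
	return 0;
}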
@@ -236,12 +236,28 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 static inline struct jump_entry *static_key_entries(struct static_key *key)
 {
-    return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+    WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
+    return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 }
 static inline bool static_key_type(struct static_key *key)
 {
-    return (unsigned long)key->entries & JUMP_TYPE_MASK;
+    return key->type & JUMP_TYPE_TRUE;
+}
+
+static inline bool static_key_linked(struct static_key *key)
+{
+    return key->type & JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_clear_linked(struct static_key *key)
+{
+    key->type &= ~JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_set_linked(struct static_key *key)
+{
+    key->type |= JUMP_TYPE_LINKED;
 }
 static inline struct static_key *jump_entry_key(struct jump_entry *entry)
@@ -254,6 +270,26 @@ static bool jump_entry_branch(struct jump_entry *entry)
     return (unsigned long)entry->key & 1UL;
 }
+/***
+ * A 'struct static_key' uses a union such that it either points directly
+ * to a table of 'struct jump_entry' or to a linked list of modules which in
+ * turn point to 'struct jump_entry' tables.
+ *
+ * The two lower bits of the pointer are used to keep track of which pointer
+ * type is in use and to store the initial branch direction, we use an access
+ * function which preserves these bits.
+ */
+static void static_key_set_entries(struct static_key *key,
+                                   struct jump_entry *entries)
+{
+    unsigned long type;
+
+    WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
+    type = key->type & JUMP_TYPE_MASK;
+    key->entries = entries;
+    key->type |= type;
+}
+
 static enum jump_label_type jump_label_type(struct jump_entry *entry)
 {
     struct static_key *key = jump_entry_key(entry);
@@ -313,13 +349,7 @@ void __init jump_label_init(void)
             continue;
         key = iterk;
-        /*
-         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-         */
-        *((unsigned long *)&key->entries) += (unsigned long)iter;
-#ifdef CONFIG_MODULES
-        key->next = NULL;
-#endif
+        static_key_set_entries(key, iter);
     }
     static_key_initialized = true;
     jump_label_unlock();
@@ -343,6 +373,29 @@ struct static_key_mod {
     struct module *mod;
 };
+static inline struct static_key_mod *static_key_mod(struct static_key *key)
+{
+    WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
+    return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
+}
+
+/***
+ * key->type and key->next are the same via union.
+ * This sets key->next and preserves the type bits.
+ *
+ * See additional comments above static_key_set_entries().
+ */
+static void static_key_set_mod(struct static_key *key,
+                               struct static_key_mod *mod)
+{
+    unsigned long type;
+
+    WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
+    type = key->type & JUMP_TYPE_MASK;
+    key->next = mod;
+    key->type |= type;
+}
+
 static int __jump_label_mod_text_reserved(void *start, void *end)
 {
     struct module *mod;
@@ -365,11 +418,23 @@ static void __jump_label_mod_update(struct static_key *key)
 {
     struct static_key_mod *mod;
-    for (mod = key->next; mod; mod = mod->next) {
-        struct module *m = mod->mod;
-
-        __jump_label_update(key, mod->entries,
-                            m->jump_entries + m->num_jump_entries);
+    for (mod = static_key_mod(key); mod; mod = mod->next) {
+        struct jump_entry *stop;
+        struct module *m;
+
+        /*
+         * NULL if the static_key is defined in a module
+         * that does not use it
+         */
+        if (!mod->entries)
+            continue;
+
+        m = mod->mod;
+        if (!m)
+            stop = __stop___jump_table;
+        else
+            stop = m->jump_entries + m->num_jump_entries;
+
+        __jump_label_update(key, mod->entries, stop);
     }
 }
@@ -404,7 +469,7 @@ static int jump_label_add_module(struct module *mod)
     struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
     struct jump_entry *iter;
     struct static_key *key = NULL;
-    struct static_key_mod *jlm;
+    struct static_key_mod *jlm, *jlm2;
     /* if the module doesn't have jump label entries, just return */
     if (iter_start == iter_stop)
@@ -421,20 +486,32 @@ static int jump_label_add_module(struct module *mod)
         key = iterk;
         if (within_module(iter->key, mod)) {
-            /*
-             * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-             */
-            *((unsigned long *)&key->entries) += (unsigned long)iter;
-            key->next = NULL;
+            static_key_set_entries(key, iter);
             continue;
         }
         jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
         if (!jlm)
             return -ENOMEM;
+
+        if (!static_key_linked(key)) {
+            jlm2 = kzalloc(sizeof(struct static_key_mod),
+                           GFP_KERNEL);
+            if (!jlm2) {
+                kfree(jlm);
+                return -ENOMEM;
+            }
+            preempt_disable();
+            jlm2->mod = __module_address((unsigned long)key);
+            preempt_enable();
+            jlm2->entries = static_key_entries(key);
+            jlm2->next = NULL;
+            static_key_set_mod(key, jlm2);
+            static_key_set_linked(key);
+        }
+
         jlm->mod = mod;
         jlm->entries = iter;
-        jlm->next = key->next;
-        key->next = jlm;
+        jlm->next = static_key_mod(key);
+        static_key_set_mod(key, jlm);
+        static_key_set_linked(key);
         /* Only update if we've changed from our initial state */
         if (jump_label_type(iter) != jump_label_init_type(iter))
@@ -461,16 +538,34 @@ static void jump_label_del_module(struct module *mod)
         if (within_module(iter->key, mod))
             continue;
+        /* No memory during module load */
+        if (WARN_ON(!static_key_linked(key)))
+            continue;
+
         prev = &key->next;
-        jlm = key->next;
+        jlm = static_key_mod(key);
         while (jlm && jlm->mod != mod) {
             prev = &jlm->next;
             jlm = jlm->next;
         }
-        if (jlm) {
+        /* No memory during module load */
+        if (WARN_ON(!jlm))
+            continue;
+
+        if (prev == &key->next)
+            static_key_set_mod(key, jlm->next);
+        else
             *prev = jlm->next;
+
+        kfree(jlm);
+
+        jlm = static_key_mod(key);
+        /* if only one entry is left, fold it back into the static_key */
+        if (jlm->next == NULL) {
+            static_key_set_entries(key, jlm->entries);
+            static_key_clear_linked(key);
             kfree(jlm);
         }
     }
@@ -499,8 +594,10 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
     case MODULE_STATE_COMING:
         jump_label_lock();
         ret = jump_label_add_module(mod);
-        if (ret)
+        if (ret) {
+            WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
             jump_label_del_module(mod);
+        }
         jump_label_unlock();
         break;
     case MODULE_STATE_GOING:
@@ -561,11 +658,14 @@ int jump_label_text_reserved(void *start, void *end)
 static void jump_label_update(struct static_key *key)
 {
     struct jump_entry *stop = __stop___jump_table;
-    struct jump_entry *entry = static_key_entries(key);
+    struct jump_entry *entry;
 #ifdef CONFIG_MODULES
     struct module *mod;
-    __jump_label_mod_update(key);
+    if (static_key_linked(key)) {
+        __jump_label_mod_update(key);
+        return;
+    }
     preempt_disable();
     mod = __module_address((unsigned long)key);
@@ -573,6 +673,7 @@ static void jump_label_update(struct static_key *key)
         stop = mod->jump_entries + mod->num_jump_entries;
     preempt_enable();
 #endif
+    entry = static_key_entries(key);
     /* if there are no users, entry can be NULL */
     if (entry)
         __jump_label_update(key, entry, stop);
...
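The add/del paths above switch a key between two representations: a direct pointer to its jump_entry table, and, once modules get involved, a linked list of per-module static_key_mod records that is folded back into the direct pointer when only one element remains. A much-simplified, hypothetical userspace model of that add/remove/fold-back flow (not the kernel data structures):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mod_entries {
	struct mod_entries *next;
	const char *owner;		/* module name, demo only */
};

struct demo_key {
	int linked;			/* stands in for JUMP_TYPE_LINKED */
	union {
		const char *entries;	/* direct "table" (demo: a string) */
		struct mod_entries *list;
	};
};

static void key_add_module(struct demo_key *key, const char *owner)
{
	struct mod_entries *m = calloc(1, sizeof(*m));

	if (!key->linked) {
		/* first extra user: move the direct pointer into a list node */
		struct mod_entries *first = calloc(1, sizeof(*first));

		first->owner = key->entries;
		key->list = first;
		key->linked = 1;
	}
	m->owner = owner;
	m->next = key->list;
	key->list = m;
}

static void key_del_module(struct demo_key *key, const char *owner)
{
	struct mod_entries **prev = &key->list, *m = key->list;

	while (m && strcmp(m->owner, owner) != 0) {
		prev = &m->next;
		m = m->next;
	}
	if (!m)
		return;
	*prev = m->next;
	free(m);

	if (key->list && !key->list->next) {
		/* fold a singleton list back into the direct pointer */
		struct mod_entries *last = key->list;

		key->entries = last->owner;
		key->linked = 0;
		free(last);
	}
}

int main(void)
{
	struct demo_key key = { .entries = "builtin" };

	key_add_module(&key, "mod_a");
	printf("linked=%d\n", key.linked);		/* 1: two users */
	key_del_module(&key, "mod_a");
	printf("linked=%d entries=%s\n", key.linked, key.entries);
	return 0;
}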
This diff is collapsed.
@@ -260,16 +260,8 @@ unsigned long long ns2usecs(u64 nsec)
     TRACE_ITER_EVENT_FORK
 /*
- * The global_trace is the descriptor that holds the tracing
- * buffers for the live tracing. For each CPU, it contains
- * a link list of pages that will store trace entries. The
- * page descriptor of the pages in the memory is used to hold
- * the link list by linking the lru item in the page descriptor
- * to each of the pages in the buffer per CPU.
- *
- * For each active CPU there is a data field that holds the
- * pages for the buffer for that CPU. Each CPU has the same number
- * of pages allocated for its buffer.
+ * The global_trace is the descriptor that holds the top-level tracing
+ * buffers for the live tracing.
  */
 static struct trace_array global_trace = {
     .trace_flags = TRACE_DEFAULT_FLAGS,
@@ -1193,6 +1185,7 @@ int trace_parser_get_init(struct trace_parser *parser, int size)
 void trace_parser_put(struct trace_parser *parser)
 {
     kfree(parser->buffer);
+    parser->buffer = NULL;
 }
 /*
...
@@ -753,6 +753,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
 extern char trace_find_mark(unsigned long long duration);
+struct ftrace_hash {
+    unsigned long size_bits;
+    struct hlist_head *buckets;
+    unsigned long count;
+    struct rcu_head rcu;
+};
+
+struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
+
+static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
+{
+    return !hash || !hash->count;
+}
+
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -787,53 +802,50 @@ extern void __trace_graph_return(struct trace_array *tr,
                                  struct ftrace_graph_ret *trace,
                                  unsigned long flags, int pc);
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* TODO: make this variable */
-#define FTRACE_GRAPH_MAX_FUNCS 32
-extern int ftrace_graph_count;
-extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
-extern int ftrace_graph_notrace_count;
-extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern struct ftrace_hash *ftrace_graph_hash;
+extern struct ftrace_hash *ftrace_graph_notrace_hash;
 static inline int ftrace_graph_addr(unsigned long addr)
 {
-    int i;
-
-    if (!ftrace_graph_count)
-        return 1;
-
-    for (i = 0; i < ftrace_graph_count; i++) {
-        if (addr == ftrace_graph_funcs[i]) {
-            /*
-             * If no irqs are to be traced, but a set_graph_function
-             * is set, and called by an interrupt handler, we still
-             * want to trace it.
-             */
-            if (in_irq())
-                trace_recursion_set(TRACE_IRQ_BIT);
-            else
-                trace_recursion_clear(TRACE_IRQ_BIT);
-            return 1;
-        }
-    }
-
-    return 0;
+    int ret = 0;
+
+    preempt_disable_notrace();
+
+    if (ftrace_hash_empty(ftrace_graph_hash)) {
+        ret = 1;
+        goto out;
+    }
+
+    if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+        /*
+         * If no irqs are to be traced, but a set_graph_function
+         * is set, and called by an interrupt handler, we still
+         * want to trace it.
+         */
+        if (in_irq())
+            trace_recursion_set(TRACE_IRQ_BIT);
+        else
+            trace_recursion_clear(TRACE_IRQ_BIT);
+        ret = 1;
+    }
+out:
+    preempt_enable_notrace();
+    return ret;
 }
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
-    int i;
-
-    if (!ftrace_graph_notrace_count)
-        return 0;
-
-    for (i = 0; i < ftrace_graph_notrace_count; i++) {
-        if (addr == ftrace_graph_notrace_funcs[i])
-            return 1;
-    }
-
-    return 0;
+    int ret = 0;
+
+    preempt_disable_notrace();
+
+    if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+        ret = 1;
+
+    preempt_enable_notrace();
+    return ret;
 }
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
@@ -1300,7 +1312,8 @@ static inline bool is_string_field(struct ftrace_event_field *field)
 {
     return field->filter_type == FILTER_DYN_STRING ||
            field->filter_type == FILTER_STATIC_STRING ||
-           field->filter_type == FILTER_PTR_STRING;
+           field->filter_type == FILTER_PTR_STRING ||
+           field->filter_type == FILTER_COMM;
 }
 static inline bool is_function_field(struct ftrace_event_field *field)
...
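The rewritten ftrace_graph_addr()/ftrace_graph_notrace_addr() above replace the fixed 32-slot arrays with ftrace_hash lookups while keeping the old semantics: an empty filter hash means "trace everything", otherwise only addresses found in the hash are traced. A tiny stand-in showing just those semantics; it uses a plain linear lookup and hypothetical names rather than the kernel's hash table:

#include <stdbool.h>
#include <stdio.h>

struct addr_set {
	const unsigned long *addrs;
	unsigned int count;
};

static bool set_empty(const struct addr_set *s)
{
	return !s || !s->count;
}

static bool graph_addr_allowed(const struct addr_set *filter, unsigned long addr)
{
	if (set_empty(filter))
		return true;		/* no filter: trace every function */

	for (unsigned int i = 0; i < filter->count; i++)
		if (filter->addrs[i] == addr)
			return true;
	return false;
}

int main(void)
{
	const unsigned long funcs[] = { 0xffffffff81000010UL };
	struct addr_set filter = { .addrs = funcs, .count = 1 };

	printf("%d %d\n",
	       graph_addr_allowed(NULL, 0xffffffff81000020UL),	   /* 1 */
	       graph_addr_allowed(&filter, 0xffffffff81000020UL)); /* 0 */
	return 0;
}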
@@ -175,9 +175,9 @@ int trace_benchmark_reg(void)
     bm_event_thread = kthread_run(benchmark_event_kthread,
                                   NULL, "event_benchmark");
-    if (!bm_event_thread) {
+    if (IS_ERR(bm_event_thread)) {
         pr_warning("trace benchmark failed to create kernel thread\n");
-        return -ENOMEM;
+        return PTR_ERR(bm_event_thread);
     }
     return 0;
...
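The fix above matters because kthread_run() never returns NULL on failure; it returns an ERR_PTR()-encoded errno, so the old !bm_event_thread test could never fire. A minimal userspace model of that convention (the real helpers live in include/linux/err.h; the fake thread function below is made up):

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_kthread_run(int fail)
{
	static int thread_obj;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &thread_obj;
}

int main(void)
{
	void *t = fake_kthread_run(1);

	if (!t)
		printf("never reached: failure is not NULL\n");
	if (IS_ERR(t))
		printf("failed: errno=%ld\n", PTR_ERR(t));
	return 0;
}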
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(branch_tracing_mutex);
 static struct trace_array *branch_tracer;
 static void
-probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
     struct trace_event_call *call = &event_branch;
     struct trace_array *tr = branch_tracer;
@@ -68,16 +68,17 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
     entry = ring_buffer_event_data(event);
     /* Strip off the path, only save the file */
-    p = f->file + strlen(f->file);
-    while (p >= f->file && *p != '/')
+    p = f->data.file + strlen(f->data.file);
+    while (p >= f->data.file && *p != '/')
         p--;
     p++;
-    strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+    strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
     strncpy(entry->file, p, TRACE_FILE_SIZE);
     entry->func[TRACE_FUNC_SIZE] = 0;
     entry->file[TRACE_FILE_SIZE] = 0;
-    entry->line = f->line;
+    entry->constant = f->constant;
+    entry->line = f->data.line;
     entry->correct = val == expect;
     if (!call_filter_check_discard(call, entry, buffer, event))
@@ -89,7 +90,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 }
 static inline
-void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
     if (!branch_tracing_enabled)
         return;
@@ -195,13 +196,19 @@ core_initcall(init_branch_tracer);
 #else
 static inline
-void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
 }
 #endif /* CONFIG_BRANCH_TRACER */
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
+void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+                          int expect, int is_constant)
 {
+    /* A constant is always correct */
+    if (is_constant) {
+        f->constant++;
+        val = expect;
+    }
+
     /*
      * I would love to have a trace point here instead, but the
      * trace point code is so inundated with unlikely and likely
@@ -212,9 +219,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
     /* FIXME: Make this atomic! */
     if (val == expect)
-        f->correct++;
+        f->data.correct++;
     else
-        f->incorrect++;
+        f->data.incorrect++;
 }
 EXPORT_SYMBOL(ftrace_likely_update);
@@ -245,29 +252,60 @@ static inline long get_incorrect_percent(struct ftrace_branch_data *p)
     return percent;
 }
-static int branch_stat_show(struct seq_file *m, void *v)
+static const char *branch_stat_process_file(struct ftrace_branch_data *p)
 {
-    struct ftrace_branch_data *p = v;
     const char *f;
-    long percent;
     /* Only print the file, not the path */
     f = p->file + strlen(p->file);
     while (f >= p->file && *f != '/')
         f--;
-    f++;
+    return ++f;
+}
+
+static void branch_stat_show(struct seq_file *m,
+                             struct ftrace_branch_data *p, const char *f)
+{
+    long percent;
     /*
      * The miss is overlayed on correct, and hit on incorrect.
      */
     percent = get_incorrect_percent(p);
-    seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
     if (percent < 0)
         seq_puts(m, " X ");
     else
         seq_printf(m, "%3ld ", percent);
     seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
+}
+
+static int branch_stat_show_normal(struct seq_file *m,
+                                   struct ftrace_branch_data *p, const char *f)
+{
+    seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
+    branch_stat_show(m, p, f);
+    return 0;
+}
+
+static int annotate_branch_stat_show(struct seq_file *m, void *v)
+{
+    struct ftrace_likely_data *p = v;
+    const char *f;
+    int l;
+
+    f = branch_stat_process_file(&p->data);
+    if (!p->constant)
+        return branch_stat_show_normal(m, &p->data, f);
+
+    l = snprintf(NULL, 0, "/%lu", p->constant);
+    l = l > 8 ? 0 : 8 - l;
+
+    seq_printf(m, "%8lu/%lu %*lu ",
+               p->data.correct, p->constant, l, p->data.incorrect);
+    branch_stat_show(m, &p->data, f);
     return 0;
 }
@@ -279,7 +317,7 @@ static void *annotated_branch_stat_start(struct tracer_stat *trace)
 static void *
 annotated_branch_stat_next(void *v, int idx)
 {
-    struct ftrace_branch_data *p = v;
+    struct ftrace_likely_data *p = v;
     ++p;
@@ -328,7 +366,7 @@ static struct tracer_stat annotated_branch_stats = {
     .stat_next = annotated_branch_stat_next,
     .stat_cmp = annotated_branch_stat_cmp,
     .stat_headers = annotated_branch_stat_headers,
-    .stat_show = branch_stat_show
+    .stat_show = annotate_branch_stat_show
 };
 __init static int init_annotated_branch_stats(void)
@@ -379,12 +417,21 @@ all_branch_stat_next(void *v, int idx)
     return p;
 }
+static int all_branch_stat_show(struct seq_file *m, void *v)
+{
+    struct ftrace_branch_data *p = v;
+    const char *f;
+
+    f = branch_stat_process_file(p);
+    return branch_stat_show_normal(m, p, f);
+}
+
 static struct tracer_stat all_branch_stats = {
     .name = "branch_all",
     .stat_start = all_branch_stat_start,
     .stat_next = all_branch_stat_next,
     .stat_headers = all_branch_stat_headers,
-    .stat_show = branch_stat_show
+    .stat_show = all_branch_stat_show
 };
 __init static int all_annotated_branch_stats(void)
...
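One small idiom worth noting in annotate_branch_stat_show() above: snprintf(NULL, 0, ...) is used purely to measure how wide the "/constant" suffix will be, so the correct/incorrect columns stay aligned whether or not a constant count is printed. A standalone illustration of that trick:

#include <stdio.h>

int main(void)
{
	unsigned long correct = 42, constant = 7, incorrect = 3;
	int l = snprintf(NULL, 0, "/%lu", constant);	/* width of "/7" */

	l = l > 8 ? 0 : 8 - l;
	printf("%8lu/%lu %*lu |\n", correct, constant, l, incorrect);
	printf("%8lu %8lu |\n", 42UL, 3UL);	/* the normal, no-constant row */
	return 0;
}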
@@ -328,11 +328,13 @@ FTRACE_ENTRY(branch, trace_branch,
         __array( char, func, TRACE_FUNC_SIZE+1 )
         __array( char, file, TRACE_FILE_SIZE+1 )
         __field( char, correct )
+        __field( char, constant )
     ),
-    F_printk("%u:%s:%s (%u)",
+    F_printk("%u:%s:%s (%u)%s",
          __entry->line,
-         __entry->func, __entry->file, __entry->correct),
+         __entry->func, __entry->file, __entry->correct,
+         __entry->constant ? " CONSTANT" : ""),
     FILTER_OTHER
 );
...
@@ -322,10 +322,7 @@ static void move_to_next_cpu(bool initmask)
  * need to ensure nothing else might be running (and thus preempting).
  * Obviously this should never be used in production environments.
  *
- * Currently this runs on which ever CPU it was scheduled on, but most
- * real-world hardware latency situations occur across several CPUs,
- * but we might later generalize this if we find there are any actualy
- * systems with alternate SMI delivery or other hardware latencies.
+ * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file.
  */
 static int kthread_fn(void *data)
 {
...
@@ -16,6 +16,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
+#define pr_fmt(fmt) "trace_kprobe: " fmt
 #include <linux/module.h>
 #include <linux/uaccess.h>
...
@@ -21,6 +21,7 @@
  * Copyright (C) IBM Corporation, 2010-2011
  * Author: Srikar Dronamraju
  */
+#define pr_fmt(fmt) "trace_probe: " fmt
 #include "trace_probe.h"
@@ -647,7 +648,7 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
                                 size_t count, loff_t *ppos,
                                 int (*createfn)(int, char **))
 {
-    char *kbuf, *tmp;
+    char *kbuf, *buf, *tmp;
     int ret = 0;
     size_t done = 0;
     size_t size;
@@ -667,27 +668,38 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
             goto out;
         }
         kbuf[size] = '\0';
-        tmp = strchr(kbuf, '\n');
-
-        if (tmp) {
-            *tmp = '\0';
-            size = tmp - kbuf + 1;
-        } else if (done + size < count) {
-            pr_warn("Line length is too long: Should be less than %d\n",
-                    WRITE_BUFSIZE);
-            ret = -EINVAL;
-            goto out;
-        }
-        done += size;
-
-        /* Remove comments */
-        tmp = strchr(kbuf, '#');
-        if (tmp)
-            *tmp = '\0';
-
-        ret = traceprobe_command(kbuf, createfn);
-        if (ret)
-            goto out;
+        buf = kbuf;
+        do {
+            tmp = strchr(buf, '\n');
+            if (tmp) {
+                *tmp = '\0';
+                size = tmp - buf + 1;
+            } else {
+                size = strlen(buf);
+                if (done + size < count) {
+                    if (buf != kbuf)
+                        break;
+                    /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
+                    pr_warn("Line length is too long: Should be less than %d\n",
+                            WRITE_BUFSIZE - 2);
+                    ret = -EINVAL;
+                    goto out;
+                }
+            }
+            done += size;
+
+            /* Remove comments */
+            tmp = strchr(buf, '#');
+            if (tmp)
+                *tmp = '\0';
+
+            ret = traceprobe_command(buf, createfn);
+            if (ret)
+                goto out;
+
+            buf += size;
+        } while (done < count);
     }
     ret = done;
...
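With the loop above, a single write() to the probe control files may now carry several newline-separated commands, and '#' starts a comment. A compact userspace stand-in for that parsing loop, with a dummy handler and hypothetical names:

#include <stdio.h>
#include <string.h>

static int handle_command(const char *cmd)
{
	if (*cmd)
		printf("command: '%s'\n", cmd);
	return 0;
}

static int process_chunk(char *kbuf)
{
	char *buf = kbuf;
	size_t done = 0, count = strlen(kbuf), size;

	do {
		char *tmp = strchr(buf, '\n');

		if (tmp) {
			*tmp = '\0';
			size = tmp - buf + 1;
		} else {
			size = strlen(buf);	/* last, unterminated line */
		}
		done += size;

		tmp = strchr(buf, '#');		/* strip trailing comment */
		if (tmp)
			*tmp = '\0';

		if (handle_command(buf))
			return -1;
		buf += size;
	} while (done < count);

	return 0;
}

int main(void)
{
	char input[] = "p:myprobe do_sys_open\n# a comment\np:other vfs_read\n";

	return process_chunk(input);
}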
@@ -17,6 +17,7 @@
  * Copyright (C) IBM Corporation, 2010-2012
  * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  */
+#define pr_fmt(fmt) "trace_kprobe: " fmt
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -431,7 +432,8 @@ static int create_trace_uprobe(int argc, char **argv)
         pr_info("Probe point is not specified.\n");
         return -EINVAL;
     }
-    arg = strchr(argv[1], ':');
+    /* Find the last occurrence, in case the path contains ':' too. */
+    arg = strrchr(argv[1], ':');
     if (!arg) {
         ret = -EINVAL;
         goto fail_address_parse;
...
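The strrchr() change above matters because the location argument of a uprobe definition has the form <path>:<offset>, and the path itself may legally contain ':'. Splitting on the last colon keeps such paths intact. A tiny illustration with a made-up path:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char spec[] = "/opt/app:v2/bin/tool:0x4710";
	char *arg = strrchr(spec, ':');		/* last ':' -> offset part */

	if (arg) {
		*arg++ = '\0';
		printf("path='%s' offset='%s'\n", spec, arg);
	}
	return 0;
}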