Commit 09bda443 authored by Ingo Molnar

Merge branch 'tip/perf/core' of...

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
parents d1e169da 47b0edcb
@@ -226,6 +226,13 @@ Here is the list of current tracers that may be configured.
	Traces and records the max latency that it takes for
	the highest priority task to get scheduled after
	it has been woken up.
+	Traces all tasks as an average developer would expect.
+
+  "wakeup_rt"
+
+	Traces and records the max latency that it takes for just
+	RT tasks (as the current "wakeup" does). This is useful
+	for those interested in wake up timings of RT tasks.
  "hw-branch-tracer"
......
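For readers who want to try the new tracer, selecting it is a single write to the tracefs control file. A minimal user-space sketch in C (the path assumes debugfs is mounted at /sys/kernel/debug, as in the mini-HOWTO updated later in this merge):

#include <stdio.h>

int main(void)
{
	/* Select the wakeup_rt tracer; needs root and a mounted debugfs. */
	FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

	if (!f) {
		perror("current_tracer");
		return 1;
	}
	fputs("wakeup_rt", f);	/* record max wakeup latency of RT tasks only */
	fclose(f);
	return 0;
}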
@@ -377,8 +377,8 @@ static inline int hlt_use_halt(void)
void default_idle(void)
{
	if (hlt_use_halt()) {
-		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle(1, smp_processor_id());
+		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
@@ -391,8 +391,8 @@ void default_idle(void)
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
-		trace_power_end(smp_processor_id());
-		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+		trace_power_end_rcuidle(smp_processor_id());
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
@@ -450,8 +450,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
static void mwait_idle(void)
{
	if (!need_resched()) {
-		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-		trace_cpu_idle(1, smp_processor_id());
+		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);
@@ -461,8 +461,8 @@ static void mwait_idle(void)
			__sti_mwait(0, 0);
		else
			local_irq_enable();
-		trace_power_end(smp_processor_id());
-		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+		trace_power_end_rcuidle(smp_processor_id());
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}
@@ -474,13 +474,13 @@ static void mwait_idle(void)
 */
static void poll_idle(void)
{
-	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
-	trace_cpu_idle(0, smp_processor_id());
+	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
+	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	trace_power_end_rcuidle(smp_processor_id());
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
/*
......
@@ -94,13 +94,13 @@ int cpuidle_idle_call(void)
	target_state = &drv->states[next_state];
-	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
-	trace_cpu_idle(next_state, dev->cpu);
+	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
+	trace_cpu_idle_rcuidle(next_state, dev->cpu);
	entered_state = target_state->enter(dev, drv, next_state);
-	trace_power_end(dev->cpu);
-	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+	trace_power_end_rcuidle(dev->cpu);
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
	if (entered_state >= 0) {
		/* Update cpuidle counters */
......
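Why these call sites move to the _rcuidle variants: the idle loop has already called rcu_idle_enter(), so RCU is not watching the CPU, and the sched-RCU read-side section inside a plain tracepoint is illegal there. A compilable user-space model of the hazard and the fix (stubbed names mirror the diff; this is a sketch, not kernel code):

#include <assert.h>
#include <stdio.h>

static int rcu_watching = 1;

static void rcu_idle_enter(void) { rcu_watching = 0; }	/* extended quiescent state */
static void rcu_idle_exit(void)  { rcu_watching = 1; }

/* A plain tracepoint relies on sched-RCU, which is only legal while RCU
 * is watching this CPU. */
static void trace_cpu_idle(int state)
{
	assert(rcu_watching);	/* would be an illegal RCU use from idle */
	printf("cpu_idle: state=%d\n", state);
}

/* The _rcuidle variant momentarily leaves the RCU-idle state around the
 * probe call, which is what the tracepoint.h hunk below implements. */
static void trace_cpu_idle_rcuidle(int state)
{
	rcu_idle_exit();
	trace_cpu_idle(state);
	rcu_idle_enter();
}

int main(void)
{
	rcu_idle_enter();		/* the idle loop did this already */
	trace_cpu_idle_rcuidle(1);	/* safe */
	rcu_idle_exit();
	return 0;
}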
@@ -178,9 +178,9 @@ struct dyn_ftrace {
};
int ftrace_force_update(void);
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
......
@@ -20,7 +20,6 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
-#include <trace/events/irq.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
@@ -456,11 +455,7 @@ asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-static inline void __raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-}
+extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
......
@@ -114,7 +114,7 @@ static inline void tracepoint_synchronize_unregister(void)
 * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
 * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
 */
-#define __DO_TRACE(tp, proto, args, cond)			\
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)	\
	do {							\
		struct tracepoint_func *it_func_ptr;		\
		void *it_func;					\
@@ -122,6 +122,7 @@ static inline void tracepoint_synchronize_unregister(void)
								\
		if (!(cond))					\
			return;					\
+		prercu;						\
		rcu_read_lock_sched_notrace();			\
		it_func_ptr = rcu_dereference_sched((tp)->funcs); \
		if (it_func_ptr) {				\
@@ -132,6 +133,7 @@ static inline void tracepoint_synchronize_unregister(void)
			} while ((++it_func_ptr)->func);	\
		}						\
		rcu_read_unlock_sched_notrace();		\
+		postrcu;					\
	} while (0)
/*
@@ -139,7 +141,7 @@ static inline void tracepoint_synchronize_unregister(void)
 * not add unwanted padding between the beginning of the section and the
 * structure. Force alignment to the same alignment as the section start.
 */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
	extern struct tracepoint __tracepoint_##name;		\
	static inline void trace_##name(proto)			\
	{							\
@@ -147,7 +149,17 @@ static inline void tracepoint_synchronize_unregister(void)
			__DO_TRACE(&__tracepoint_##name,	\
				TP_PROTO(data_proto),		\
				TP_ARGS(data_args),		\
-				TP_CONDITION(cond));		\
+				TP_CONDITION(cond),,);		\
	}							\
+	static inline void trace_##name##_rcuidle(proto)	\
+	{							\
+		if (static_branch(&__tracepoint_##name.key))	\
+			__DO_TRACE(&__tracepoint_##name,	\
+				TP_PROTO(data_proto),		\
+				TP_ARGS(data_args),		\
+				TP_CONDITION(cond),		\
+				rcu_idle_exit(),		\
+				rcu_idle_enter());		\
+	}							\
	static inline int					\
	register_trace_##name(void (*probe)(data_proto), void *data) \
@@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void)
	EXPORT_SYMBOL(__tracepoint_##name)
#else /* !CONFIG_TRACEPOINTS */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
	static inline void trace_##name(proto)			\
	{ }							\
+	static inline void trace_##name##_rcuidle(proto)	\
+	{ }							\
	static inline int					\
	register_trace_##name(void (*probe)(data_proto),	\
			      void *data)			\
......
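The two new macro parameters lean on C's empty-macro-argument rule: plain trace_##name() passes nothing for prercu/postrcu (hence the TP_CONDITION(cond),,); above), while trace_##name##_rcuidle() passes rcu_idle_exit() and rcu_idle_enter(). A compilable user-space model of the mechanism (DO_TRACE is a sketch, not the kernel macro):

#include <stdio.h>

#define DO_TRACE(arg, prercu, postrcu)		\
	do {					\
		prercu;				\
		printf("probe(%d)\n", (arg));	\
		postrcu;			\
	} while (0)

static void rcu_idle_exit(void)  { puts("rcu_idle_exit()");  }
static void rcu_idle_enter(void) { puts("rcu_idle_enter()"); }

int main(void)
{
	/* trace_foo(): empty pre/post arguments expand to a bare ';' */
	DO_TRACE(1, , );

	/* trace_foo_rcuidle(): wrap the probe in an RCU-watching window */
	DO_TRACE(1, rcu_idle_exit(), rcu_idle_enter());
	return 0;
}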
@@ -151,6 +151,8 @@ enum {
events get removed */
static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
......
#undef TRACE_SYSTEM
#define TRACE_SYSTEM printk

#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PRINTK_H

#include <linux/tracepoint.h>

TRACE_EVENT_CONDITION(console,
	TP_PROTO(const char *log_buf, unsigned start, unsigned end,
		 unsigned log_buf_len),

	TP_ARGS(log_buf, start, end, log_buf_len),

	TP_CONDITION(start != end),

	TP_STRUCT__entry(
		__dynamic_array(char, msg, end - start + 1)
	),

	TP_fast_assign(
		if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
			memcpy(__get_dynamic_array(msg),
			       log_buf + (start & (log_buf_len - 1)),
			       log_buf_len - (start & (log_buf_len - 1)));
			memcpy((char *)__get_dynamic_array(msg) +
			       log_buf_len - (start & (log_buf_len - 1)),
			       log_buf, end & (log_buf_len - 1));
		} else
			memcpy(__get_dynamic_array(msg),
			       log_buf + (start & (log_buf_len - 1)),
			       end - start);

		((char *)__get_dynamic_array(msg))[end - start] = 0;
	),

	TP_printk("%s", __get_str(msg))
);
#endif /* _TRACE_PRINTK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
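The TP_fast_assign() body deals with the fact that start and end are free-running positions into a power-of-two ring buffer: masking with log_buf_len - 1 maps them into the buffer, and when the masked start lands above the masked end the record wraps, so two copies are needed. A user-space model of the same copy (copy_range is a hypothetical helper; power-of-two length assumed, and the caller guarantees end - start fits the buffer):

#include <stdio.h>
#include <string.h>

#define LOG_BUF_LEN 8	/* power of two, like log_buf_len */

static void copy_range(char *dst, const char *log_buf,
		       unsigned start, unsigned end)
{
	unsigned s = start & (LOG_BUF_LEN - 1);
	unsigned e = end & (LOG_BUF_LEN - 1);

	if (s > e) {			/* wrapped: tail of buffer, then head */
		memcpy(dst, log_buf + s, LOG_BUF_LEN - s);
		memcpy(dst + LOG_BUF_LEN - s, log_buf, e);
	} else {
		memcpy(dst, log_buf + s, end - start);
	}
	dst[end - start] = '\0';	/* mirrors the final assignment above */
}

int main(void)
{
	char buf[LOG_BUF_LEN] = { 'g', 'h', 'a', 'b', 'c', 'd', 'e', 'f' };
	char msg[LOG_BUF_LEN + 1];

	copy_range(msg, buf, 6, 10);	/* positions 6..10 wrap the 8-byte ring */
	printf("%s\n", msg);		/* prints "efgh" */
	return 0;
}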
@@ -16,6 +16,8 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

+#include <trace/events/irq.h>
+
#include "internals.h"

/**
......
@@ -44,6 +44,9 @@
#include <asm/uaccess.h>

+#define CREATE_TRACE_POINTS
+#include <trace/events/printk.h>
+
/*
 * Architectures can override it:
 */
@@ -542,6 +545,8 @@ MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
static void _call_console_drivers(unsigned start,
			unsigned end, int msg_log_level)
{
+	trace_console(&LOG_BUF(0), start, end, log_buf_len);
+
	if ((msg_log_level < console_loglevel || ignore_loglevel) &&
			console_drivers && start != end) {
		if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
......
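Once the hook is in place, the event behaves like any other tracepoint and can be switched on from user space. A minimal sketch (the path assumes tracing files under /sys/kernel/debug/tracing):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/events/printk/console/enable", "w");

	if (!f) {
		perror("printk/console/enable");
		return 1;
	}
	fputc('1', f);	/* log every console write into the trace buffer */
	fclose(f);
	return 0;
}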
@@ -385,6 +385,12 @@ void raise_softirq(unsigned int nr)
	local_irq_restore(flags);
}

+void __raise_softirq_irqoff(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	or_softirq_pending(1UL << nr);
+}
+
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
......
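Moving the helper out of line means the irq tracepoint header is needed in exactly one translation unit instead of by everything that includes interrupt.h. A user-space model of the uninlined helper's behavior (all names stubbed; a sketch, not kernel code):

#include <stdio.h>

enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ };

static unsigned long pending;	/* stands in for the per-CPU pending mask */

static void trace_softirq_raise(unsigned int nr)
{
	printf("softirq_raise: vec=%u\n", nr);
}

static void or_softirq_pending(unsigned long mask)
{
	pending |= mask;
}

/* Same shape as the function added to the softirq code above. */
static void raise_softirq_irqoff_model(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

int main(void)
{
	raise_softirq_irqoff_model(NET_RX_SOFTIRQ);
	printf("pending=%#lx\n", pending);	/* 0x8 */
	return 0;
}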
@@ -1129,7 +1129,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
		return NULL;
	size = 1 << size_bits;
-	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
	if (!hash->buckets) {
		kfree(hash);
@@ -3146,8 +3146,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(hash);
-	if (buf)
-		ftrace_match_records(hash, buf, len);
+	if (buf && !ftrace_match_records(hash, buf, len)) {
+		ret = -EINVAL;
+		goto out_regex_unlock;
+	}
	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3157,6 +3159,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
	mutex_unlock(&ftrace_lock);
+ out_regex_unlock:
	mutex_unlock(&ftrace_regex_lock);
	free_ftrace_hash(hash);
@@ -3173,10 +3176,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset)
{
-	ftrace_set_regex(ops, buf, len, reset, 1);
+	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3191,10 +3194,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
-	ftrace_set_regex(ops, buf, len, reset, 0);
+	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);

/**
......
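With the void-to-int change, callers can finally see when a filter string matched nothing. A hedged kernel-module sketch of a caller under the new signature (my_ops, filter, and my_init are hypothetical, not part of this diff):

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>

static struct ftrace_ops my_ops;	/* .func would be set elsewhere */
static unsigned char filter[] = "schedule*";

static int __init my_init(void)
{
	int ret;

	/* Previously this returned void; a pattern that matches no
	 * function is now reported as -EINVAL instead of ignored. */
	ret = ftrace_set_filter(&my_ops, filter, sizeof(filter) - 1, 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
module_init(my_init);

MODULE_LICENSE("GPL");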
@@ -2764,12 +2764,12 @@ static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
-	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
+	"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
-	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+	"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
-	"sched_switch\n"
+	"wakeup\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
......
@@ -685,7 +685,7 @@ find_event_field(struct ftrace_event_call *call, char *name)
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
-	stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
+	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
	if (!stack->preds)
		return -ENOMEM;
	stack->index = n_preds;
@@ -826,8 +826,7 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
	if (filter->preds)
		__free_preds(filter);
-	filter->preds =
-		kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
+	filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
	if (!filter->preds)
		return -ENOMEM;
@@ -1486,7 +1485,7 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
	children = count_leafs(preds, &preds[root->left]);
	children += count_leafs(preds, &preds[root->right]);
-	root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
+	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
	if (!root->ops)
		return -ENOMEM;
......
@@ -468,8 +468,8 @@ int __init init_ftrace_syscalls(void)
	unsigned long addr;
	int i;
-	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
-					NR_syscalls, GFP_KERNEL);
+	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
+				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
......
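The kzalloc(n * size) to kcalloc(n, size) conversions scattered through this merge are more than style: kcalloc checks the count-times-size multiplication for overflow before allocating, while an open-coded product silently wraps. The same contrast demonstrated in user space, with calloc playing kcalloc's role:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 4 + 1, size = 8;

	/* the open-coded product wraps around to 0 */
	printf("n * size = %zu\n", n * size);

	/* calloc (like kcalloc) detects the overflow and fails cleanly */
	if (!calloc(n, size))
		puts("calloc: refused overflowing request");
	return 0;
}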