Commit 6605f9ac authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent
parents ad2a8e60 93d68e52
@@ -144,12 +144,14 @@ struct event_filter;
 enum trace_reg {
 	TRACE_REG_REGISTER,
 	TRACE_REG_UNREGISTER,
+#ifdef CONFIG_PERF_EVENTS
 	TRACE_REG_PERF_REGISTER,
 	TRACE_REG_PERF_UNREGISTER,
 	TRACE_REG_PERF_OPEN,
 	TRACE_REG_PERF_CLOSE,
 	TRACE_REG_PERF_ADD,
 	TRACE_REG_PERF_DEL,
+#endif
 };
 
 struct ftrace_event_call;
......
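Note: the new guard keeps the perf-only ops out of the enum when perf is not built in. For context, a sketch of how an event class ->reg() callback of this era dispatches on these values, modelled on ftrace_event_reg(); the exact tracepoint arguments are abbreviated and the function name is invented for illustration:

#include <linux/ftrace_event.h>

/* Illustrative sketch, not part of this commit. */
static int example_event_reg(struct ftrace_event_call *call,
			     enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe, call);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe, call);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe, call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe, call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}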
@@ -427,16 +427,10 @@ extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
  * Most likely, you want to use tracing_on/tracing_off.
  */
 #ifdef CONFIG_RING_BUFFER
-void tracing_on(void);
-void tracing_off(void);
 /* trace_off_permanent stops recording with no way to bring it back */
 void tracing_off_permanent(void);
-int tracing_is_on(void);
 #else
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
-static inline int tracing_is_on(void) { return 0; }
 #endif
 
 enum ftrace_dump_mode {
@@ -446,6 +440,10 @@ enum ftrace_dump_mode {
 };
 
 #ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+
 extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
@@ -530,6 +528,11 @@ static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(void) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+
 static inline int
 trace_printk(const char *fmt, ...)
 {
......
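Note: with the declarations now under CONFIG_TRACING in linux/kernel.h, any kernel code can pair them with trace_printk(). A minimal, hypothetical error-path sketch (the helper name is invented for illustration):

#include <linux/kernel.h>	/* tracing_off(), tracing_is_on(), trace_printk() */

/* Freeze the trace buffer at the point of failure so the events leading
 * up to it survive in /sys/kernel/debug/tracing/trace. */
static void sketch_handle_error(int err)
{
	if (tracing_is_on()) {
		trace_printk("sketch: error %d, freezing trace\n", err);
		tracing_off();
	}
}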
@@ -151,6 +151,9 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_disable(struct ring_buffer *buffer);
 void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
......
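Note: the header now exposes both flavours of write control. A sketch (not from the commit) of the intended difference, assuming a buffer obtained from ring_buffer_alloc():

#include <linux/ring_buffer.h>

/* Nested form: each disable must be balanced by an enable, so callers
 * can overlap without stomping on each other. */
static void sketch_quiesce(struct ring_buffer *buf)
{
	ring_buffer_record_disable(buf);
	/* ... inspect or swap pages while writers are held off ... */
	ring_buffer_record_enable(buf);
}

/* Switch form: off is sticky until someone turns it back on explicitly. */
static void sketch_freeze(struct ring_buffer *buf)
{
	ring_buffer_record_off(buf);
	WARN_ON(ring_buffer_record_is_on(buf));
}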
@@ -141,7 +141,7 @@ if FTRACE
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
......
@@ -249,7 +249,8 @@ static void update_ftrace_function(void)
 #else
 	__ftrace_trace_function = func;
 #endif
-	ftrace_trace_function = ftrace_test_stop_func;
+	ftrace_trace_function =
+		(func == ftrace_stub) ? func : ftrace_test_stop_func;
 #endif
 }
......
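Note: the rewritten assignment skips the stop-test trampoline when there is nothing to trace. Roughly, as a simplified sketch of the helper as it existed in this era:

/* Simplified sketch of kernel/trace/ftrace.c:ftrace_test_stop_func():
 * it only honours the function_trace_stop switch before forwarding the
 * call to the installed callback. */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;
	__ftrace_trace_function(ip, parent_ip);
}

When func is ftrace_stub there is no callback worth guarding, so the stub is installed directly and the per-call test is avoided.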
@@ -154,33 +154,10 @@ enum {
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
-#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-/**
- * tracing_on - enable all tracing buffers
- *
- * This function enables all tracing buffers that may have been
- * disabled with tracing_off.
- */
-void tracing_on(void)
-{
-	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_on);
-
-/**
- * tracing_off - turn off all tracing buffers
- *
- * This function stops all tracing buffers from recording data.
- * It does not disable any overhead the tracers themselves may
- * be causing. This function simply causes all recording to
- * the ring buffers to fail.
- */
-void tracing_off(void)
-{
-	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_off);
+/* Used for individual buffers (after the counter) */
+#define RB_BUFFER_OFF		(1 << 20)
+
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
-/**
- * tracing_is_on - show state of ring buffers enabled
- */
-int tracing_is_on(void)
-{
-	return ring_buffer_flags == RB_BUFFERS_ON;
-}
-EXPORT_SYMBOL_GPL(tracing_is_on);
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2618,6 +2586,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
+/**
+ * ring_buffer_record_off - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * This is different than ring_buffer_record_disable() as
+ * it works like an on/off switch, whereas the disable() version
+ * must be paired with an enable().
+ */
+void ring_buffer_record_off(struct ring_buffer *buffer)
+{
+	unsigned int rd;
+	unsigned int new_rd;
+
+	do {
+		rd = atomic_read(&buffer->record_disabled);
+		new_rd = rd | RB_BUFFER_OFF;
+	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_off);
+
+/**
+ * ring_buffer_record_on - restart writes into the buffer
+ * @buffer: The ring buffer to start writes to.
+ *
+ * This enables all writes to the buffer that was disabled by
+ * ring_buffer_record_off().
+ *
+ * This is different than ring_buffer_record_enable() as
+ * it works like an on/off switch, whereas the enable() version
+ * must be paired with a disable().
+ */
+void ring_buffer_record_on(struct ring_buffer *buffer)
+{
+	unsigned int rd;
+	unsigned int new_rd;
+
+	do {
+		rd = atomic_read(&buffer->record_disabled);
+		new_rd = rd & ~RB_BUFFER_OFF;
+	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_on);
+
+/**
+ * ring_buffer_record_is_on - return true if the ring buffer can write
+ * @buffer: The ring buffer to see if write is enabled
+ *
+ * Returns true if the ring buffer is in a state that it accepts writes.
+ */
+int ring_buffer_record_is_on(struct ring_buffer *buffer)
+{
+	return !atomic_read(&buffer->record_disabled);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
-#ifdef CONFIG_TRACING
-static ssize_t
-rb_simple_read(struct file *filp, char __user *ubuf,
-	       size_t cnt, loff_t *ppos)
-{
-	unsigned long *p = filp->private_data;
-	char buf[64];
-	int r;
-
-	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
-		r = sprintf(buf, "permanently disabled\n");
-	else
-		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-rb_simple_write(struct file *filp, const char __user *ubuf,
-		size_t cnt, loff_t *ppos)
-{
-	unsigned long *p = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	if (val)
-		set_bit(RB_BUFFERS_ON_BIT, p);
-	else
-		clear_bit(RB_BUFFERS_ON_BIT, p);
-
-	(*ppos)++;
-
-	return cnt;
-}
-
-static const struct file_operations rb_simple_fops = {
-	.open		= tracing_open_generic,
-	.read		= rb_simple_read,
-	.write		= rb_simple_write,
-	.llseek		= default_llseek,
-};
-
-static __init int rb_init_debugfs(void)
-{
-	struct dentry *d_tracer;
-
-	d_tracer = tracing_init_dentry();
-
-	trace_create_file("tracing_on", 0644, d_tracer,
-			  &ring_buffer_flags, &rb_simple_fops);
-
-	return 0;
-}
-
-fs_initcall(rb_init_debugfs);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
......
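Note: both mechanisms share the buffer's record_disabled word. record_disable()/enable() bump a nesting count in the low bits, while record_off()/on() set and clear the RB_BUFFER_OFF bit (1 << 20) with the cmpxchg loops above. A condensed sketch of the resulting write gate (an assumption for illustration, not literal kernel code):

#include <linux/atomic.h>
#include <linux/types.h>

#define SKETCH_RB_BUFFER_OFF	(1 << 20)	/* mirrors RB_BUFFER_OFF above */

/* A write is allowed only when neither the nesting count (low bits) nor
 * the sticky off bit is set, i.e. the whole word reads zero. */
static bool sketch_can_write(atomic_t *record_disabled)
{
	return atomic_read(record_disabled) == 0;
}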
@@ -36,6 +36,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -351,6 +352,59 @@ static void wakeup_work_handler(struct work_struct *work)
 
 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
+/**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_on(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * on the buffer.
+	 */
+	global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	if (global_trace.buffer)
+		ring_buffer_record_off(global_trace.buffer);
+	/*
+	 * This flag is only looked at when buffers haven't been
+	 * allocated yet. We don't really care about the race
+	 * between setting this flag and actually turning
+	 * off the buffer.
+	 */
+	global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+	if (global_trace.buffer)
+		return ring_buffer_record_is_on(global_trace.buffer);
+	return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -4567,6 +4621,55 @@ static __init void create_trace_options_dir(void)
 		create_trace_option_core_file(trace_options[i], i);
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	char buf[64];
+	int r;
+
+	if (buffer)
+		r = ring_buffer_record_is_on(buffer);
+	else
+		r = 0;
+
+	r = sprintf(buf, "%d\n", r);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct ring_buffer *buffer = filp->private_data;
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if (buffer) {
+		if (val)
+			ring_buffer_record_on(buffer);
+		else
+			ring_buffer_record_off(buffer);
+	}
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+	.llseek		= default_llseek,
+};
+
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -4626,6 +4729,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("trace_clock", 0644, d_tracer, NULL,
 			  &trace_clock_fops);
 
+	trace_create_file("tracing_on", 0644, d_tracer,
+			  global_trace.buffer, &rb_simple_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4798,6 +4904,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 			if (ret != TRACE_TYPE_NO_CONSUME)
 				trace_consume(&iter);
 		}
+		touch_nmi_watchdog();
 
 		trace_printk_seq(&iter.seq);
 	}
@@ -4863,6 +4970,8 @@ __init static int tracer_alloc_buffers(void)
 		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
+	if (global_trace.buffer_disabled)
+		tracing_off();
 
 #ifdef CONFIG_TRACER_MAX_TRACE
......
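Note: taken together, the trace.c side gives kernel code a usable toggle even before the global buffer exists, since buffer_disabled is replayed in tracer_alloc_buffers() as shown above. A hypothetical module sketch (names invented) exercising the round trip:

#include <linux/module.h>
#include <linux/kernel.h>	/* tracing_on(), tracing_off(), tracing_is_on() */

static int __init toggle_demo_init(void)
{
	int was_on = tracing_is_on();

	tracing_off();				/* freeze recording */
	pr_info("tracing was %s, now %s\n",
		was_on ? "on" : "off",
		tracing_is_on() ? "on" : "off");
	tracing_on();				/* restore recording */
	return 0;
}

static void __exit toggle_demo_exit(void)
{
}

module_init(toggle_demo_init);
module_exit(toggle_demo_exit);
MODULE_LICENSE("GPL");	/* the helpers are exported with EXPORT_SYMBOL_GPL */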
@@ -154,6 +154,7 @@ struct trace_array {
 	struct ring_buffer	*buffer;
 	unsigned long		entries;
 	int			cpu;
+	int			buffer_disabled;
 	cycle_t			time_start;
 	struct task_struct	*waiter;
 	struct trace_array_cpu	*data[NR_CPUS];
@@ -835,13 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
 	     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_PERF_EVENTS
 #ifdef CONFIG_FUNCTION_TRACER
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
 #endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* _LINUX_KERNEL_TRACE_H */