Commit bc289ae9 authored by Lai Jiangshan, committed by Steven Rostedt

tracing: Reduce latency and remove percpu trace_seq

__print_flags() and __print_symbolic() currently use a percpu trace_seq:

1) Its memory is allocated at compile time, so it wastes memory even when
   tracing is not used.
2) It is percpu data, so it wastes that much more memory on multi-CPU systems.
3) It disables preemption around its core routine
   "trace_seq_printf(s, "%s: ", #call);", which introduces latency.

So move this trace_seq into struct trace_iterator instead.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <4C078350.7090106@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 985023de
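
To make the change concrete before the diff: a minimal before/after sketch of
the pattern in the generated output functions (variable names are the ones
used in the diff below; the surrounding function body is elided).

	/* Old: borrow a shared percpu scratch buffer.  get_cpu_var()
	 * disables preemption so the buffer cannot be switched out from
	 * under us -- this is the latency cost described above.
	 */
	p = &get_cpu_var(ftrace_event_seq);
	trace_seq_init(p);
	ret = trace_seq_printf(s, "%s: ", event->name);
	if (ret)
		ret = trace_seq_printf(s, print);
	put_cpu();	/* re-enables preemption */

	/* New: the scratch buffer is embedded in the reader's own
	 * struct trace_iterator, so no preemption toggling is needed.
	 */
	p = &iter->tmp_seq;
	trace_seq_init(p);
	ret = trace_seq_printf(s, "%s: ", event->name);
	if (ret)
		ret = trace_seq_printf(s, print);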
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -11,8 +11,6 @@ struct trace_array;
 struct tracer;
 struct dentry;
 
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
 struct trace_print_flags {
 	unsigned long		mask;
 	const char		*name;
@@ -58,6 +56,9 @@ struct trace_iterator {
 	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
 	unsigned long		iter_flags;
 
+	/* trace_seq for __print_flags() and __print_symbolic() etc. */
+	struct trace_seq	tmp_seq;
+
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -145,7 +145,7 @@
  *	struct trace_seq *s = &iter->seq;
  *	struct ftrace_raw_<call> *field; <-- defined in stage 1
  *	struct trace_entry *entry;
- *	struct trace_seq *p;
+ *	struct trace_seq *p = &iter->tmp_seq;
  *	int ret;
  *
  *	entry = iter->ent;
@@ -157,12 +157,10 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
  *	ret = trace_seq_printf(s, "%s: ", <call>);
  *	if (ret)
  *		ret = trace_seq_printf(s, <TP_printk> "\n");
- *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -216,7 +214,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 	struct trace_seq *s = &iter->seq;			\
 	struct ftrace_raw_##call *field;			\
 	struct trace_entry *entry;				\
-	struct trace_seq *p;					\
+	struct trace_seq *p = &iter->tmp_seq;			\
 	int ret;						\
 								\
 	event = container_of(trace_event, struct ftrace_event_call,	\
@@ -231,12 +229,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 								\
 	field = (typeof(field))entry;				\
 								\
-	p = &get_cpu_var(ftrace_event_seq);			\
 	trace_seq_init(p);					\
 	ret = trace_seq_printf(s, "%s: ", event->name);		\
 	if (ret)						\
 		ret = trace_seq_printf(s, print);		\
-	put_cpu();						\
 	if (!ret)						\
 		return TRACE_TYPE_PARTIAL_LINE;			\
 								\
@@ -255,7 +251,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 	struct trace_seq *s = &iter->seq;			\
 	struct ftrace_raw_##template *field;			\
 	struct trace_entry *entry;				\
-	struct trace_seq *p;					\
+	struct trace_seq *p = &iter->tmp_seq;			\
 	int ret;						\
 								\
 	entry = iter->ent;					\
@@ -267,12 +263,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 								\
 	field = (typeof(field))entry;				\
 								\
-	p = &get_cpu_var(ftrace_event_seq);			\
 	trace_seq_init(p);					\
 	ret = trace_seq_printf(s, "%s: ", #call);		\
 	if (ret)						\
 		ret = trace_seq_printf(s, print);		\
-	put_cpu();						\
 	if (!ret)						\
 		return TRACE_TYPE_PARTIAL_LINE;			\
 								\
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -16,9 +16,6 @@
 
 DECLARE_RWSEM(trace_event_mutex);
 
-DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
-EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
-
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
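
For context on why initializing p to &iter->tmp_seq is all the generated
functions need: __print_flags() and __print_symbolic() expand inline inside
ftrace_raw_output_##call() and write through a local variable literally named
p. A rough sketch of the flags macro from include/trace/ftrace.h of this era
(shown for illustration; details may differ):

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL } };			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

Because the macro captures whatever p is in scope, changing the declaration
of p at the top of each generated function is the only change the call sites
require.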