Commit 4040068d authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'tip/tracing/ftrace' of...

Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
parents d524e032 c3706f00
...@@ -468,8 +468,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) ...@@ -468,8 +468,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
* ignore such a protection. * ignore such a protection.
*/ */
asm volatile( asm volatile(
"1: " _ASM_MOV " (%[parent_old]), %[old]\n" "1: " _ASM_MOV " (%[parent]), %[old]\n"
"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n" "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
" movl $0, %[faulted]\n" " movl $0, %[faulted]\n"
"3:\n" "3:\n"
...@@ -481,9 +481,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) ...@@ -481,9 +481,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
_ASM_EXTABLE(1b, 4b) _ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b) _ASM_EXTABLE(2b, 4b)
: [parent_replaced] "=r" (parent), [old] "=r" (old), : [old] "=r" (old), [faulted] "=r" (faulted)
[faulted] "=r" (faulted) : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
: "memory" : "memory"
); );
......
...@@ -8,7 +8,7 @@ struct ring_buffer; ...@@ -8,7 +8,7 @@ struct ring_buffer;
struct ring_buffer_iter; struct ring_buffer_iter;
/* /*
* Don't reference this struct directly, use functions below. * Don't refer to this struct directly, use functions below.
*/ */
struct ring_buffer_event { struct ring_buffer_event {
u32 type:2, len:3, time_delta:27; u32 type:2, len:3, time_delta:27;
......
...@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off); ...@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off);
* tracing_off_permanent - permanently disable ring buffers * tracing_off_permanent - permanently disable ring buffers
* *
* This function, once called, will disable all ring buffers * This function, once called, will disable all ring buffers
* permanenty. * permanently.
*/ */
void tracing_off_permanent(void) void tracing_off_permanent(void)
{ {
...@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); ...@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
struct buffer_data_page { struct buffer_data_page {
u64 time_stamp; /* page time stamp */ u64 time_stamp; /* page time stamp */
local_t commit; /* write commited index */ local_t commit; /* write committed index */
unsigned char data[]; /* data of buffer page */ unsigned char data[]; /* data of buffer page */
}; };
...@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu { ...@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu {
struct list_head pages; struct list_head pages;
struct buffer_page *head_page; /* read from head */ struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */ struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* commited pages */ struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page; struct buffer_page *reader_page;
unsigned long overrun; unsigned long overrun;
unsigned long entries; unsigned long entries;
...@@ -303,7 +303,7 @@ struct ring_buffer_iter { ...@@ -303,7 +303,7 @@ struct ring_buffer_iter {
* check_pages - integrity check of buffer pages * check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test * @cpu_buffer: CPU buffer with pages to test
* *
* As a safty measure we check to make sure the data pages have not * As a safety measure we check to make sure the data pages have not
* been corrupted. * been corrupted.
*/ */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
...@@ -2332,13 +2332,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, ...@@ -2332,13 +2332,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_data_page *bpage) struct buffer_data_page *bpage,
unsigned int offset)
{ {
struct ring_buffer_event *event; struct ring_buffer_event *event;
unsigned long head; unsigned long head;
__raw_spin_lock(&cpu_buffer->lock); __raw_spin_lock(&cpu_buffer->lock);
for (head = 0; head < local_read(&bpage->commit); for (head = offset; head < local_read(&bpage->commit);
head += rb_event_length(event)) { head += rb_event_length(event)) {
event = __rb_data_page_index(bpage, head); event = __rb_data_page_index(bpage, head);
...@@ -2406,12 +2407,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) ...@@ -2406,12 +2407,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* to swap with a page in the ring buffer. * to swap with a page in the ring buffer.
* *
* for example: * for example:
* rpage = ring_buffer_alloc_page(buffer); * rpage = ring_buffer_alloc_read_page(buffer);
* if (!rpage) * if (!rpage)
* return error; * return error;
* ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
* if (ret) * if (ret >= 0)
* process_page(rpage); * process_page(rpage, ret);
* *
* When @full is set, the function will not return true unless * When @full is set, the function will not return true unless
* the writer is off the reader page. * the writer is off the reader page.
...@@ -2422,8 +2423,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) ...@@ -2422,8 +2423,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* responsible for that. * responsible for that.
* *
* Returns: * Returns:
* 1 if data has been transferred * >=0 if data has been transferred, returns the offset of consumed data.
* 0 if no data has been transferred. * <0 if no data has been transferred.
*/ */
int ring_buffer_read_page(struct ring_buffer *buffer, int ring_buffer_read_page(struct ring_buffer *buffer,
void **data_page, int cpu, int full) void **data_page, int cpu, int full)
...@@ -2432,7 +2433,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer, ...@@ -2432,7 +2433,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct buffer_data_page *bpage; struct buffer_data_page *bpage;
unsigned long flags; unsigned long flags;
int ret = 0; unsigned int read;
int ret = -1;
if (!data_page) if (!data_page)
return 0; return 0;
...@@ -2454,25 +2456,29 @@ int ring_buffer_read_page(struct ring_buffer *buffer, ...@@ -2454,25 +2456,29 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
/* check for data */ /* check for data */
if (!local_read(&cpu_buffer->reader_page->page->commit)) if (!local_read(&cpu_buffer->reader_page->page->commit))
goto out; goto out;
read = cpu_buffer->reader_page->read;
/* /*
* If the writer is already off of the read page, then simply * If the writer is already off of the read page, then simply
* switch the read page with the given page. Otherwise * switch the read page with the given page. Otherwise
* we need to copy the data from the reader to the writer. * we need to copy the data from the reader to the writer.
*/ */
if (cpu_buffer->reader_page == cpu_buffer->commit_page) { if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
unsigned int read = cpu_buffer->reader_page->read; unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
if (full) if (full)
goto out; goto out;
/* The writer is still on the reader page, we must copy */ /* The writer is still on the reader page, we must copy */
bpage = cpu_buffer->reader_page->page; memcpy(bpage->data + read, rpage->data + read, commit - read);
memcpy(bpage->data,
cpu_buffer->reader_page->page->data + read,
local_read(&bpage->commit) - read);
/* consume what was read */ /* consume what was read */
cpu_buffer->reader_page += read; cpu_buffer->reader_page->read = commit;
/* update bpage */
local_set(&bpage->commit, commit);
if (!read)
bpage->time_stamp = rpage->time_stamp;
} else { } else {
/* swap the pages */ /* swap the pages */
rb_init_page(bpage); rb_init_page(bpage);
...@@ -2481,10 +2487,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer, ...@@ -2481,10 +2487,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
cpu_buffer->reader_page->read = 0; cpu_buffer->reader_page->read = 0;
*data_page = bpage; *data_page = bpage;
} }
ret = 1; ret = read;
/* update the entry counter */ /* update the entry counter */
rb_remove_entries(cpu_buffer, bpage); rb_remove_entries(cpu_buffer, bpage, read);
out: out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
......
...@@ -1963,7 +1963,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf, ...@@ -1963,7 +1963,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
struct tracer_opt *trace_opts = current_trace->flags->opts; struct tracer_opt *trace_opts = current_trace->flags->opts;
/* calulate max size */ /* calculate max size */
for (i = 0; trace_options[i]; i++) { for (i = 0; trace_options[i]; i++) {
len += strlen(trace_options[i]); len += strlen(trace_options[i]);
len += 3; /* "no" and space */ len += 3; /* "no" and space */
......
...@@ -91,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -91,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
int enable_branch_tracing(struct trace_array *tr) int enable_branch_tracing(struct trace_array *tr)
{ {
int ret = 0;
mutex_lock(&branch_tracing_mutex); mutex_lock(&branch_tracing_mutex);
branch_tracer = tr; branch_tracer = tr;
/* /*
...@@ -103,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr) ...@@ -103,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
branch_tracing_enabled++; branch_tracing_enabled++;
mutex_unlock(&branch_tracing_mutex); mutex_unlock(&branch_tracing_mutex);
return ret; return 0;
} }
void disable_branch_tracing(void) void disable_branch_tracing(void)
......
...@@ -186,30 +186,30 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu) ...@@ -186,30 +186,30 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
ret = trace_seq_printf(s, ret = trace_seq_printf(s,
" ------------------------------------------\n"); " ------------------------------------------\n");
if (!ret) if (!ret)
TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_cpu(s, cpu); ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE) if (ret == TRACE_TYPE_PARTIAL_LINE)
TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_proc(s, prev_pid); ret = print_graph_proc(s, prev_pid);
if (ret == TRACE_TYPE_PARTIAL_LINE) if (ret == TRACE_TYPE_PARTIAL_LINE)
TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " => "); ret = trace_seq_printf(s, " => ");
if (!ret) if (!ret)
TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_proc(s, pid); ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE) if (ret == TRACE_TYPE_PARTIAL_LINE)
TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, ret = trace_seq_printf(s,
"\n ------------------------------------------\n\n"); "\n ------------------------------------------\n\n");
if (!ret) if (!ret)
TRACE_TYPE_PARTIAL_LINE; return TRACE_TYPE_PARTIAL_LINE;
return ret; return TRACE_TYPE_HANDLED;
} }
static struct ftrace_graph_ret_entry * static struct ftrace_graph_ret_entry *
......
...@@ -75,7 +75,7 @@ static void bts_trace_start(struct trace_array *tr) ...@@ -75,7 +75,7 @@ static void bts_trace_start(struct trace_array *tr)
} }
/* /*
* Start tracing on the current cpu. * Stop tracing on the current cpu.
* The argument is ignored. * The argument is ignored.
* *
* pre: bts_tracer_mutex must be locked. * pre: bts_tracer_mutex must be locked.
......
...@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) ...@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
} }
} }
const static struct stacktrace_ops backtrace_ops = { static const struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning, .warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol, .warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack, .stack = backtrace_stack,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment