Commit ef7a4a16 authored by Steven Rostedt

ring-buffer: fix ring_buffer_read_page

The ring_buffer_read_page was broken if it were to only copy part
of the page. This patch fixes that up as well as adds a parameter
to allow a length field, in order to only copy part of the buffer page.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
parent 41be4da4
...@@ -121,6 +121,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); ...@@ -121,6 +121,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(int cpu); u64 ring_buffer_time_stamp(int cpu);
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
size_t ring_buffer_page_len(void *page);
/* /*
* The below functions are fine to use outside the tracing facility. * The below functions are fine to use outside the tracing facility.
*/ */
...@@ -138,8 +141,8 @@ static inline int tracing_is_on(void) { return 0; } ...@@ -138,8 +141,8 @@ static inline int tracing_is_on(void) { return 0; }
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
void **data_page, int cpu, int full); size_t len, int cpu, int full);
enum ring_buffer_flags { enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0, RB_FL_OVERWRITE = 1 << 0,
......
...@@ -234,6 +234,11 @@ static void rb_init_page(struct buffer_data_page *bpage) ...@@ -234,6 +234,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
local_set(&bpage->commit, 0); local_set(&bpage->commit, 0);
} }
/* Return the number of bytes committed (written) to a buffer data page. */
size_t ring_buffer_page_len(void *page)
{
	struct buffer_data_page *bpage = page;

	return local_read(&bpage->commit);
}
/* /*
* Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
* this issue out. * this issue out.
...@@ -2378,8 +2383,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -2378,8 +2383,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
*/ */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{ {
unsigned long addr;
struct buffer_data_page *bpage; struct buffer_data_page *bpage;
unsigned long addr;
addr = __get_free_page(GFP_KERNEL); addr = __get_free_page(GFP_KERNEL);
if (!addr) if (!addr)
...@@ -2387,6 +2392,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) ...@@ -2387,6 +2392,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
bpage = (void *)addr; bpage = (void *)addr;
rb_init_page(bpage);
return bpage; return bpage;
} }
...@@ -2406,6 +2413,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) ...@@ -2406,6 +2413,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* ring_buffer_read_page - extract a page from the ring buffer * ring_buffer_read_page - extract a page from the ring buffer
* @buffer: buffer to extract from * @buffer: buffer to extract from
* @data_page: the page to use allocated from ring_buffer_alloc_read_page * @data_page: the page to use allocated from ring_buffer_alloc_read_page
* @len: amount to extract
* @cpu: the cpu of the buffer to extract * @cpu: the cpu of the buffer to extract
* @full: should the extraction only happen when the page is full. * @full: should the extraction only happen when the page is full.
* *
...@@ -2418,7 +2426,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) ...@@ -2418,7 +2426,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* rpage = ring_buffer_alloc_read_page(buffer); * rpage = ring_buffer_alloc_read_page(buffer);
* if (!rpage) * if (!rpage)
* return error; * return error;
* ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
* if (ret >= 0) * if (ret >= 0)
* process_page(rpage, ret); * process_page(rpage, ret);
* *
...@@ -2435,71 +2443,89 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) ...@@ -2435,71 +2443,89 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* <0 if no data has been transferred. * <0 if no data has been transferred.
*/ */
int ring_buffer_read_page(struct ring_buffer *buffer, int ring_buffer_read_page(struct ring_buffer *buffer,
void **data_page, int cpu, int full) void **data_page, size_t len, int cpu, int full)
{ {
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct buffer_data_page *bpage; struct buffer_data_page *bpage;
struct buffer_page *reader;
unsigned long flags; unsigned long flags;
unsigned int commit;
unsigned int read; unsigned int read;
int ret = -1; int ret = -1;
if (!data_page) if (!data_page)
return 0; return -1;
bpage = *data_page; bpage = *data_page;
if (!bpage) if (!bpage)
return 0; return -1;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags); spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/* reader = rb_get_reader_page(cpu_buffer);
* rb_buffer_peek will get the next ring buffer if if (!reader)
* the current reader page is empty.
*/
event = rb_buffer_peek(buffer, cpu, NULL);
if (!event)
goto out; goto out;
/* check for data */ event = rb_reader_event(cpu_buffer);
if (!local_read(&cpu_buffer->reader_page->page->commit))
goto out; read = reader->read;
commit = rb_page_commit(reader);
read = cpu_buffer->reader_page->read;
/* /*
* If the writer is already off of the read page, then simply * If len > what's left on the page, and the writer is also off of
* switch the read page with the given page. Otherwise * the read page, then simply switch the read page with the given
* we need to copy the data from the reader to the writer. * page. Otherwise we need to copy the data from the reader to the
* writer.
*/ */
if (cpu_buffer->reader_page == cpu_buffer->commit_page) { if ((len < (commit - read)) ||
unsigned int commit = rb_page_commit(cpu_buffer->reader_page); cpu_buffer->reader_page == cpu_buffer->commit_page) {
struct buffer_data_page *rpage = cpu_buffer->reader_page->page; struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
unsigned int pos = read;
unsigned int size;
if (full) if (full)
goto out; goto out;
/* The writer is still on the reader page, we must copy */
memcpy(bpage->data + read, rpage->data + read, commit - read);
/* consume what was read */ if (len > (commit - read))
cpu_buffer->reader_page->read = commit; len = (commit - read);
size = rb_event_length(event);
if (len < size)
goto out;
/* Need to copy one event at a time */
do {
memcpy(bpage->data + pos, rpage->data + pos, size);
len -= size;
rb_advance_reader(cpu_buffer);
pos = reader->read;
event = rb_reader_event(cpu_buffer);
size = rb_event_length(event);
} while (len > size);
/* update bpage */ /* update bpage */
local_set(&bpage->commit, commit); local_set(&bpage->commit, pos);
if (!read) bpage->time_stamp = rpage->time_stamp;
bpage->time_stamp = rpage->time_stamp;
} else { } else {
/* swap the pages */ /* swap the pages */
rb_init_page(bpage); rb_init_page(bpage);
bpage = cpu_buffer->reader_page->page; bpage = reader->page;
cpu_buffer->reader_page->page = *data_page; reader->page = *data_page;
local_set(&cpu_buffer->reader_page->write, 0); local_set(&reader->write, 0);
cpu_buffer->reader_page->read = 0; reader->read = 0;
*data_page = bpage; *data_page = bpage;
/* update the entry counter */
rb_remove_entries(cpu_buffer, bpage, read);
} }
ret = read; ret = read;
/* update the entry counter */
rb_remove_entries(cpu_buffer, bpage, read);
out: out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment