Commit aa20ae84 authored by Steven Rostedt, committed by Steven Rostedt

ring-buffer: move big if statement down

In the hot path of the ring buffer "__rb_reserve_next" there is a big
if statement whose body never returns back to the main work flow.

	code;

	if (cross to next page) {

		[ lots of code ]

		return;
	}

	more code;

The condition is also the unlikely path, although we do not annotate it
with unlikely() because gcc handles it fine as is. The condition is true when
the write crosses a page boundary, and we need to start on a new page.
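
For reference, the check itself is just the page-boundary comparison, using
the names from the diff below ("write" is the offset just past this event on
the current page):

	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		[ cross to the next page ]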

Having this if statement makes the code hard to read, but moving the work
into a separate function is not appropriate either, because the block uses a
lot of variables that were set before the if statement, and we do not want
to pass them all as parameters.

This patch changes it to a goto:

	code;

	if (cross to next page)
		goto next_page;

	more code;

	return;

next_page:

	[ lots of code]

This makes the code easier to understand, and a bit more obvious.
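
As a rough, self-contained illustration of the same restructuring (this is
not the ring buffer code, just the pattern, with made-up names such as
reserve() and PAGE_SZ):

	#include <stddef.h>

	#define PAGE_SZ 4096

	/* hypothetical example: reserve 'len' bytes on a fixed-size page */
	static char *reserve(char *page, size_t *offset, size_t len)
	{
		size_t start = *offset;

		/* rare case: the request would cross the page boundary */
		if (start + len > PAGE_SZ)
			goto next_page;

		/* common case reads straight through and returns here */
		*offset = start + len;
		return page + start;

	 next_page:
		/* the slow path still sees 'start' and 'len'; no helper
		 * function call, so nothing has to be passed around */
		return NULL;
	}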

The output from gcc is practically identical. For some reason, gcc decided
to use different registers when I switched it to a goto. But other than that,
the logic is the same.

[ Impact: easier to read code ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 94487d6d
@@ -1159,6 +1159,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
 	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
+	struct buffer_page *next_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
@@ -1173,137 +1174,140 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	tail = write - length;

 	/* See if we shot pass the end of this buffer page */
-	if (write > BUF_PAGE_SIZE) {
-		struct buffer_page *next_page = tail_page;
-
-		local_irq_save(flags);
-		/*
-		 * Since the write to the buffer is still not
-		 * fully lockless, we must be careful with NMIs.
-		 * The locks in the writers are taken when a write
-		 * crosses to a new page. The locks protect against
-		 * races with the readers (this will soon be fixed
-		 * with a lockless solution).
-		 *
-		 * Because we can not protect against NMIs, and we
-		 * want to keep traces reentrant, we need to manage
-		 * what happens when we are in an NMI.
-		 *
-		 * NMIs can happen after we take the lock.
-		 * If we are in an NMI, only take the lock
-		 * if it is not already taken. Otherwise
-		 * simply fail.
-		 */
-		if (unlikely(in_nmi())) {
-			if (!__raw_spin_trylock(&cpu_buffer->lock)) {
-				cpu_buffer->nmi_dropped++;
-				goto out_reset;
-			}
-		} else
-			__raw_spin_lock(&cpu_buffer->lock);
-
-		lock_taken = true;
-
-		rb_inc_page(cpu_buffer, &next_page);
-
-		head_page = cpu_buffer->head_page;
-		reader_page = cpu_buffer->reader_page;
-
-		/* we grabbed the lock before incrementing */
-		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
-			goto out_reset;
-
-		/*
-		 * If for some reason, we had an interrupt storm that made
-		 * it all the way around the buffer, bail, and warn
-		 * about it.
-		 */
-		if (unlikely(next_page == commit_page)) {
-			cpu_buffer->commit_overrun++;
-			goto out_reset;
-		}
-
-		if (next_page == head_page) {
-			if (!(buffer->flags & RB_FL_OVERWRITE))
-				goto out_reset;
-
-			/* tail_page has not moved yet? */
-			if (tail_page == cpu_buffer->tail_page) {
-				/* count overflows */
-				cpu_buffer->overrun +=
-					local_read(&head_page->entries);
-
-				rb_inc_page(cpu_buffer, &head_page);
-				cpu_buffer->head_page = head_page;
-				cpu_buffer->head_page->read = 0;
-			}
-		}
-
-		/*
-		 * If the tail page is still the same as what we think
-		 * it is, then it is up to us to update the tail
-		 * pointer.
-		 */
-		if (tail_page == cpu_buffer->tail_page) {
-			local_set(&next_page->write, 0);
-			local_set(&next_page->entries, 0);
-			local_set(&next_page->page->commit, 0);
-			cpu_buffer->tail_page = next_page;
-
-			/* reread the time stamp */
-			*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
-			cpu_buffer->tail_page->page->time_stamp = *ts;
-		}
-
-		/*
-		 * The actual tail page has moved forward.
-		 */
-		if (tail < BUF_PAGE_SIZE) {
-			/* Mark the rest of the page with padding */
-			event = __rb_page_index(tail_page, tail);
-			rb_event_set_padding(event);
-		}
-
-		if (tail <= BUF_PAGE_SIZE)
-			/* Set the write back to the previous setting */
-			local_set(&tail_page->write, tail);
-
-		/*
-		 * If this was a commit entry that failed,
-		 * increment that too
-		 */
-		if (tail_page == cpu_buffer->commit_page &&
-		    tail == rb_commit_index(cpu_buffer)) {
-			rb_set_commit_to_write(cpu_buffer);
-		}
-
-		__raw_spin_unlock(&cpu_buffer->lock);
-		local_irq_restore(flags);
-
-		/* fail and let the caller try again */
-		return ERR_PTR(-EAGAIN);
-	}
-
-	/* We reserved something on the buffer */
-
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
-	event = __rb_page_index(tail_page, tail);
-	rb_update_event(event, type, length);
-
-	/* The passed in type is zero for DATA */
-	if (likely(!type))
-		local_inc(&tail_page->entries);
-
-	/*
-	 * If this is a commit and the tail is zero, then update
-	 * this page's time stamp.
-	 */
-	if (!tail && rb_is_commit(cpu_buffer, event))
-		cpu_buffer->commit_page->page->time_stamp = *ts;
-
-	return event;
+	if (write > BUF_PAGE_SIZE)
+		goto next_page;
+
+	/* We reserved something on the buffer */
+
+	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+		return NULL;
+
+	event = __rb_page_index(tail_page, tail);
+	rb_update_event(event, type, length);
+
+	/* The passed in type is zero for DATA */
+	if (likely(!type))
+		local_inc(&tail_page->entries);
+
+	/*
+	 * If this is a commit and the tail is zero, then update
+	 * this page's time stamp.
+	 */
+	if (!tail && rb_is_commit(cpu_buffer, event))
+		cpu_buffer->commit_page->page->time_stamp = *ts;
+
+	return event;
+
+ next_page:
+
+	next_page = tail_page;
+
+	local_irq_save(flags);
+	/*
+	 * Since the write to the buffer is still not
+	 * fully lockless, we must be careful with NMIs.
+	 * The locks in the writers are taken when a write
+	 * crosses to a new page. The locks protect against
+	 * races with the readers (this will soon be fixed
+	 * with a lockless solution).
+	 *
+	 * Because we can not protect against NMIs, and we
+	 * want to keep traces reentrant, we need to manage
+	 * what happens when we are in an NMI.
+	 *
+	 * NMIs can happen after we take the lock.
+	 * If we are in an NMI, only take the lock
+	 * if it is not already taken. Otherwise
+	 * simply fail.
+	 */
+	if (unlikely(in_nmi())) {
+		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
+			cpu_buffer->nmi_dropped++;
+			goto out_reset;
+		}
+	} else
+		__raw_spin_lock(&cpu_buffer->lock);
+
+	lock_taken = true;
+
+	rb_inc_page(cpu_buffer, &next_page);
+
+	head_page = cpu_buffer->head_page;
+	reader_page = cpu_buffer->reader_page;
+
+	/* we grabbed the lock before incrementing */
+	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+		goto out_reset;
+
+	/*
+	 * If for some reason, we had an interrupt storm that made
+	 * it all the way around the buffer, bail, and warn
+	 * about it.
+	 */
+	if (unlikely(next_page == commit_page)) {
+		cpu_buffer->commit_overrun++;
+		goto out_reset;
+	}
+
+	if (next_page == head_page) {
+		if (!(buffer->flags & RB_FL_OVERWRITE))
+			goto out_reset;
+
+		/* tail_page has not moved yet? */
+		if (tail_page == cpu_buffer->tail_page) {
+			/* count overflows */
+			cpu_buffer->overrun +=
+				local_read(&head_page->entries);
+
+			rb_inc_page(cpu_buffer, &head_page);
+			cpu_buffer->head_page = head_page;
+			cpu_buffer->head_page->read = 0;
+		}
+	}
+
+	/*
+	 * If the tail page is still the same as what we think
+	 * it is, then it is up to us to update the tail
+	 * pointer.
+	 */
+	if (tail_page == cpu_buffer->tail_page) {
+		local_set(&next_page->write, 0);
+		local_set(&next_page->entries, 0);
+		local_set(&next_page->page->commit, 0);
+		cpu_buffer->tail_page = next_page;
+
+		/* reread the time stamp */
+		*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
+		cpu_buffer->tail_page->page->time_stamp = *ts;
+	}
+
+	/*
+	 * The actual tail page has moved forward.
+	 */
+	if (tail < BUF_PAGE_SIZE) {
+		/* Mark the rest of the page with padding */
+		event = __rb_page_index(tail_page, tail);
+		rb_event_set_padding(event);
+	}
+
+	if (tail <= BUF_PAGE_SIZE)
+		/* Set the write back to the previous setting */
+		local_set(&tail_page->write, tail);
+
+	/*
+	 * If this was a commit entry that failed,
+	 * increment that too
+	 */
+	if (tail_page == cpu_buffer->commit_page &&
+	    tail == rb_commit_index(cpu_buffer)) {
+		rb_set_commit_to_write(cpu_buffer);
+	}
+
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
+
+	/* fail and let the caller try again */
+	return ERR_PTR(-EAGAIN);

 out_reset:
 	/* reset write */
...