Commit 18421015 authored by Steven Rostedt, committed by Steven Rostedt

ring-buffer: Use sync sched protection on ring buffer resizing

There was a comment in the ring buffer code stating that the calling
layers should prevent tracing or reading of the ring buffer while it
is being resized. I have discovered that the tracers do not honor
this arrangement.

This patch moves the disabling and synchronizing of the ring buffer
to a higher layer during resizing. This guarantees that no writes
occur while the resize takes place.
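
In outline, the resize path now follows the pattern below. This is a
simplified sketch of the scheme this patch introduces, not the complete
function; as the diff shows, the error paths must also re-enable
recording:

	atomic_inc(&buffer->record_disabled);	/* turn away new writers */

	/* Make sure all writers are done with this buffer. */
	synchronize_sched();

	mutex_lock(&buffer->mutex);
	/* ... remove or insert pages on each per-CPU buffer ... */
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);	/* re-enable recording */

Because the writers run with preemption disabled, synchronize_sched()
does not return until any writer that raced with the record_disabled
increment has finished, so the resize proceeds with no writers in the
buffer.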
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent d954fbf0
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
@@ -1214,9 +1211,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
-
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
@@ -1245,8 +1236,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
 }
 
 /**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);