Commit 09661f75 authored by Petr Pavlu, committed by Steven Rostedt (Google)

ring-buffer: Fix reader locking when changing the sub buffer order

The function ring_buffer_subbuf_order_set() updates each
ring_buffer_per_cpu and installs new sub buffers that match the requested
page order. This operation may be invoked concurrently with readers that
rely on some of the modified data, such as the head bit (RB_PAGE_HEAD) or
the ring_buffer_per_cpu.pages and reader_page pointers. However,
ring_buffer_subbuf_order_set() acquires no exclusive access, so modifying
this data while a reader is concurrently operating on it can result in
invalid memory accesses and various crashes.

Fix the problem by taking the reader_lock when updating a specific
ring_buffer_per_cpu in ring_buffer_subbuf_order_set().
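
In outline, the per-CPU update now follows this pattern (a condensed
sketch of the diff below, with the surrounding setup omitted; not the
verbatim kernel code):

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	/* Clear RB_PAGE_HEAD so the page list can be relinked safely */
	rb_head_page_deactivate(cpu_buffer);

	/*
	 * Collect the old sub buffers on a private list instead of
	 * freeing them here; freeing pages under the raw spinlock
	 * (with interrupts disabled) must be avoided.
	 */
	list_add(&old_pages, cpu_buffer->pages);
	list_add(&cpu_buffer->reader_page->list, &old_pages);

	/* ... install the new reader_page, pages and head_page ... */

	rb_head_page_activate(cpu_buffer);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	/* Only now, outside the lock, free the old sub buffers */
	list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}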

Link: https://lore.kernel.org/linux-trace-kernel/20240715145141.5528-1-petr.pavlu@suse.com/
Link: https://lore.kernel.org/linux-trace-kernel/20241010195849.2f77cc3f@gandalf.local.home/
Link: https://lore.kernel.org/linux-trace-kernel/20241011112850.17212b25@gandalf.local.home/

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/20241015112440.26987-1-petr.pavlu@suse.com
Fixes: 8e7b58c2 ("ring-buffer: Just update the subbuffers when changing their allocation order")
Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent 2cf97338
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6728,39 +6728,38 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 	}
 
 	for_each_buffer_cpu(buffer, cpu) {
+		struct buffer_data_page *old_free_data_page;
+		struct list_head old_pages;
+		unsigned long flags;
+
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			continue;
 
 		cpu_buffer = buffer->buffers[cpu];
 
+		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 		/* Clear the head bit to make the link list normal to read */
 		rb_head_page_deactivate(cpu_buffer);
 
-		/* Now walk the list and free all the old sub buffers */
-		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
-			list_del_init(&bpage->list);
-			free_buffer_page(bpage);
-		}
-		/* The above loop stopped an the last page needing to be freed */
-		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
-		free_buffer_page(bpage);
-
-		/* Free the current reader page */
-		free_buffer_page(cpu_buffer->reader_page);
+		/*
+		 * Collect buffers from the cpu_buffer pages list and the
+		 * reader_page on old_pages, so they can be freed later when not
+		 * under a spinlock. The pages list is a linked list with no
+		 * head, adding old_pages turns it into a regular list with
+		 * old_pages being the head.
+		 */
+		list_add(&old_pages, cpu_buffer->pages);
+		list_add(&cpu_buffer->reader_page->list, &old_pages);
 
 		/* One page was allocated for the reader page */
 		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
 						     struct buffer_page, list);
 		list_del_init(&cpu_buffer->reader_page->list);
 
-		/* The cpu_buffer pages are a link list with no head */
+		/* Install the new pages, remove the head from the list */
 		cpu_buffer->pages = cpu_buffer->new_pages.next;
-		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
-		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
-
-		/* Clear the new_pages list */
-		INIT_LIST_HEAD(&cpu_buffer->new_pages);
+		list_del_init(&cpu_buffer->new_pages);
 
 		cpu_buffer->head_page
 			= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -6769,11 +6768,20 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
 		cpu_buffer->nr_pages_to_update = 0;
 
-		free_pages((unsigned long)cpu_buffer->free_page, old_order);
+		old_free_data_page = cpu_buffer->free_page;
 		cpu_buffer->free_page = NULL;
 
 		rb_head_page_activate(cpu_buffer);
 
+		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+		/* Free old sub buffers */
+		list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
+			list_del_init(&bpage->list);
+			free_buffer_page(bpage);
+		}
+		free_pages((unsigned long)old_free_data_page, old_order);
+
 		rb_check_pages(cpu_buffer);
 	}
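
The list_add(&old_pages, cpu_buffer->pages) step works because
cpu_buffer->pages points into a circular list that has no dedicated head
node: splicing a local list_head into the ring gives it one, after which
an ordinary list walk visits every sub buffer. Below is a minimal,
self-contained userspace model of that trick; the list helpers mirror
<linux/list.h>, and the struct page naming here is illustrative, not
kernel code.

	#include <stdio.h>
	#include <stddef.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	/* Same insertion logic as the kernel's list_add(): link @item
	 * in right after @head. */
	static void list_add(struct list_head *item, struct list_head *head)
	{
		item->next = head->next;
		item->prev = head;
		head->next->prev = item;
		head->next = item;
	}

	struct page {
		int id;
		struct list_head list;
	};

	int main(void)
	{
		/* Three pages linked into a circular list with no
		 * dedicated head node, like cpu_buffer->pages. */
		struct page p[3] = {
			{ 0, { &p[1].list, &p[2].list } },
			{ 1, { &p[2].list, &p[0].list } },
			{ 2, { &p[0].list, &p[1].list } },
		};
		struct list_head *pages = &p[0].list;	/* plays cpu_buffer->pages */
		struct list_head old_pages;		/* local head, as in the patch */

		/* Splice the local head into the ring: the headless
		 * circular list becomes an ordinary list headed by
		 * old_pages. */
		list_add(&old_pages, pages);

		/* A normal walk from old_pages now visits every page once. */
		for (struct list_head *pos = old_pages.next; pos != &old_pages;
		     pos = pos->next) {
			struct page *pg = (struct page *)((char *)pos -
					offsetof(struct page, list));
			printf("page %d\n", pg->id);	/* prints 1, 2, 0 */
		}

		return 0;
	}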