Commit 3e03fb7f authored by Steven Rostedt's avatar Steven Rostedt Committed by Ingo Molnar

ring-buffer: convert to raw spinlocks

Impact: no lockdep debugging of ring buffer

The problem with running lockdep on the ring buffer is that the
ring buffer is the core infrastructure of ftrace. What happens is
that the tracer will start tracing the lockdep code while lockdep
is testing the ring buffer's locks.  This can cause lockdep to
fail due to testing cases that have not fully finished their
locking transition.

This patch converts the spin locks used by the ring buffer back
into raw spin locks which lockdep does not check.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9036990d
...@@ -154,7 +154,7 @@ static inline int test_time_stamp(u64 delta) ...@@ -154,7 +154,7 @@ static inline int test_time_stamp(u64 delta)
struct ring_buffer_per_cpu { struct ring_buffer_per_cpu {
int cpu; int cpu;
struct ring_buffer *buffer; struct ring_buffer *buffer;
spinlock_t lock; raw_spinlock_t lock;
struct lock_class_key lock_key; struct lock_class_key lock_key;
struct list_head pages; struct list_head pages;
struct buffer_page *head_page; /* read from head */ struct buffer_page *head_page; /* read from head */
...@@ -291,7 +291,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) ...@@ -291,7 +291,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->cpu = cpu; cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer; cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->lock); cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&cpu_buffer->pages); INIT_LIST_HEAD(&cpu_buffer->pages);
page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
...@@ -854,7 +854,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -854,7 +854,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
if (write > BUF_PAGE_SIZE) { if (write > BUF_PAGE_SIZE) {
struct buffer_page *next_page = tail_page; struct buffer_page *next_page = tail_page;
spin_lock_irqsave(&cpu_buffer->lock, flags); local_irq_save(flags);
__raw_spin_lock(&cpu_buffer->lock);
rb_inc_page(cpu_buffer, &next_page); rb_inc_page(cpu_buffer, &next_page);
...@@ -930,7 +931,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -930,7 +931,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
rb_set_commit_to_write(cpu_buffer); rb_set_commit_to_write(cpu_buffer);
} }
spin_unlock_irqrestore(&cpu_buffer->lock, flags); __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
/* fail and let the caller try again */ /* fail and let the caller try again */
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
...@@ -953,7 +955,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -953,7 +955,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
return event; return event;
out_unlock: out_unlock:
spin_unlock_irqrestore(&cpu_buffer->lock, flags); __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return NULL; return NULL;
} }
...@@ -1524,7 +1527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) ...@@ -1524,7 +1527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
struct buffer_page *reader = NULL; struct buffer_page *reader = NULL;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&cpu_buffer->lock, flags); local_irq_save(flags);
__raw_spin_lock(&cpu_buffer->lock);
again: again:
reader = cpu_buffer->reader_page; reader = cpu_buffer->reader_page;
...@@ -1574,7 +1578,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) ...@@ -1574,7 +1578,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
goto again; goto again;
out: out:
spin_unlock_irqrestore(&cpu_buffer->lock, flags); __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader; return reader;
} }
...@@ -1815,9 +1820,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) ...@@ -1815,9 +1820,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled); atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched(); synchronize_sched();
spin_lock_irqsave(&cpu_buffer->lock, flags); local_irq_save(flags);
__raw_spin_lock(&cpu_buffer->lock);
ring_buffer_iter_reset(iter); ring_buffer_iter_reset(iter);
spin_unlock_irqrestore(&cpu_buffer->lock, flags); __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return iter; return iter;
} }
...@@ -1903,11 +1910,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) ...@@ -1903,11 +1910,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (!cpu_isset(cpu, buffer->cpumask)) if (!cpu_isset(cpu, buffer->cpumask))
return; return;
spin_lock_irqsave(&cpu_buffer->lock, flags); local_irq_save(flags);
__raw_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer); rb_reset_cpu(cpu_buffer);
spin_unlock_irqrestore(&cpu_buffer->lock, flags); __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment