Commit 259d69b7 authored by Peter Zijlstra, committed by Ingo Molnar

locking/percpu-rwsem: Add down_read_preempt_disable()

Provide a down_read()/up_read() variant that keeps preemption disabled
over the whole thing, when possible.

This avoids a needless preemption point for constructs such as:

	percpu_down_read(&global_rwsem);
	spin_lock(&lock);
	...
	spin_unlock(&lock);
	percpu_up_read(&global_rwsem);

Which perturbs timings. In particular, it was found to cure a
performance regression in a follow-up patch in fs/locks.c.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7c3f654d
@@ -28,7 +28,7 @@ static struct percpu_rw_semaphore name = {		\
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();
 
@@ -46,13 +46,19 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	__this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
-	preempt_enable();
+	barrier();
 	/*
-	 * The barrier() from preempt_enable() prevents the compiler from
+	 * The barrier() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
 }
 
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+	percpu_down_read_preempt_disable(sem);
+	preempt_enable();
+}
+
 static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
 	int ret = 1;
@@ -76,13 +82,13 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	return ret;
 }
 
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
 {
 	/*
-	 * The barrier() in preempt_disable() prevents the compiler from
+	 * The barrier() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
-	preempt_disable();
+	barrier();
 	/*
 	 * Same as in percpu_down_read().
 	 */
@@ -95,6 +101,12 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
 }
 
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+	preempt_disable();
+	percpu_up_read_preempt_enable(sem);
+}
+
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment