Commit 02e525b2 authored by Peter Zijlstra, committed by Ingo Molnar

locking/percpu-rwsem: Remove preempt_disable variants

Effective revert of commit:

  87709e28 ("fs/locks: Use percpu_down_read_preempt_disable()")

This is causing major pain for PREEMPT_RT.

Sebastian did a lot of lockperf runs on 2- and 4-node machines with all
preemption modes (PREEMPT=n should be an obvious NOP for this patch and
thus serves as a good control), and no result showed significance beyond
2-sigma (the PREEMPT=n results were almost empty even at 1-sigma).
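
Concretely, every file_rwsem reader in fs/locks.c goes back to the plain
percpu-rwsem API. As a rough before/after sketch of the caller pattern
(illustrative only, not a verbatim quote of fs/locks.c; "ctx" stands for
the caller's struct file_lock_context):

	/* Before (87709e28): the read side ran with preemption disabled. */
	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* ... walk or modify the inode's lock lists ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	/* After: a normal, preemptible percpu-rwsem reader. */
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* ... walk or modify the inode's lock lists ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

On !PREEMPT_RT the spin_lock() still disables preemption around the actual
list manipulation, so only the window around taking flc_lock becomes
preemptible; on PREEMPT_RT, where spinlocks are sleeping locks, dropping
the explicit preempt_disable() is what makes these sections viable at all.
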
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0614621d
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1058,7 +1058,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 			return -ENOMEM;
 	}
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
@@ -1100,7 +1100,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	if (new_fl)
 		locks_free_lock(new_fl);
 	locks_dispose_list(&dispose);
@@ -1138,7 +1138,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		new_fl2 = locks_alloc_lock();
 	}
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	/*
 	 * New lock request. Walk all POSIX locks and look for conflicts. If
@@ -1312,7 +1312,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	/*
 	 * Free any unused locks.
 	 */
@@ -1584,7 +1584,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 			return error;
 	}
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
@@ -1636,13 +1636,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 	locks_insert_block(fl, new_fl, leases_conflict);
 	trace_break_lease_block(inode, new_fl);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
 						!new_fl->fl_blocker, break_time);
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	trace_break_lease_unblock(inode, new_fl);
 	locks_delete_block(new_fl);
@@ -1659,7 +1659,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	locks_free_lock(new_fl);
 	return error;
@@ -1729,7 +1729,7 @@ int fcntl_getlease(struct file *filp)
 	ctx = smp_load_acquire(&inode->i_flctx);
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-		percpu_down_read_preempt_disable(&file_rwsem);
+		percpu_down_read(&file_rwsem);
 		spin_lock(&ctx->flc_lock);
 		time_out_leases(inode, &dispose);
 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1739,7 +1739,7 @@ int fcntl_getlease(struct file *filp)
 			break;
 		}
 		spin_unlock(&ctx->flc_lock);
-		percpu_up_read_preempt_enable(&file_rwsem);
+		percpu_up_read(&file_rwsem);
 		locks_dispose_list(&dispose);
 	}
@@ -1813,7 +1813,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
 			return -EINVAL;
 	}
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
@@ -1884,7 +1884,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
 		lease->fl_lmops->lm_setup(lease, priv);
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	if (is_deleg)
 		inode_unlock(inode);
@@ -1907,7 +1907,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 		return error;
 	}
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 		if (fl->fl_file == filp &&
@@ -1920,7 +1920,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 	if (victim)
 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	return error;
 }
@@ -2643,13 +2643,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 	if (list_empty(&ctx->flc_lease))
 		return;
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
 		if (filp == fl->fl_file)
 			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 }
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();
 
@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
 	__this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
-	barrier();
 	/*
-	 * The barrier() prevents the compiler from
+	 * The preempt_enable() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
-	percpu_down_read_preempt_disable(sem);
 	preempt_enable();
 }
 
@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	return ret;
 }
 
-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	/*
-	 * The barrier() prevents the compiler from
-	 * bleeding the critical section out.
-	 */
-	barrier();
+	preempt_disable();
 	/*
 	 * Same as in percpu_down_read().
 	 */
@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
 
 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
 }
 
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
-	preempt_disable();
-	percpu_up_read_preempt_enable(sem);
-}
-
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
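
For reference, the two read-side helpers in include/linux/percpu-rwsem.h
end up looking roughly like this after the patch; the lines that fall
outside the hunks above (the lockdep annotation in percpu_down_read() and
the fast/slow-path split in percpu_up_read()) are reconstructed from
context and may not match the tree verbatim:

	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
	{
		might_sleep();
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

		preempt_disable();
		__this_cpu_inc(*sem->read_count);
		if (unlikely(!rcu_sync_is_idle(&sem->rss)))
			__percpu_down_read(sem, false); /* Unconditional memory barrier */
		/*
		 * The preempt_enable() prevents the compiler from
		 * bleeding the critical section out.
		 */
		preempt_enable();
	}

	static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
	{
		preempt_disable();
		/*
		 * Same as in percpu_down_read().
		 */
		if (likely(rcu_sync_is_idle(&sem->rss)))
			__this_cpu_dec(*sem->read_count);
		else
			__percpu_up_read(sem); /* Unconditional memory barrier */
		preempt_enable();

		rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
	}

The net effect: readers still bump the per-CPU count inside a short
preempt_disable()/preempt_enable() window, but preemption is no longer
held disabled across the caller's whole critical section.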