Commit 664b4e24 authored by Michal Hocko, committed by Ingo Molnar

locking/rwsem, x86: Provide __down_write_killable()

which uses the same fast path as __down_write() except that it falls back to
the call_rwsem_down_write_failed_killable() slow path and returns -EINTR if
killed. To prevent code duplication, extract the skeleton of
__down_write() into a helper macro which just takes the semaphore
and the slow-path function to be called.
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Low <jason.low2@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1460041951-22347-11-git-send-email-mhocko@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4edab14e
@@ -99,21 +99,36 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
 * lock for writing
 */
/*
 * ____down_write(sem, slow_path) - skeleton of the write-lock fast path.
 *
 * Atomically adds RWSEM_ACTIVE_WRITE_BIAS (0xffff0001, per the inline
 * comment below) to sem->count with a locked xadd; xadd leaves the old
 * count in the register, so a zero active mask before the add means the
 * lock was taken uncontended and the "call" to @slow_path is skipped.
 * @slow_path is a string naming an assembly thunk, spliced into the asm
 * template; the thunk receives the semaphore in %rax/%eax (the "a"
 * constraint) rather than via the normal C calling convention.
 *
 * The statement expression evaluates to `ret`, which starts as the sem
 * pointer and is tied to %rax via the "+a" output constraint -- so the
 * slow path may replace it (the killable caller checks it with IS_ERR()).
 */
#define ____down_write(sem, slow_path) \
({ \
long tmp; \
struct rw_semaphore* ret = sem; \
asm volatile("# beginning down_write\n\t" \
LOCK_PREFIX " xadd %1,(%2)\n\t" \
/* adds 0xffff0001, returns the old value */ \
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
/* was the active mask 0 before? */\
" jz 1f\n" \
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
: "+m" (sem->count), "=d" (tmp), "+a" (ret) \
: "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \
})
static inline void __down_write(struct rw_semaphore *sem)
{
	____down_write(sem, "call_rwsem_down_write_failed");
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
		return -EINTR;

	return 0;
}
/* /*
......
@@ -106,6 +106,14 @@ ENTRY(call_rwsem_down_write_failed)
ret
ENDPROC(call_rwsem_down_write_failed)
/*
 * Slow-path thunk for the killable write lock.  The inline-asm fast
 * path passes the semaphore in %rax (its "a" constraint), so move it
 * into %rdi -- the first C argument register -- before calling the C
 * slow path.  save/restore_common_regs shield the caller's registers
 * from the C function.
 * NOTE(review): presumably the C function's return value is left in
 * %rax for the fast path's IS_ERR() check -- standard ABI; the macros
 * are defined elsewhere, so confirm %rax is not restored over.
 */
ENTRY(call_rwsem_down_write_failed_killable)
save_common_regs
movq %rax,%rdi
call rwsem_down_write_failed_killable
restore_common_regs
ret
ENDPROC(call_rwsem_down_write_failed_killable)
ENTRY(call_rwsem_wake)
FRAME_BEGIN
/* do nothing if still outstanding active readers */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment