Commit 3469d261 authored by Linus Torvalds

Merge branch 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull support for killable rwsems from Ingo Molnar:
 "This, by Michal Hocko, implements down_write_killable().

  The main use case will be to update mm_sem usage sites to use this new
  API, to allow the mm-reaper introduced in commit aac45363 ("mm,
  oom: introduce oom reaper") to tear down oom victim address spaces
  asynchronously with minimum latencies and without deadlock worries"

[ The vfs will want it too as the inode lock is changed from a mutex to
  an rwsem due to the parallel lookup and readdir updates ]
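[ Usage sketch, illustrative and not part of this series: down_write_killable()
  returns 0 with the write lock held, or -EINTR if a fatal signal arrived while
  waiting. The caller below is hypothetical; mm->mmap_sem is a real rw_semaphore
  in kernels of this era. ]

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

/* Hypothetical caller: take mmap_sem for writing, but give up if a
 * fatal signal -- e.g. from the OOM killer -- arrives while waiting. */
static int frob_address_space(struct mm_struct *mm)
{
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;		/* killed while waiting; lock not held */

	/* ... modify the address space ... */

	up_write(&mm->mmap_sem);
	return 0;
}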

* 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Fix comment on register clobbering
  locking/rwsem: Fix down_write_killable()
  locking/rwsem, x86: Add frame annotation for call_rwsem_down_write_failed_killable()
  locking/rwsem: Provide down_write_killable()
  locking/rwsem, x86: Provide __down_write_killable()
  locking/rwsem, s390: Provide __down_write_killable()
  locking/rwsem, ia64: Provide __down_write_killable()
  locking/rwsem, alpha: Provide __down_write_killable()
  locking/rwsem: Introduce basis for down_write_killable()
  locking/rwsem, sparc: Drop superfluous arch specific implementation
  locking/rwsem, sh: Drop superfluous arch specific implementation
  locking/rwsem, xtensa: Drop superfluous arch specific implementation
  locking/rwsem: Drop explicit memory barriers
  locking/rwsem: Get rid of __down_write_nested()
parents 1c19b68a 4544ba8c
arch/alpha/include/asm/rwsem.h:
@@ -63,7 +63,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 	return res >= 0 ? 1 : 0;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline long ___down_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
@@ -83,10 +83,24 @@ static inline void __down_write(struct rw_semaphore *sem)
 	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
 	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
 #endif
-	if (unlikely(oldcount))
+	return oldcount;
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_write(sem)))
 		rwsem_down_write_failed(sem);
 }
 
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_write(sem)))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
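The ia64 and s390 hunks below repeat the same refactoring, so here is the shape
once, as an illustrative arch-neutral sketch: C11 atomics stand in for the
per-arch fast-path assembly, and the bool-returning killable slow path is a
simplification of the kernel's ERR_PTR convention.

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

/* 64-bit count layout as on alpha/sparc: waiting bias + one active writer */
#define RWSEM_ACTIVE_WRITE_BIAS	(-0xffffffffL)

struct rw_semaphore { atomic_long count; };

/* Slow paths live in kernel/locking/; the killable variant is simplified
 * here to return false (instead of an ERR_PTR) on a fatal signal. */
extern void rwsem_down_write_failed(struct rw_semaphore *sem);
extern bool rwsem_down_write_failed_killable(struct rw_semaphore *sem);

/* Shared fast path: add the write bias, hand back the old count. */
static long ___down_write(struct rw_semaphore *sem)
{
	return atomic_fetch_add(&sem->count, RWSEM_ACTIVE_WRITE_BIAS);
}

static void __down_write(struct rw_semaphore *sem)
{
	if (___down_write(sem))			/* non-zero: contended */
		rwsem_down_write_failed(sem);
}

static int __down_write_killable(struct rw_semaphore *sem)
{
	if (___down_write(sem))
		if (!rwsem_down_write_failed_killable(sem))
			return -EINTR;		/* killed while sleeping */
	return 0;
}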
arch/ia64/include/asm/rwsem.h:
@@ -49,8 +49,8 @@ __down_read (struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void
-__down_write (struct rw_semaphore *sem)
+static inline long
+___down_write (struct rw_semaphore *sem)
 {
 	long old, new;
@@ -59,10 +59,26 @@ __down_write (struct rw_semaphore *sem)
 		new = old + RWSEM_ACTIVE_WRITE_BIAS;
 	} while (cmpxchg_acq(&sem->count, old, new) != old);
 
-	if (old != 0)
+	return old;
+}
+
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+	if (___down_write(sem))
 		rwsem_down_write_failed(sem);
 }
 
+static inline int
+__down_write_killable (struct rw_semaphore *sem)
+{
+	if (___down_write(sem))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
 /*
  * unlock after reading
  */
arch/s390/include/asm/rwsem.h:
@@ -90,7 +90,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline long ___down_write(struct rw_semaphore *sem)
 {
 	signed long old, new, tmp;
@@ -104,13 +104,23 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
-	if (old != 0)
-		rwsem_down_write_failed(sem);
+
+	return old;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	if (___down_write(sem))
+		rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	if (___down_write(sem))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
 }
 
 /*
arch/sh/include/asm/Kbuild:
@@ -26,6 +26,7 @@ generic-y += percpu.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
arch/sh/include/asm/rwsem.h (deleted by this series):

/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */
#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	__down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */
arch/sparc/include/asm/Kbuild:
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += mutex.h
 generic-y += preempt.h
+generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += types.h
arch/sparc/include/asm/rwsem.h (deleted by this series):

/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */
arch/x86/include/asm/rwsem.h:
@@ -99,26 +99,36 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+#define ____down_write(sem, slow_path)			\
+({							\
+	long tmp;					\
+	struct rw_semaphore* ret;			\
+	asm volatile("# beginning down_write\n\t"	\
+		     LOCK_PREFIX "  xadd      %1,(%3)\n\t"	\
+		     /* adds 0xffff0001, returns the old value */ \
+		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
+		     /* was the active mask 0 before? */\
+		     "  jz        1f\n"			\
+		     "  call " slow_path "\n"		\
+		     "1:\n"				\
+		     "# ending down_write"		\
+		     : "+m" (sem->count), "=d" (tmp), "=a" (ret)	\
+		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)	\
+		     : "memory", "cc");			\
+	ret;						\
+})
+
+static inline void __down_write(struct rw_semaphore *sem)
 {
-	long tmp;
-	asm volatile("# beginning down_write\n\t"
-		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
-		     /* adds 0xffff0001, returns the old value */
-		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-		     /* was the active mask 0 before? */
-		     "  jz        1f\n"
-		     "  call call_rwsem_down_write_failed\n"
-		     "1:\n"
-		     "# ending down_write"
-		     : "+m" (sem->count), "=d" (tmp)
-		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory", "cc");
+	____down_write(sem, "call_rwsem_down_write_failed");
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
+		return -EINTR;
+
+	return 0;
 }
 
 /*
arch/x86/lib/rwsem.S:
@@ -29,8 +29,10 @@
  * there is contention on the semaphore.
  *
  * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax whish is either a return
- * value or just clobbered..
+ * registers (%eax, %edx and %ecx) except %eax which is either a return
+ * value or just gets clobbered. Same is true for %edx so make sure GCC
+ * reloads it after the slow path, by making it hold a temporary, for
+ * example see ____down_write().
  */
 
 #define save_common_regs \
@@ -106,6 +108,16 @@ ENTRY(call_rwsem_down_write_failed)
 	ret
 ENDPROC(call_rwsem_down_write_failed)
 
+ENTRY(call_rwsem_down_write_failed_killable)
+	FRAME_BEGIN
+	save_common_regs
+	movq %rax,%rdi
+	call rwsem_down_write_failed_killable
+	restore_common_regs
+	FRAME_END
+	ret
+ENDPROC(call_rwsem_down_write_failed_killable)
+
 ENTRY(call_rwsem_wake)
 	FRAME_BEGIN
 	/* do nothing if still outstanding active readers */
arch/xtensa/include/asm/Kbuild:
@@ -22,6 +22,7 @@ generic-y += mm-arch-hooks.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sections.h
 generic-y += siginfo.h
 generic-y += statfs.h
arch/xtensa/include/asm/rwsem.h (deleted by this series):

/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* _XTENSA_RWSEM_H */
include/asm-generic/rwsem.h:
@@ -53,7 +53,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write(struct rw_semaphore *sem)
 {
 	long tmp;
@@ -63,9 +63,16 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 		rwsem_down_write_failed(sem);
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	long tmp;
+
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+	return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
include/linux/lockdep.h:
@@ -446,6 +446,18 @@ do {								\
 	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 } while (0)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
+({								\
+	int ____err = 0;					\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		____err = lock(_lock);				\
+	}							\
+	if (!____err)						\
+		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
+	____err;						\
+})
+
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
@@ -454,6 +466,9 @@ do {								\
 #define LOCK_CONTENDED(_lock, try, lock) \
 	lock(_lock)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+	lock(_lock)
+
 #endif /* CONFIG_LOCK_STAT */
 
 #ifdef CONFIG_LOCKDEP
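Hand-expanded for clarity (an illustrative expansion, assuming
CONFIG_LOCK_STAT=y), the macro's one user in this series --
down_write_killable() in kernel/locking/rwsem.c, shown later in this diff --
boils down to:

/* LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)
 * expanded by hand; statement-expression braces omitted for readability: */
int ____err = 0;
if (!__down_write_trylock(sem)) {
	lock_contended(&(sem)->dep_map, _RET_IP_);	/* fast path lost */
	____err = __down_write_killable(sem);		/* may sleep, may return -EINTR */
}
if (!____err)
	lock_acquired(&(sem)->dep_map, _RET_IP_);	/* recorded only on success */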
include/linux/rwsem-spinlock.h:
@@ -34,7 +34,7 @@ struct rw_semaphore {
 extern void __down_read(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
 extern void __down_write(struct rw_semaphore *sem);
-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check __down_write_killable(struct rw_semaphore *sem);
 extern int __down_write_trylock(struct rw_semaphore *sem);
 extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
include/linux/rwsem.h:
@@ -14,6 +14,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/err.h>
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -43,6 +44,7 @@ struct rw_semaphore {
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
@@ -116,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
  * lock for writing
  */
 extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
kernel/locking/rwsem-spinlock.c:
@@ -191,11 +191,12 @@ int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * get a write lock on the semaphore
  */
-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
+int __sched __down_write_common(struct rw_semaphore *sem, int state)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 	unsigned long flags;
+	int ret = 0;
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -215,21 +216,33 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 		 */
 		if (sem->count == 0)
 			break;
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (signal_pending_state(state, current)) {
+			ret = -EINTR;
+			goto out;
+		}
+		set_task_state(tsk, state);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
 		raw_spin_lock_irqsave(&sem->wait_lock, flags);
 	}
 	/* got the lock */
 	sem->count = -1;
+out:
 	list_del(&waiter.list);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	return ret;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int __sched __down_write_killable(struct rw_semaphore *sem)
+{
+	return __down_write_common(sem, TASK_KILLABLE);
 }
 
 /*
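Both slow paths (the spinlock variant above and the xadd variant below) gate
their bail-out on signal_pending_state(). Why TASK_KILLABLE makes the sleep
killable but not generally interruptible: TASK_KILLABLE is
TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, and signal_pending_state() only lets
fatal signals through for TASK_WAKEKILL sleepers. A sketch of its logic,
simplified from include/linux/sched.h of this era:

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;	/* plain TASK_UNINTERRUPTIBLE: never bail */
	if (!signal_pending(p))
		return 0;

	/* interruptible sleepers react to any signal; killable
	 * (TASK_WAKEKILL) sleepers only to a pending SIGKILL */
	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}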
kernel/locking/rwsem-xadd.c:
@@ -433,12 +433,13 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 /*
  * Wait until we successfully acquire the write lock
  */
-__visible
-struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 {
 	long count;
 	bool waiting = true; /* any queued threads before us */
 	struct rwsem_waiter waiter;
+	struct rw_semaphore *ret = sem;
 
 	/* undo write bias from down_write operation, stop active locking */
 	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);
@@ -478,7 +479,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
 
 	/* wait until we successfully acquire the lock */
-	set_current_state(TASK_UNINTERRUPTIBLE);
+	set_current_state(state);
 	while (true) {
 		if (rwsem_try_write_lock(count, sem))
 			break;
@@ -486,21 +487,48 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
 		/* Block until there are no active lockers. */
 		do {
+			if (signal_pending_state(state, current))
+				goto out_nolock;
+
 			schedule();
-			set_current_state(TASK_UNINTERRUPTIBLE);
+			set_current_state(state);
 		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
 	__set_current_state(TASK_RUNNING);
+	list_del(&waiter.list);
+	raw_spin_unlock_irq(&sem->wait_lock);
 
+	return ret;
+
+out_nolock:
+	__set_current_state(TASK_RUNNING);
+	raw_spin_lock_irq(&sem->wait_lock);
 	list_del(&waiter.list);
+	if (list_empty(&sem->wait_list))
+		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
+	else
+		__rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 	raw_spin_unlock_irq(&sem->wait_lock);
 
-	return sem;
+	return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_write_failed(struct rw_semaphore *sem)
+{
+	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(rwsem_down_write_failed);
 
+__visible struct rw_semaphore * __sched
+rwsem_down_write_failed_killable(struct rw_semaphore *sem)
+{
+	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_write_failed_killable);
+
 /*
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
kernel/locking/rwsem.c:
@@ -54,6 +54,25 @@ void __sched down_write(struct rw_semaphore *sem)
 EXPORT_SYMBOL(down_write);
 
+/*
+ * lock for writing
+ */
+int __sched down_write_killable(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+		rwsem_release(&sem->dep_map, 1, _RET_IP_);
+		return -EINTR;
+	}
+
+	rwsem_set_owner(sem);
+	return 0;
+}
+EXPORT_SYMBOL(down_write_killable);
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */