Commit a41b56ef authored by Maarten Lankhorst, committed by Ingo Molnar

arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not

This will allow me to call functions that have multiple
arguments when the fastpath fails. This is required to support
ticket mutexes, because they need to be able to pass an extra
argument to the fail function.

Originally I duplicated the functions by adding
__mutex_fastpath_lock_retval_arg. That ended up being just a
copy of the existing function, so having a way to test whether
the fastpath succeeded turned out to be better.

This also cleans up the reservation mutex patch somewhat, by
making it possible to call atomic_set() instead of atomic_xchg(),
and by making it easier to detect if the wrong unlock function
was previously used.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: robclark@gmail.com
Cc: rostedt@goodmis.org
Cc: daniel@ffwll.ch
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1e876e3b
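
To see why the return-value convention matters, compare the two shapes of a lock function: with the old interface the fastpath had to tail-call a fixed int (*fail_fn)(atomic_t *), while with the new one the caller inspects the result and may invoke any slowpath, with any arguments. The following is a minimal userspace sketch, using C11 atomics in place of the kernel's atomic_t; all names below are invented for illustration and are not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

/* New-style fastpath: report contention instead of calling a fixed
 * fail function, mirroring the asm-generic/mutex-dec.h variant below. */
static inline int fastpath_lock_retval(atomic_int *count)
{
        /* atomic_fetch_sub() returns the old value, so old - 1 is the
         * post-decrement count, like the kernel's atomic_dec_return(). */
        if (atomic_fetch_sub(count, 1) - 1 < 0)
                return -1;      /* contended: caller chooses a slowpath */
        return 0;
}

/* Hypothetical slowpath with an extra context argument -- exactly what
 * the old int (*fail_fn)(atomic_t *) signature could not express. */
struct lock_ctx { const char *who; };

static int slowpath_lock(atomic_int *count, struct lock_ctx *ctx)
{
        (void)count;
        printf("%s: contended, taking slowpath\n", ctx->who);
        /* ... block, enqueue as a waiter, handle the ticket, etc. ... */
        return 0;
}

static int lock(atomic_int *count, struct lock_ctx *ctx)
{
        if (fastpath_lock_retval(count) == 0)
                return 0;                       /* fastpath acquired it */
        return slowpath_lock(count, ctx);       /* extra argument now possible */
}

int main(void)
{
        atomic_int count = 1;                   /* 1 = unlocked */
        struct lock_ctx ctx = { "demo" };

        lock(&count, &ctx);     /* fastpath: 1 -> 0 */
        lock(&count, &ctx);     /* contended: slowpath runs */
        return 0;
}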
arch/ia64/include/asm/mutex.h
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
-                return fail_fn(count);
+                return -1;
         return 0;
 }
arch/powerpc/include/asm/mutex.h
@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(__mutex_dec_return_lock(count) < 0))
-                return fail_fn(count);
+                return -1;
         return 0;
 }
arch/sh/include/asm/mutex-llsc.h
@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 }

 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         int __done, __res;

@@ -51,7 +51,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
                 : "t");

         if (unlikely(!__done || __res != 0))
-                __res = fail_fn(count);
+                __res = -1;

         return __res;
 }
arch/x86/include/asm/mutex_32.h
@@ -42,17 +42,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-                                               int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_dec_return(count) < 0))
-                return fail_fn(count);
+                return -1;
         else
                 return 0;
 }
arch/x86/include/asm/mutex_64.h
@@ -37,17 +37,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-                                               int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_dec_return(count) < 0))
-                return fail_fn(count);
+                return -1;
         else
                 return 0;
 }
include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_dec_return(count) < 0))
-                return fail_fn(count);
+                return -1;
         return 0;
 }
include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H

 #define __mutex_fastpath_lock(count, fail_fn)           fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)    fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)             (-1)
 #define __mutex_fastpath_unlock(count, fail_fn)         fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)        fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()              1
include/asm-generic/mutex-xchg.h
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
         if (unlikely(atomic_xchg(count, 0) != 1))
                 if (likely(atomic_xchg(count, -1) != 1))
-                        return fail_fn(count);
+                        return -1;
         return 0;
 }
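
For reference, the xchg-based fastpath above is the subtle one: the first swap optimistically stores 0 (locked, no waiters); if the old value was not 1 the lock was already held, so a second swap stores -1 (locked, waiters possible), and only if that second swap returns 1 (the holder released the lock in between) has the caller actually acquired it. A hedged userspace rendering with C11 atomics, not kernel code:

#include <stdatomic.h>

/* Userspace sketch of the asm-generic/mutex-xchg.h fastpath above.
 * Count states: 1 = unlocked, 0 = locked/no waiters, -1 = locked/waiters. */
static inline int xchg_fastpath_lock_retval(atomic_int *count)
{
        if (atomic_exchange(count, 0) != 1)             /* was it free? */
                if (atomic_exchange(count, -1) != 1)    /* mark contended */
                        return -1;      /* still held: caller takes slowpath */
        return 0;       /* acquired (possibly racing with an unlock) */
}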
kernel/mutex.c
@@ -494,10 +494,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count);
+__mutex_lock_killable_slowpath(struct mutex *lock);

 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(struct mutex *lock);

 /**
  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +515,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
         int ret;

         might_sleep();
-        ret = __mutex_fastpath_lock_retval
-                        (&lock->count, __mutex_lock_interruptible_slowpath);
-        if (!ret)
+        ret = __mutex_fastpath_lock_retval(&lock->count);
+        if (likely(!ret)) {
                 mutex_set_owner(lock);
-
-        return ret;
+                return 0;
+        } else
+                return __mutex_lock_interruptible_slowpath(lock);
 }

 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +530,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
         int ret;

         might_sleep();
-        ret = __mutex_fastpath_lock_retval
-                        (&lock->count, __mutex_lock_killable_slowpath);
-        if (!ret)
+        ret = __mutex_fastpath_lock_retval(&lock->count);
+        if (likely(!ret)) {
                 mutex_set_owner(lock);
-
-        return ret;
+                return 0;
+        } else
+                return __mutex_lock_killable_slowpath(lock);
 }

 EXPORT_SYMBOL(mutex_lock_killable);
@@ -548,18 +548,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }

 static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count)
+__mutex_lock_killable_slowpath(struct mutex *lock)
 {
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
-
         return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }

 static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
-
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }

 #endif
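
The kernel/mutex.c hunks show the pattern every retval caller now follows; the payoff arrives when a later patch needs a slowpath with extra arguments. A hypothetical sketch of a reservation/ticket-style caller under the new convention (ww_ctx and ww_style_lock_slowpath are invented names, not from this patch):

/* Hypothetical caller in the style this patch enables: the slowpath can
 * take a context argument because the fastpath merely reports failure
 * instead of invoking a fixed fail_fn(atomic_t *). */
static inline int ww_style_lock(struct mutex *lock, struct ww_ctx *ctx)
{
        int ret = __mutex_fastpath_lock_retval(&lock->count);

        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        }
        return ww_style_lock_slowpath(lock, ctx);       /* extra argument */
}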