Commit 22d9fd34 authored by Andi Kleen, committed by H. Peter Anvin

asmlinkage, mutex: Mark __visible

Various kernel/mutex.c functions can be called from inline
assembler, so they should all be global and __visible.

Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1391845930-28580-7-git-send-email-ak@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent b35f8305
@@ -67,8 +67,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -225,7 +224,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -746,7 +746,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +803,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
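For context, a minimal sketch (not the exact kernel source) of why these slowpath
functions need __visible: on x86 the mutex fastpath is open-coded in inline
assembler and falls back to the slowpath by name inside the asm string, so the
compiler never sees a C-level call to it. Under LTO or -fwhole-program the
function could be dropped or its symbol made local unless it is marked as
externally visible. The macro below is a simplified stand-in modeled on the
x86-32 fastpath helper; the real definitions live in the arch headers.

/* Sketch only -- simplified from the x86 fastpath helpers, assuming the
 * usual kernel definitions of atomic_t and LOCK_PREFIX are in scope.
 *
 * For gcc, __visible expands to __attribute__((externally_visible)):
 * the symbol is referenced from outside anything the compiler can see
 * (here, only from inside an asm template via "#fail_fn"), so it must
 * not be optimized away or localized.
 */
#define __visible __attribute__((externally_visible))

__visible void __mutex_lock_slowpath(atomic_t *lock_count);

#define __mutex_fastpath_lock(count, fail_fn)				\
do {									\
	unsigned int dummy;						\
									\
	asm volatile(LOCK_PREFIX "   decl (%%eax)\n"			\
		     "   jns 1f\n"					\
		     "   call " #fail_fn "\n"				\
		     "1:\n"						\
		     : "=a" (dummy)					\
		     : "a" (count)					\
		     : "memory", "ecx", "edx");				\
} while (0)

Because the only reference to __mutex_lock_slowpath is the stringified name in
the asm template, dropping the old "static" and adding __visible is what keeps
the symbol global and alive in whole-program builds.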