Commit 389fb07b authored by Linus Torvalds

Un-inline the big kernel lock.

Now that spinlocks are uninlined, it is silly to keep the
BKL inlined. And this should make it a lot easier for people
to play around with variations on the locking (i.e. Ingo's
semaphores, etc.).
parent 6ae62940
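The practical effect of moving the BKL out of line is that experiments such as the semaphore-based BKL mentioned above only need to touch the new lib/kernel_lock.c, not every inlined call site. As a purely hypothetical sketch (not part of this commit, and glossing over the scheduler's release/reacquire path, which runs with preemption disabled), a sleeping variant could swap the spinlock for a semaphore behind the same two helpers; kernel_sem is an invented name:

/*
 * Hypothetical alternative guts for lib/kernel_lock.c: a sleeping BKL
 * built on a semaphore instead of a spinlock.  Not part of this commit;
 * 'kernel_sem' is an invented name, and the scheduler interaction is
 * deliberately ignored here.
 */
#include <linux/smp_lock.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(kernel_sem);	/* semaphore initialized to 1 */

inline void __lockfunc get_kernel_lock(void)
{
	down(&kernel_sem);		/* sleeps instead of spinning */
}

inline void __lockfunc put_kernel_lock(void)
{
	up(&kernel_sem);
}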
@@ -5,54 +5,28 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-
-extern spinlock_t kernel_flag;
+#ifdef CONFIG_LOCK_KERNEL
 
 #define kernel_locked()		(current->lock_depth >= 0)
 
-#define get_kernel_lock()	spin_lock(&kernel_flag)
-#define put_kernel_lock()	spin_unlock(&kernel_flag)
-
-/*
- * Release global kernel lock.
- */
-static inline void release_kernel_lock(struct task_struct *task)
-{
-	if (unlikely(task->lock_depth >= 0))
-		put_kernel_lock();
-}
+extern void __lockfunc get_kernel_lock(void);
+extern void __lockfunc put_kernel_lock(void);
 
 /*
- * Re-acquire the kernel lock
+ * Release/re-acquire global kernel lock for the scheduler
  */
-static inline void reacquire_kernel_lock(struct task_struct *task)
-{
-	if (unlikely(task->lock_depth >= 0))
-		get_kernel_lock();
-}
+#define release_kernel_lock(tsk) do {		\
+	if (unlikely((tsk)->lock_depth >= 0))	\
+		put_kernel_lock();		\
+} while (0)
 
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static inline void lock_kernel(void)
-{
-	int depth = current->lock_depth+1;
-	if (likely(!depth))
-		get_kernel_lock();
-	current->lock_depth = depth;
-}
+#define reacquire_kernel_lock(tsk) do {		\
+	if (unlikely((tsk)->lock_depth >= 0))	\
+		get_kernel_lock();		\
+} while (0)
 
-static inline void unlock_kernel(void)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		put_kernel_lock();
-}
+extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
+extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);
 
 #else
@@ -62,5 +36,5 @@ static inline void unlock_kernel(void)
 #define reacquire_kernel_lock(task)	do { } while(0)
 #define kernel_locked()			1
 
-#endif /* CONFIG_SMP || CONFIG_PREEMPT */
+#endif /* CONFIG_LOCK_KERNEL */
 #endif /* __LINUX_SMPLOCK_H */
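Nothing changes for callers of this header: lock_kernel(), unlock_kernel() and kernel_locked() keep the same semantics, they are just no longer inlined. As a reminder of how legacy code uses the interface, here is a minimal sketch with invented names (bkl_legacy_ioctl, legacy_state); it is not taken from this commit:

/*
 * Minimal sketch of a legacy BKL user; 'bkl_legacy_ioctl' and
 * 'legacy_state' are invented names.  Real callers are old drivers,
 * ioctl paths and filesystems that predate finer-grained locking.
 */
#include <linux/smp_lock.h>

static unsigned long legacy_state;	/* data still guarded by the BKL */

static void bkl_legacy_ioctl(unsigned long arg)
{
	lock_kernel();			/* recursive: nested calls just bump lock_depth */
	legacy_state = arg;		/* kernel_locked() is non-zero here */
	unlock_kernel();
}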
@@ -32,14 +32,14 @@
 #define LOCK_SECTION_END \
 	".previous\n\t"
 
+#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
+
 /*
  * If CONFIG_SMP is set, pull in the _raw_* definitions
  */
 #ifdef CONFIG_SMP
 #include <asm/spinlock.h>
 
-#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
-
 int __lockfunc _spin_trylock(spinlock_t *lock);
 int __lockfunc _write_trylock(rwlock_t *lock);
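The only change in this hunk is that __lockfunc is defined before the #ifdef CONFIG_SMP block rather than inside it, so the out-of-line BKL helpers in lib/kernel_lock.c can use it on uniprocessor preemptible kernels too. For illustration, a hedged sketch of an out-of-line lock helper using the annotation (my_lock and my_lock_helper are invented names):

/*
 * Sketch only: __lockfunc gives the helper the fastcall calling
 * convention and places it in the .spinlock.text section, keeping
 * out-of-line locking code grouped together in the image.
 * 'my_lock' and 'my_lock_helper' are invented names.
 */
#include <linux/spinlock.h>

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

void __lockfunc my_lock_helper(void)
{
	spin_lock(&my_lock);
	/* ...critical section... */
	spin_unlock(&my_lock);
}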
......
@@ -50,6 +50,11 @@ config BROKEN_ON_SMP
 	depends on BROKEN || !SMP
 	default y
 
+config LOCK_KERNEL
+	bool
+	depends on SMP || PREEMPT
+	default y
+
 endmenu
 
 menu "General setup"
......
@@ -4048,21 +4048,6 @@ int __init migration_init(void)
 }
 #endif
 
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- *
- * Note: spinlock debugging needs this even on !CONFIG_SMP.
- */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-EXPORT_SYMBOL(kernel_flag);
-
 #ifdef CONFIG_SMP
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
......
@@ -14,6 +14,7 @@ endif
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
 lib-y += dec_and_lock.o
......
/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * The scheduler release and re-acquire currently always happen
 * with preemption disabled. Which is likely a bug in the acquire
 * case...
 *
 * Regardless, we try to be polite about preemption. If SMP is
 * not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
inline void __lockfunc get_kernel_lock(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
inline void __lockfunc get_kernel_lock(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif

inline void __lockfunc put_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
	int depth = current->lock_depth+1;
	if (likely(!depth))
		get_kernel_lock();
	current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		put_kernel_lock();
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
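Because the recursion count lives in current->lock_depth rather than in the lock itself, the out-of-line lock_kernel()/unlock_kernel() above keep the BKL's recursive semantics: only the outermost acquisition and the final release touch kernel_flag. A small illustration with invented names (bkl_outer, bkl_inner):

/*
 * Illustration of the recursion handled by lock_kernel()/unlock_kernel()
 * above; 'bkl_outer' and 'bkl_inner' are invented names.
 */
#include <linux/smp_lock.h>

static void bkl_inner(void)
{
	lock_kernel();		/* lock_depth 0 -> 1, kernel_flag already held */
	/* ... */
	unlock_kernel();	/* lock_depth 1 -> 0, kernel_flag stays held */
}

static void bkl_outer(void)
{
	lock_kernel();		/* lock_depth -1 -> 0, takes kernel_flag */
	bkl_inner();
	unlock_kernel();	/* lock_depth 0 -> -1, releases kernel_flag */
}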