Commit c0c6dff9 authored by Waiman Long, committed by Greg Kroah-Hartman

locking/spinlock/debug: Remove spinlock lockup detection code

commit bc88c10d upstream.

The current spinlock lockup detection code can sometimes produce false
positives because of the unfairness of the locking algorithm itself.

So the lockup detection code is now removed. Instead, we are relying
on the NMI watchdog to detect potential lockups. We won't have lockup
detection if the watchdog isn't running.

The commented-out read-write lock lockup detection code is also
removed.
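
For context, a simplified sketch of the change (paraphrased from the diff
below, not a drop-in replacement): the old debug slowpath spun on
arch_spin_trylock() for roughly loops_per_jiffy * HZ iterations of
__delay(1) (about one second) before dumping a "lockup suspected"
warning, which can fire spuriously when the lock is merely unfair; the
new path simply calls arch_spin_lock() and leaves lockup reporting to
the NMI watchdog:

    /* Old debug slowpath (removed): poll with trylock, then warn. */
    static void __spin_lock_debug(raw_spinlock_t *lock)
    {
            u64 i;
            u64 loops = loops_per_jiffy * HZ;   /* ~1s worth of __delay(1) */

            for (i = 0; i < loops; i++) {
                    if (arch_spin_trylock(&lock->raw_lock))
                            return;
                    __delay(1);
            }
            spin_dump(lock, "lockup suspected"); /* may be a false positive */
            arch_spin_lock(&lock->raw_lock);     /* fall back to arch slowpath */
    }

    /* New behaviour: just take the lock; the NMI watchdog reports lockups. */
    void do_raw_spin_lock(raw_spinlock_t *lock)
    {
            debug_spin_lock_before(lock);
            arch_spin_lock(&lock->raw_lock);
            debug_spin_lock_after(lock);
    }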
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1486583208-11038-1-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 27e7506c
@@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(raw_spinlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-
-	for (i = 0; i < loops; i++) {
-		if (arch_spin_trylock(&lock->raw_lock))
-			return;
-		__delay(1);
-	}
-	/* lockup suspected: */
-	spin_dump(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-	trigger_all_cpu_backtrace();
-#endif
-
-	/*
-	 * The trylock above was causing a livelock. Give the lower level arch
-	 * specific lock code a chance to acquire the lock. We have already
-	 * printed a warning/backtrace at this point. The non-debug arch
-	 * specific code might actually succeed in acquiring the lock. If it is
-	 * not successful, the end-result is the same - there is no forward
-	 * progress.
-	 */
-	arch_spin_lock(&lock->raw_lock);
-}
-
+/*
+ * We are now relying on the NMI watchdog to detect lockup instead of doing
+ * the detection here with an unfair lock which can cause problem of its own.
+ */
 void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
-	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
-		__spin_lock_debug(lock);
+	arch_spin_lock(&lock->raw_lock);
 	debug_spin_lock_after(lock);
 }
@@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
-#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
-static void __read_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_read_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
@@ -247,32 +197,6 @@ static inline void debug_write_unlock(rwlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-#if 0 /* This can cause lockups */
-static void __write_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_write_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);