Commit 381a2292 authored by Jarek Poplawski, committed by Linus Torvalds

[PATCH] lockdep: more unlock-on-error fixes

- returns after DEBUG_LOCKS_WARN_ON added in 3 places (see the sketch
  after this list for why the macro's return value makes the pattern work)

- debug_locks checking after lookup_chain_cache() added in
  __lock_acquire()

- locking for testing and changing global variable max_lockdep_depth
  added in __lock_acquire()
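For reference, a minimal compilable userspace sketch of why
"if (DEBUG_LOCKS_WARN_ON(cond)) return ...;" works: the macro is a GCC
statement expression that evaluates to nonzero when the warning fires,
after turning the validator off once.  The macro below is a simplified
stand-in for the real one in include/linux/debug_locks.h, and
lookup_chain_cache_stub() is a hypothetical caller, not kernel code:

  #include <stdio.h>

  static int debug_locks = 1;

  /*
   * Simplified stand-in for the kernel's DEBUG_LOCKS_WARN_ON():
   * evaluates to 1 when the condition triggers (disabling the
   * validator on first fire), 0 otherwise.
   */
  #define DEBUG_LOCKS_WARN_ON(c)                          \
  ({                                                      \
          int __ret = 0;                                  \
                                                          \
          if (c) {                                        \
                  if (debug_locks) {                      \
                          debug_locks = 0;                \
                          fprintf(stderr, "warn: %s\n", #c); \
                  }                                       \
                  __ret = 1;                              \
          }                                               \
          __ret;                                          \
  })

  /* Hypothetical caller showing the unlock-on-error pattern: */
  static int lookup_chain_cache_stub(int irqs_disabled)
  {
          /* Bail out early instead of running on with bad state: */
          if (DEBUG_LOCKS_WARN_ON(!irqs_disabled))
                  return 0;
          return 1;
  }

  int main(void)
  {
          printf("%d\n", lookup_chain_cache_stub(1)); /* 1        */
          printf("%d\n", lookup_chain_cache_stub(0)); /* 0, warns */
          return 0;
  }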

From: Ingo Molnar <mingo@elte.hu>

My __lock_acquire() cleanup introduced a locking bug: on SMP systems we'd
release a non-owned graph lock.  Fix this by moving the graph unlock back,
and by leaving the max_lockdep_depth variable update possibly racy.  (We
don't care; it's just statistics.)

Also add some minimal debugging code to graph_unlock()/graph_lock(),
which caught this locking bug.
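A rough userspace analogue of that debugging code, assuming pthreads;
this is a hedged sketch of the pattern only.  The real functions test
__raw_spin_is_locked(&lockdep_lock); here a graph_locked flag stands in
for that:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t graph_mutex = PTHREAD_MUTEX_INITIALIZER;
  static int graph_locked;        /* stand-in for __raw_spin_is_locked() */
  static int debug_locks = 1;

  static int graph_lock(void)
  {
          pthread_mutex_lock(&graph_mutex);
          graph_locked = 1;
          /* If a bug was already found, don't touch the graph: */
          if (!debug_locks) {
                  graph_locked = 0;
                  pthread_mutex_unlock(&graph_mutex);
                  return 0;
          }
          return 1;
  }

  static int graph_unlock(void)
  {
          /* The new check: catch releasing a lock we don't hold. */
          if (debug_locks && !graph_locked) {
                  fprintf(stderr, "BUG: graph lock not held on unlock\n");
                  debug_locks = 0;
                  return 1;
          }
          graph_locked = 0;
          pthread_mutex_unlock(&graph_mutex);
          return 0;
  }

  int main(void)
  {
          graph_lock();
          graph_unlock();         /* fine */
          graph_unlock();         /* triggers the new warning */
          return 0;
  }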
Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 898552c9
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -70,6 +70,9 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
+	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+		return DEBUG_LOCKS_WARN_ON(1);
+
 	__raw_spin_unlock(&lockdep_lock);
 	return 0;
 }
@@ -712,6 +715,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (!__raw_spin_is_locked(&lockdep_lock))
+		return DEBUG_LOCKS_WARN_ON(1);
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1293,7 +1299,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	if (!subclass || force)
 		lock->class_cache = class;
 
-	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
+	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+		return NULL;
 
 	return class;
 }
@@ -1308,7 +1315,8 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
-	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
 	/*
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
@@ -1394,7 +1402,9 @@ static void check_chain_key(struct task_struct *curr)
 			return;
 		}
 		id = hlock->class - lock_classes;
-		DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
+		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+			return;
+
 		if (prev_hlock && (prev_hlock->irq_context !=
 				hlock->irq_context))
 			chain_key = 0;
@@ -2205,7 +2215,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			if (!check_prevs_add(curr, hlock))
 				return 0;
 		graph_unlock();
-	}
+	} else
+		/* after lookup_chain_cache(): */
+		if (unlikely(!debug_locks))
+			return 0;
+
 	curr->lockdep_depth++;
 	check_chain_key(curr);
 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
@@ -2214,6 +2228,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		printk("turning off the locking correctness validator.\n");
 		return 0;
 	}
+
 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
 		max_lockdep_depth = curr->lockdep_depth;
 
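The subtle part is the pair of __lock_acquire() hunks: graph_unlock()
stays on the branch where lookup_chain_cache() actually returned with the
graph lock held, the cached-chain branch re-checks debug_locks because
graph_lock() on another CPU may have shut the validator off in the
meantime, and the max_lockdep_depth update is left deliberately racy.  A
compilable, heavily stubbed sketch of that control flow (every function
and value below is a hypothetical stand-in, for shape only):

  #include <stdio.h>

  static int debug_locks = 1;
  static int max_lockdep_depth;   /* statistics only */

  /* Hypothetical stubs for the real lockdep machinery: */
  static int lookup_chain_cache(void) { return 1; } /* 1: new chain, lock held */
  static int check_prevs_add(void) { return 1; }
  static void graph_unlock(void) { }

  static int lock_acquire_tail(int depth)
  {
          if (lookup_chain_cache()) {
                  if (!check_prevs_add())
                          return 0;
                  /* released only on the branch that acquired it: */
                  graph_unlock();
          } else
                  /* after lookup_chain_cache(): */
                  if (!debug_locks)
                          return 0;

          depth++;
          /* deliberately unlocked update; it is only a statistic: */
          if (depth > max_lockdep_depth)
                  max_lockdep_depth = depth;
          return 1;
  }

  int main(void)
  {
          printf("acquire -> %d\n", lock_acquire_tail(0));
          return 0;
  }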