nexedi / linux / Commits

Commit 23a0ee90, authored Aug 12, 2008 by Ingo Molnar
    Merge branch 'core/locking' into core/urgent

Parents: cc7a486c, 0f2bc27b
Showing 15 changed files with 366 additions and 146 deletions (+366, -146)
fs/jbd/transaction.c               +2   -2
fs/jbd2/transaction.c              +2   -2
include/linux/lockdep.h            +49  -21
include/linux/rcuclassic.h         +1   -1
include/linux/spinlock.h           +6   -0
include/linux/spinlock_api_smp.h   +2   -0
kernel/lockdep.c                   +239 -56
kernel/lockdep_internals.h         +3   -3
kernel/lockdep_proc.c              +6   -31
kernel/sched.c                     +13  -8
kernel/sched_rt.c                  +5   -3
kernel/spinlock.c                  +11  -0
kernel/workqueue.c                 +12  -12
lib/debug_locks.c                  +2   -0
mm/mmap.c                          +13  -7
fs/jbd/transaction.c
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);
 	jbd_free_handle(handle);
 	return err;
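Note: lock_map_acquire()/lock_map_release() are new wrappers introduced later in this series (see the include/linux/lockdep.h hunk below); they replace the open-coded lock_acquire(map, 0, 0, 0, 2, _THIS_IP_) / lock_release(map, 1, _THIS_IP_) calls that model a journal handle as a pseudo-lock. A minimal sketch of that annotation pattern, with purely illustrative names (my_handle_map, my_handle_key and the my_handle_*() functions are not from this commit):

#include <linux/lockdep.h>

static struct lock_class_key my_handle_key;	/* hypothetical class key */
static struct lockdep_map my_handle_map;	/* hypothetical pseudo-lock */

static void my_handle_subsys_init(void)
{
	/* register the pseudo-lock with lockdep once, subclass 0 */
	lockdep_init_map(&my_handle_map, "my_handle", &my_handle_key, 0);
}

static void my_handle_start(void)
{
	/* "taking" the handle; was lock_acquire(&map, 0, 0, 0, 2, _THIS_IP_) */
	lock_map_acquire(&my_handle_map);
}

static void my_handle_stop(void)
{
	/* "dropping" the handle; was lock_release(&map, 1, _THIS_IP_) */
	lock_map_release(&my_handle_map);
}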
fs/jbd2/transaction.c
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;
 }
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);
 	jbd2_free_handle(handle);
 	return err;
include/linux/lockdep.h
@@ -89,6 +89,7 @@ struct lock_class {
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
+	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
@@ -189,6 +190,14 @@ struct lock_chain {
 	u64				chain_key;
 };
 
+#define MAX_LOCKDEP_KEYS_BITS		13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
 struct held_lock {
 	/*
 	 * One-way hash of the dependency chain up to this point. We
@@ -205,14 +214,14 @@ struct held_lock {
 	 * with zero), here we store the previous hash value:
 	 */
 	u64				prev_chain_key;
-	struct lock_class		*class;
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
-
+	struct lockdep_map		*nest_lock;
 #ifdef CONFIG_LOCK_STAT
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
 #endif
+	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -226,11 +235,11 @@ struct held_lock {
 	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
-	int				irq_context;
-	int				trylock;
-	int				read;
-	int				check;
-	int				hardirqs_off;
+	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+	unsigned int trylock:1;
+	unsigned int read:2;        /* see lock_acquire() comment */
+	unsigned int check:2;       /* see lock_acquire() comment */
+	unsigned int hardirqs_off:1;
 };
 
 /*
@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  * 2: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			 int trylock, int read, int check, unsigned long ip);
+			 int trylock, int read, int check,
+			 struct lockdep_map *nest_lock, unsigned long ip);
 
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+			      unsigned long ip);
+
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -313,8 +326,9 @@ static inline void lockdep_on(void)
 {
 }
-# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
 # else
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
 # else
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
 # endif
 # define rwlock_release(l, n, i)		lock_release(l, n, i)
 #else
@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define mutex_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define rwsem_release(l, n, i)			do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# else
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# endif
+# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
+#else
+# define lock_map_acquire(l)			do { } while (0)
+# define lock_map_release(l)			do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
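For orientation, a hedged illustration (not code from this commit; obj and outer are hypothetical objects with a dep_map field) of what the extra parameter means in the expanded lock_acquire() calls above: read = 0 and check = 2 request a full-validation exclusive acquire, and nest_lock is either NULL for an ordinary acquisition or the lockdep map of an already-held lock that serializes nesting of this class:

	/* ordinary exclusive acquire, no protecting lock */
	lock_acquire(&obj->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);

	/* same acquire, but declared as nested under the held "outer" lock */
	lock_acquire(&obj->dep_map, 0, 0, 0, 2, &outer->dep_map, _THIS_IP_);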
include/linux/rcuclassic.h
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
 #else
 # define rcu_read_acquire()	do { } while (0)
include/linux/spinlock.h
@@ -183,8 +183,14 @@ do {								\
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)				\
+	 do {								\
+		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+	 } while (0)
 #else
 # define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
 #endif
 
 #define write_lock(lock)		_write_lock(lock)
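spin_lock_nest_lock() is aimed at code that takes an arbitrary number of spinlocks of a single class while holding an outer lock that serializes all of them; naming that outer lock lets lockdep accept the pattern instead of reporting recursive locking as soon as a second lock of the class is taken. A hedged sketch of the intended usage (the parent/child structures and lock_all_children() are illustrative and not from this commit):

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct parent {
	struct mutex mux;		/* serializes locking of all children */
	struct list_head children;
};

struct child {
	spinlock_t lock;		/* all children share one lock class */
	struct list_head node;
};

static void lock_all_children(struct parent *p)
{
	struct child *c;

	mutex_lock(&p->mux);
	list_for_each_entry(c, &p->children, node)
		spin_lock_nest_lock(&c->lock, &p->mux);	/* nested under p->mux */
	/* the matching unlocks (children first, then p->mux) happen elsewhere */
}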
include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 							__acquires(lock);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+							__acquires(lock);
 void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
 void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+	if (!hlock->class_idx) {
+		DEBUG_LOCKS_WARN_ON(1);
+		return NULL;
+	}
+	return lock_classes + hlock->class_idx - 1;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
 	holdtime = sched_clock() - hlock->holdtime_stamp;
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
 		lock_time_inc(&stats->read_holdtime, holdtime);
 	else
@@ -372,6 +381,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+				     unsigned int depth)
+{
+	if (!depth)
+		lockdep_dependency_gen_id++;
+	if (source->dep_gen_id == lockdep_dependency_gen_id)
+		return true;
+	source->dep_gen_id = lockdep_dependency_gen_id;
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 static void print_lock(struct held_lock *hlock)
 {
-	print_lock_name(hlock->class);
+	print_lock_name(hlock_class(hlock));
 	printk(", at: ");
 	print_ip_sym(hlock->acquire_ip);
 }
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(class, depth))
+		return;
+
 	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 		return;
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
 	if (debug_locks_silent)
 		return 0;
 
-	this.class = check_source->class;
+	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
 	return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+					   unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_after, entry)
+		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_forward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+					    unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_before, entry)
+		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_backward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 * Check this lock's dependency list:
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == check_target->class)
+		if (entry->class == hlock_class(check_target))
 			return print_circular_bug_header(entry, depth+1);
 		debug_atomic_inc(&nr_cyclic_checks);
 		if (!check_noncircular(entry->class, depth+1))
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (!__raw_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 		return 2;
 	}
 
+	if (!source && debug_locks_off_graph_unlock()) {
+		WARN_ON(1);
+		return 0;
+	}
+
 	/*
 	 * Check this lock's dependency list:
 	 */
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	printk("\nand this task is already holding:\n");
 	print_lock(prev);
 	printk("which would create a new lock dependency:\n");
-	print_lock_name(prev->class);
+	print_lock_name(hlock_class(prev));
 	printk(" ->");
-	print_lock_name(next->class);
+	print_lock_name(hlock_class(next));
 	printk("\n");
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	find_usage_bit = bit_backwards;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(prev->class, 0);
+	ret = find_usage_backwards(hlock_class(prev), 0);
 	if (!ret || ret == 1)
 		return ret;
 
 	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(next->class, 0);
+	ret = find_usage_forwards(hlock_class(next), 0);
 	if (!ret || ret == 1)
 		return ret;
 	/* ret == 2 */
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
 	       struct lockdep_map *next_instance, int read)
 {
 	struct held_lock *prev;
+	struct held_lock *nest = NULL;
 	int i;
 
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		prev = curr->held_locks + i;
-		if (prev->class != next->class)
+
+		if (prev->instance == next->nest_lock)
+			nest = prev;
+
+		if (hlock_class(prev) != hlock_class(next))
 			continue;
+
 		/*
 		 * Allow read-after-read recursion of the same
 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 		 */
 		if ((read == 2) && prev->read)
 			return 2;
+
+		/*
+		 * We're holding the nest_lock, which serializes this lock's
+		 * nesting behaviour.
		 */
+		if (nest)
+			return 2;
+
 		return print_deadlock_bug(curr, prev, next);
 	}
 	return 1;
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(next->class, 0)))
+	if (!(check_noncircular(hlock_class(next), 0)))
 		return print_circular_bug_tail();
 
 	if (!check_prev_add_irq(curr, prev, next))
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * chains - the second one will be new, but L1 already has
 	 * L2 added to its dependency list, due to the first chain.)
 	 */
-	list_for_each_entry(entry, &prev->class->locks_after, entry) {
-		if (entry->class == next->class) {
+	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+		if (entry->class == hlock_class(next)) {
 			if (distance == 1)
 				entry->distance = 1;
 			return 2;
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Ok, all validations passed, add the new lock
 	 * to the previous lock's dependency list:
 	 */
-	ret = add_lock_to_list(prev->class, next->class,
-			       &prev->class->locks_after, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+			       &hlock_class(prev)->locks_after,
+			       next->acquire_ip, distance);
 
 	if (!ret)
 		return 0;
 
-	ret = add_lock_to_list(next->class, prev->class,
-			       &next->class->locks_before, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+			       &hlock_class(next)->locks_before,
+			       next->acquire_ip, distance);
 	if (!ret)
 		return 0;
 
 	/*
 	 * Debugging printouts:
 	 */
-	if (verbose(prev->class) || verbose(next->class)) {
+	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
 		graph_unlock();
 		printk("\n new dependency: ");
-		print_lock_name(prev->class);
+		print_lock_name(hlock_class(prev));
 		printk(" => ");
-		print_lock_name(next->class);
+		print_lock_name(hlock_class(next));
 		printk("\n");
 		dump_stack();
 		return graph_lock();
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
				     struct held_lock *hlock,
				     u64 chain_key)
 {
-	struct lock_class *class = hlock->class;
+	struct lock_class *class = hlock_class(hlock);
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr, *hlock_next;
@@ -1554,7 +1670,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = cn;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
-			int lock_id = curr->held_locks[i].class - lock_classes;
+			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1650,7 +1766,7 @@ static void check_chain_key(struct task_struct *curr)
 			WARN_ON(1);
 			return;
 		}
-		id = hlock->class - lock_classes;
+		id = hlock->class_idx - 1;
 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 			return;
@@ -1695,7 +1811,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(this->class->usage_traces + prev_bit, 1);
+	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	printk("\nother info that might help us debug this:\n");
@@ -1714,7 +1830,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
 		return print_usage_bug(curr, this, bad_bit, new_bit);
 	return 1;
 }
@@ -1753,7 +1869,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(this->class, 0);
+	print_lock_dependencies(hlock_class(this), 0);
 
 	printk("\nthe second lock's dependencies:\n");
 	print_lock_dependencies(other, 0);
@@ -1776,7 +1892,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	find_usage_bit = bit;
 	/* fills in <forwards_match> */
-	ret = find_usage_forwards(this->class, 0);
+	ret = find_usage_forwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
@@ -1795,7 +1911,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	find_usage_bit = bit;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(this->class, 0);
+	ret = find_usage_backwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
@@ -1861,7 +1977,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
					LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_SOFTIRQ:
@@ -1886,7 +2002,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
					LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_HARDIRQ_READ:
@@ -1899,7 +2015,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_HARDIRQS, "hard"))
 			return 0;
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1912,7 +2028,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_SOFTIRQS, "soft"))
 			return 0;
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_HARDIRQS:
@@ -1938,7 +2054,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
					LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_SOFTIRQS:
@@ -1964,7 +2080,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
					LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_HARDIRQS_READ:
@@ -1979,7 +2095,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
					LOCK_USED_IN_HARDIRQ, "hard"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1994,7 +2110,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
					LOCK_USED_IN_SOFTIRQ, "soft"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	default:
@@ -2310,7 +2426,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
	 */
-	if (likely(this->class->usage_mask & new_mask))
+	if (likely(hlock_class(this)->usage_mask & new_mask))
 		return 1;
 
 	if (!graph_lock())
@@ -2318,14 +2434,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	/*
	 * Make sure we didnt race:
	 */
-	if (unlikely(this->class->usage_mask & new_mask)) {
+	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 		graph_unlock();
 		return 1;
 	}
 
-	this->class->usage_mask |= new_mask;
+	hlock_class(this)->usage_mask |= new_mask;
 
-	if (!save_trace(this->class->usage_traces + new_bit))
+	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
 		return 0;
 
 	switch (new_bit) {
@@ -2405,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
-			  unsigned long ip)
+			  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
@@ -2459,10 +2575,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 
 	hlock = curr->held_locks + depth;
-
-	hlock->class = class;
+	if (DEBUG_LOCKS_WARN_ON(!class))
+		return 0;
+	hlock->class_idx = class - lock_classes + 1;
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
+	hlock->nest_lock = nest_lock;
 	hlock->trylock = trylock;
 	hlock->read = read;
 	hlock->check = check;
@@ -2574,6 +2692,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+		    unsigned int subclass, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class *class;
+	unsigned int depth;
+	int i;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return 0;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+	class = register_lock_class(lock, subclass, 0);
+	hlock->class_idx = class - lock_classes + 1;
+
+	curr->lockdep_depth = i;
+	curr->curr_chain_key = hlock->prev_chain_key;
+
+	for (; i < depth; i++) {
+		hlock = curr->held_locks + i;
+		if (!__lock_acquire(hlock->instance,
+			hlock_class(hlock)->subclass, hlock->trylock,
+				hlock->read, hlock->check, hlock->hardirqs_off,
+				hlock->nest_lock, hlock->acquire_ip))
+			return 0;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+		return 0;
+	return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2624,9 +2791,9 @@ lock_release_non_nested(struct task_struct *curr,
 	for (i++; i < depth; i++) {
 		hlock = curr->held_locks + i;
 		if (!__lock_acquire(hlock->instance,
-			hlock->class->subclass, hlock->trylock,
+			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip))
 			return 0;
 	}
@@ -2669,7 +2836,7 @@ static int lock_release_nested(struct task_struct *curr,
 #ifdef CONFIG_DEBUG_LOCKDEP
 	hlock->prev_chain_key = 0;
-	hlock->class = NULL;
+	hlock->class_idx = 0;
 	hlock->acquire_ip = 0;
 	hlock->irq_context = 0;
 #endif
@@ -2738,18 +2905,36 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+		       unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	current->lockdep_recursion = 1;
+	check_flags(flags);
+	if (__lock_set_subclass(lock, subclass, ip))
+		check_chain_key(current);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-		  int trylock, int read, int check, unsigned long ip)
+		  int trylock, int read, int check,
+		  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat && !prove_locking))
-		return;
-
 	if (unlikely(current->lockdep_recursion))
 		return;
@@ -2758,7 +2943,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	current->lockdep_recursion = 1;
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), ip);
+		       irqs_disabled_flags(flags), nest_lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
@@ -2770,9 +2955,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat && !prove_locking))
-		return;
-
 	if (unlikely(current->lockdep_recursion))
 		return;
@@ -2845,9 +3027,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
 	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock->class, ip);
+	point = lock_contention_point(hlock_class(hlock), ip);
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (point < ARRAY_SIZE(stats->contention_point))
 		stats->contention_point[i]++;
 
 	if (lock->cpu != smp_processor_id())
@@ -2893,7 +3075,7 @@ __lock_acquired(struct lockdep_map *lock)
 		hlock->holdtime_stamp = now;
 	}
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)
 			lock_time_inc(&stats->read_waittime, waittime);
@@ -2988,6 +3170,7 @@ static void zap_class(struct lock_class *class)
 		list_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);
 
+	class->key = NULL;
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
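The new lockdep_dependency_visit() above is the heart of the fix for the combinatorial explosion in the dependency-graph walks: every walk bumps a global generation id, and a class already stamped with the current id is skipped, so each class is visited at most once per walk instead of once per path through the graph. A hedged, stand-alone sketch of the same idea in plain C (generic names, not kernel code):

struct node {
	unsigned int dep_gen_id;	/* generation stamp, as in struct lock_class */
	/* ... outgoing edges ... */
};

static unsigned int walk_gen;

/* returns nonzero if @n was already visited during the current walk */
static int visited(struct node *n, unsigned int depth)
{
	if (!depth)			/* depth 0 means a fresh walk is starting */
		walk_gen++;
	if (n->dep_gen_id == walk_gen)
		return 1;
	n->dep_gen_id = walk_gen;
	return 0;
}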
kernel/lockdep_internals.h
@@ -17,9 +17,6 @@
  */
 #define MAX_LOCKDEP_ENTRIES	8192UL
 
-#define MAX_LOCKDEP_KEYS_BITS	11
-#define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Various lockdep statistics:
kernel/lockdep_proc.c
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
 {
 }
 
-static unsigned long count_forward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += count_forward_deps(entry->class);
-
-	return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += count_backward_deps(entry->class);
-
-	return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
 	char str[128];
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = count_forward_deps(class);
+	nr_forward_deps = lockdep_count_forward_deps(class);
 	seq_printf(m, " FD:%5ld", nr_forward_deps);
 
-	nr_backward_deps = count_backward_deps(class);
+	nr_backward_deps = lockdep_count_backward_deps(class);
 	seq_printf(m, " BD:%5ld", nr_backward_deps);
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v)
 	for (i = 0; i < chain->depth; i++) {
 		class = lock_chain_get_class(chain, i);
+		if (!class->key)
+			continue;
+
 		seq_printf(m, "[%p] ", class->key);
 		print_name(m, class);
 		seq_puts(m, "\n");
@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 			nr_hardirq_read_unsafe++;
 
-		sum_forward_deps += count_forward_deps(class);
+		sum_forward_deps += lockdep_count_forward_deps(class);
 	}
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
					imbalance, sd, CPU_NEWLY_IDLE,
					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -8000,7 +8006,6 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
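Because all runqueue locks now share a single lock class (the per-rq rq_lock_key and its lockdep_set_class() call are gone), the double-lock sites have to tell lockdep about the nesting explicitly: double_lock_balance() takes whichever runqueue lock ends up being the inner one with SINGLE_DEPTH_NESTING (subclass 1), and the new double_unlock_balance() drops busiest->lock and then resets this_rq->lock back to subclass 0, since it is no longer nested under anything. A hedged sketch of how the converted call sites pair up (this is the calling pattern, not new code):

	double_lock_balance(this_rq, busiest);	/* inner rq lock taken with SINGLE_DEPTH_NESTING */
	/* ... move_tasks() and friends run with both runqueue locks held ... */
	double_unlock_balance(this_rq, busiest);
	/* only this_rq->lock is still held; lock_set_subclass() has marked it
	   as an ordinary subclass-0 hold again */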
kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 		}
 skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;
kernel/spinlock.c
View file @
23a0ee90
...
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
...
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
}
}
EXPORT_SYMBOL
(
_spin_lock_nested
);
EXPORT_SYMBOL
(
_spin_lock_nested
);
unsigned
long
__lockfunc
_spin_lock_irqsave_nested
(
spinlock_t
*
lock
,
int
subclass
)
unsigned
long
__lockfunc
_spin_lock_irqsave_nested
(
spinlock_t
*
lock
,
int
subclass
)
{
{
unsigned
long
flags
;
unsigned
long
flags
;
...
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif
 
 void __lockfunc _spin_unlock(spinlock_t *lock)
...
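_spin_lock_nest_lock() is normally reached through a spin_lock_nest_lock(lock, nest_lock) wrapper added to include/linux/spinlock.h by this same merge. A rough sketch of that wrapper layer, assuming it follows the usual lockdep-conditional macro pattern:

/*
 * Sketch only; the exact macro lives in include/linux/spinlock.h.
 * With lockdep enabled, the outer lock's dep_map is passed down so the
 * inner lock is recorded as nested under it; without lockdep this is an
 * ordinary spin_lock().
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nest_lock(lock, nest_lock)				\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
#else
# define spin_lock_nest_lock(lock, nest_lock)	_spin_lock(lock)
#endif

mm/mmap.c below uses exactly this form: spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem).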
kernel/workqueue.c
View file @ 23a0ee90
...
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
 		f(work);
-		lock_release(&lockdep_map, 1, _THIS_IP_);
-		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
...
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
...
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
 	if (!cwq)
 		return 0;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	prev = NULL;
 	spin_lock_irq(&cwq->lock);
...
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 	might_sleep();
 
-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)
...
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	if (cwq->thread == NULL)
 		return;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*
...
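Each open-coded lock_acquire()/lock_release() pair in this file collapses into lock_map_acquire()/lock_map_release(). These helpers come from the include/linux/lockdep.h side of this merge; a sketch of what they are assumed to expand to, keeping the old argument values (exclusive, fully checked, released as nested) while filling in the new nest_lock parameter with NULL:

/* Sketch of the wrapper macros; the authoritative definitions live in
 * include/linux/lockdep.h. */
#define lock_map_acquire(l)	lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
#define lock_map_release(l)	lock_release(l, 1, _THIS_IP_)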
lib/debug_locks.c
View file @ 23a0ee90
...
@@ -8,6 +8,7 @@
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
+#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
...
@@ -37,6 +38,7 @@ int debug_locks_off(void)
 {
 	if (xchg(&debug_locks, 0)) {
 		if (!debug_locks_silent) {
+			oops_in_progress = 1;
 			console_verbose();
 			return 1;
 		}
...
mm/mmap.c
View file @ 23a0ee90
...
@@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm,
 static DEFINE_MUTEX(mm_all_locks_mutex);
 
-static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
 	if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
 		/*
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock(&anon_vma->lock);
+		spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->lock. If some other vma in this mm shares
...
@@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct anon_vma *anon_vma)
 	}
 }
 
-static void vm_lock_mapping(struct address_space *mapping)
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 {
 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
 		/*
...
@@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		spin_lock(&mapping->i_mmap_lock);
+		spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
 	}
 }
...
@@ -2358,11 +2358,17 @@ int mm_take_all_locks(struct mm_struct *mm)
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (signal_pending(current))
 			goto out_unlock;
-		if (vma->anon_vma)
-			vm_lock_anon_vma(vma->anon_vma);
 		if (vma->vm_file && vma->vm_file->f_mapping)
-			vm_lock_mapping(vma->vm_file->f_mapping);
+			vm_lock_mapping(mm, vma->vm_file->f_mapping);
+	}
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (signal_pending(current))
+			goto out_unlock;
+		if (vma->anon_vma)
+			vm_lock_anon_vma(mm, vma->anon_vma);
 	}
 
 	ret = 0;
 out_unlock:
...
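The mm/mmap.c hunks are the consumer of the new annotation: every anon_vma->lock and i_mmap_lock is taken while mm->mmap_sem is held, so spin_lock_nest_lock() lets lockdep record the whole batch as nested under that one outer lock instead of having to track an unbounded number of held spinlocks. A hedged sketch of the calling pattern (the function name and surrounding logic are illustrative, not part of this diff):

/*
 * Illustrative caller only.  mm_take_all_locks() expects mmap_sem to be
 * held for write, which is the lock passed as nest_lock inside
 * vm_lock_anon_vma() and vm_lock_mapping().
 */
static int example_freeze_address_space(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* 0 on success; bails out if a signal is pending */
	if (!ret) {
		/* ... operate on the fully locked address space ... */
		mm_drop_all_locks(mm);
	}
	up_write(&mm->mmap_sem);
	return ret;
}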