Commit bff7c57c authored by Peter Zijlstra

futex: Simplify double_lock_hb()

We need to make sure that all requeue operations take the hash bucket
locks in the same order to avoid deadlock. Simplify the current
double_lock_hb implementation by making sure hb1 is always the
"smallest" bucket to avoid extra checks.

[André: Add commit description]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: André Almeida <andrealmeid@collabora.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: André Almeida <andrealmeid@collabora.com>
Link: https://lore.kernel.org/r/20210923171111.300673-16-andrealmeid@collabora.com
parent a046f1a0
@@ -239,14 +239,12 @@ extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);
static inline void static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{ {
if (hb1 <= hb2) { if (hb1 > hb2)
spin_lock(&hb1->lock); swap(hb1, hb2);
if (hb1 < hb2)
spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); spin_lock(&hb1->lock);
} else { /* hb1 > hb2 */ if (hb1 != hb2)
spin_lock(&hb2->lock); spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
}
} }
static inline void static inline void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment