Commit 83ab38ef authored by Thomas Gleixner, committed by Peter Zijlstra

jump_label: Fix concurrency issues in static_key_slow_dec()

The commit which addressed the concurrency issues of concurrent
static_key_slow_inc() failed to fix the equivalent issues in
static_key_slow_dec():

CPU0                     CPU1

static_key_slow_dec()
  static_key_slow_try_dec()

	key->enabled == 1
	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
	     return false;

  jump_label_lock();
  if (atomic_dec_and_test(&key->enabled)) {
     --> key->enabled == 0
   __jump_label_update()

			 static_key_slow_dec()
			   static_key_slow_try_dec()

			     key->enabled == 0
			     val = atomic_fetch_add_unless(&key->enabled, -1, 1);

			      --> key->enabled == -1 <- FAIL
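
The root cause is the semantics of atomic_fetch_add_unless(v, a, u): it
refuses the add only when the current value is exactly @u (1 here); every
other value, including 0, gets decremented. Below is a minimal userspace
model of that guard, assuming C11 stdatomic (the kernel primitive is more
involved; this only mirrors the logic):

#include <stdatomic.h>

/* Add @a to @v unless the current value is exactly @u; return the old
 * value. The guard protects the single value @u only: 0 is not
 * protected, so decrementing 0 happily stores -1, as in the race
 * diagram above. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	do {
		if (c == u)
			return c;
	} while (!atomic_compare_exchange_weak(v, &c, c + a));

	return c;
}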

There is another bug in that code: when a concurrent
static_key_slow_inc() is in the middle of enabling the key, it sets
key->enabled to -1, so on the other CPU

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);

will succeed, because -1 is not the guarded value 1, and decrement
key->enabled to -2, which is invalid.

Cure all of this by replacing the atomic_fetch_add_unless() with an
atomic_try_cmpxchg() loop similar to static_key_fast_inc_not_disabled().
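
For reference, the increment side already uses this read/check/try_cmpxchg
pattern; the following is a from-memory sketch of
static_key_fast_inc_not_disabled(), so details may differ from the tree
this commit applies to:

bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	/* Refuse when the key is disabled (v <= 0) and when one more
	 * increment would overflow into the negative range that is
	 * reserved for the slow path. */
	v = atomic_read(&key->enabled);
	do {
		if (v <= 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

	return true;
}

The fixed decrement side below adopts the same shape with the check
inverted: bail out to the slow path for v <= 1 instead of v <= 0.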

[peterz: add WARN_ON_ONCE for the -1 race]
Fixes: 4c5ea0a9 ("locking/static_key: Fix concurrent static_key_slow_inc()")
Reported-by: Yue Sun <samsun1006219@gmail.com>
Reported-by: Xingwei Lee <xrivendell7@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20240610124406.422897838@linutronix.de
parent bb9bb45f
@@ -131,7 +131,7 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
 	STATIC_KEY_CHECK_USE(key);
 	/*
 	 * Negative key->enabled has a special meaning: it sends
-	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
 	 * so it counts as "enabled" in jump_label_update().  Note that
 	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
 	 */
@@ -150,7 +150,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	lockdep_assert_cpus_held();
 
 	/*
-	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
 	 * later calls must wait for the first one to _finish_ the
 	 * jump_label_update() process.  At the same time, however,
 	 * the jump_label_update() call below wants to see
@@ -247,20 +247,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 static bool static_key_slow_try_dec(struct static_key *key)
 {
-	int val;
-
-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-	if (val == 1)
-		return false;
+	int v;
 
 	/*
-	 * The negative count check is valid even when a negative
-	 * key->enabled is in use by static_key_slow_inc(); a
-	 * __static_key_slow_dec() before the first static_key_slow_inc()
-	 * returns is unbalanced, because all other static_key_slow_inc()
-	 * instances block while the update is in progress.
+	 * Go into the slow path if key::enabled is less than or equal than
+	 * one. One is valid to shut down the key, anything less than one
+	 * is an imbalance, which is handled at the call site.
+	 *
+	 * That includes the special case of '-1' which is set in
+	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+	 * fully serialized in the slow path below. By the time this task
+	 * acquires the jump label lock the value is back to one and the
+	 * retry under the lock must succeed.
 	 */
-	WARN(val < 0, "jump label: negative count!\n");
+	v = atomic_read(&key->enabled);
+	do {
+		/*
+		 * Warn about the '-1' case though; since that means a
+		 * decrement is concurrent with a first (0->1) increment. IOW
+		 * people are trying to disable something that wasn't yet fully
+		 * enabled. This suggests an ordering problem on the user side.
+		 */
+		WARN_ON_ONCE(v < 0);
+		if (v <= 1)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
 	return true;
 }
@@ -271,10 +283,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 	if (static_key_slow_try_dec(key))
 		return;
 
-	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled))
+	guard(mutex)(&jump_label_mutex);
+	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
 		jump_label_update(key);
-	jump_label_unlock();
+	else
+		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
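
A quick way to convince yourself of the new bounds: the userspace model
below (same C11 assumptions as the sketch above; try_dec() mirrors the
fixed static_key_slow_try_dec(), with assert() standing in for
WARN_ON_ONCE()) never moves the count below 1 outside the locked slow
path, regardless of interleaving:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int enabled;

/* Model of the fixed fast path: only decrement while the count is
 * above one; 1 (last user) and anything smaller defer to the locked
 * slow path, which is not modeled here. */
static bool try_dec(void)
{
	int v = atomic_load(&enabled);

	do {
		assert(v >= 0);
		if (v <= 1)
			return false;
	} while (!atomic_compare_exchange_weak(&enabled, &v, v - 1));

	return true;
}

int main(void)
{
	atomic_store(&enabled, 3);
	assert(try_dec());	/* 3 -> 2 */
	assert(try_dec());	/* 2 -> 1 */
	assert(!try_dec());	/* 1: slow path, count stays 1 */
	assert(!try_dec());	/* still 1, never 0 or below   */
	return 0;
}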