Commit e3b22bc3 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Thomas Gleixner:
 "A single fix to address a race in the static key logic"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/static_key: Fix concurrent static_key_slow_inc()
parents 2de23071 4c5ea0a9
@@ -117,13 +117,18 @@ struct module;
#include <linux/atomic.h>
#ifdef HAVE_JUMP_LABEL
static inline int static_key_count(struct static_key *key) static inline int static_key_count(struct static_key *key)
{ {
return atomic_read(&key->enabled); /*
* -1 means the first static_key_slow_inc() is in progress.
* static_key_enabled() must return true, so return 1 here.
*/
int n = atomic_read(&key->enabled);
return n >= 0 ? n : 1;
} }
#ifdef HAVE_JUMP_LABEL
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
#else /* !HAVE_JUMP_LABEL */
/*
 * Read the enable count of a static key (no jump-label support).
 *
 * Without code patching there is no in-progress window to hide, so the
 * raw atomic counter is the whole truth.
 */
static inline int static_key_count(struct static_key *key)
{
	return atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void) static __always_inline void jump_label_init(void)
{ {
static_key_initialized = true; static_key_initialized = true;
......
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
void static_key_slow_inc(struct static_key *key) void static_key_slow_inc(struct static_key *key)
{ {
int v, v1;
STATIC_KEY_CHECK_USE(); STATIC_KEY_CHECK_USE();
if (atomic_inc_not_zero(&key->enabled))
/*
* Careful if we get concurrent static_key_slow_inc() calls;
* later calls must wait for the first one to _finish_ the
* jump_label_update() process. At the same time, however,
* the jump_label_update() call below wants to see
* static_key_enabled(&key) for jumps to be updated properly.
*
* So give a special meaning to negative key->enabled: it sends
* static_key_slow_inc() down the slow path, and it is non-zero
* so it counts as "enabled" in jump_label_update(). Note that
* atomic_inc_unless_negative() checks >= 0, so roll our own.
*/
for (v = atomic_read(&key->enabled); v > 0; v = v1) {
v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
if (likely(v1 == v))
return; return;
}
jump_label_lock(); jump_label_lock();
if (atomic_inc_return(&key->enabled) == 1) if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
jump_label_update(key); jump_label_update(key);
atomic_set(&key->enabled, 1);
} else {
atomic_inc(&key->enabled);
}
jump_label_unlock(); jump_label_unlock();
} }
EXPORT_SYMBOL_GPL(static_key_slow_inc); EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
static void __static_key_slow_dec(struct static_key *key, static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit, struct delayed_work *work) unsigned long rate_limit, struct delayed_work *work)
{ {
/*
* The negative count check is valid even when a negative
* key->enabled is in use by static_key_slow_inc(); a
* __static_key_slow_dec() before the first static_key_slow_inc()
* returns is unbalanced, because all other static_key_slow_inc()
* instances block while the update is in progress.
*/
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0, WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n"); "jump label: negative count!\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment