Commit 151647ab authored by Linus Torvalds

Merge tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Jump label fixes, including a perf events fix for an issue that
   originally manifested as jump label failures but turned out to be a
   serialization bug at the usage site (a short usage sketch follows
   this list)

 - Mark down_write*() helpers as __always_inline, to improve WCHAN
   debuggability

 - Misc cleanups and fixes
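
For readers less familiar with the API being fixed: the counted static-key
(jump label) interface from <linux/jump_label.h> is typically used as in the
sketch below. This is the documented usage pattern, not code from this merge;
the key name, my_hot_path() and do_rare_thing() are invented for illustration.

/* Illustrative only: a hypothetical user of a counted static key. */
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void do_rare_thing(void) { }	/* hypothetical rare-path work */

void my_feature_user_register(void)
{
	/* May patch kernel text; serialized internally by jump_label_mutex. */
	static_branch_inc(&my_feature_key);
}

void my_feature_user_unregister(void)
{
	/* The last decrement patches the branch back out. */
	static_branch_dec(&my_feature_key);
}

void my_hot_path(void)
{
	/* Compiles to a no-op that is live-patched into a jump when enabled. */
	if (static_branch_unlikely(&my_feature_key))
		do_rare_thing();
}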

* tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Add __always_inline annotation to __down_write_common() and inlined callers
  jump_label: Simplify and clarify static_key_fast_inc_cpus_locked()
  jump_label: Clarify condition in static_key_fast_inc_not_disabled()
  jump_label: Fix concurrency issues in static_key_slow_dec()
  perf/x86: Serialize set_attr_rdpmc()
  cleanup: Standardize the header guard define's name
parents 923a327e e81859fe
@@ -2547,6 +2547,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 			      struct device_attribute *attr,
 			      const char *buf, size_t count)
 {
+	static DEFINE_MUTEX(rdpmc_mutex);
 	unsigned long val;
 	ssize_t ret;
 
@@ -2560,6 +2561,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (x86_pmu.attr_rdpmc_broken)
 		return -ENOTSUPP;
 
+	guard(mutex)(&rdpmc_mutex);
+
 	if (val != x86_pmu.attr_rdpmc) {
 		/*
 		 * Changing into or out of never available or always available,
...
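For context, the new locking in set_attr_rdpmc() uses the scope-based
guard(mutex)() helper from <linux/cleanup.h>, which releases the mutex
automatically on every return path. A rough user-space analogue of the
mechanism (the compiler 'cleanup' attribute that the kernel macros build on)
might look like the sketch below; unlock_cb and set_attr_like are invented
names for illustration only.

/* Sketch only: user-space approximation of guard(mutex)() semantics. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cb(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);	/* runs when 'g' goes out of scope */
}

static int set_attr_like(int val)
{
	__attribute__((cleanup(unlock_cb))) pthread_mutex_t *g = &cfg_mutex;

	pthread_mutex_lock(g);
	if (val < 0 || val > 2)
		return -1;		/* early return still unlocks */

	printf("rdpmc mode set to %d\n", val);
	return 0;			/* unlock happens here as well */
}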
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GUARDS_H
-#define __LINUX_GUARDS_H
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
 
 #include <linux/compiler.h>
@@ -250,4 +250,4 @@ __DEFINE_LOCK_GUARD_0(_name, _lock)
 { return class_##_name##_lock_ptr(_T); }
 
-#endif /* __LINUX_GUARDS_H */
+#endif /* _LINUX_CLEANUP_H */
@@ -131,13 +131,16 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
 	STATIC_KEY_CHECK_USE(key);
 	/*
 	 * Negative key->enabled has a special meaning: it sends
-	 * static_key_slow_inc() down the slow path, and it is non-zero
-	 * so it counts as "enabled" in jump_label_update(). Note that
-	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().
+	 *
+	 * The INT_MAX overflow condition is either used by the networking
+	 * code to reset or detected in the slow path of
+	 * static_key_slow_inc_cpuslocked().
 	 */
 	v = atomic_read(&key->enabled);
 	do {
-		if (v <= 0 || (v + 1) < 0)
+		if (v <= 0 || v == INT_MAX)
 			return false;
 	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
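The new condition reads more directly than (v + 1) < 0: refuse the fast path
when the key is disabled or in transition (v <= 0) and when an increment would
overflow (v == INT_MAX). A user-space analogue of the loop, using C11 atomics
in place of atomic_try_cmpxchg(), is sketched below for illustration only.

/* Sketch only: C11-atomics analogue of the increment-if-enabled loop. */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool fast_inc_not_disabled(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		/* v <= 0: disabled, or -1 meaning "update in progress";
		 * v == INT_MAX: a further increment would overflow, so
		 * defer to the slow path. */
		if (v <= 0 || v == INT_MAX)
			return false;
	} while (!atomic_compare_exchange_weak(enabled, &v, v + 1));

	return true;
}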
@@ -150,7 +153,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	lockdep_assert_cpus_held();
 
 	/*
-	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
 	 * later calls must wait for the first one to _finish_ the
 	 * jump_label_update() process. At the same time, however,
 	 * the jump_label_update() call below wants to see
@@ -159,22 +162,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	if (static_key_fast_inc_not_disabled(key))
 		return true;
 
-	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		atomic_set(&key->enabled, -1);
+	guard(mutex)(&jump_label_mutex);
+	/* Try to mark it as 'enabling in progress. */
+	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
 		jump_label_update(key);
 		/*
-		 * Ensure that if the above cmpxchg loop observes our positive
-		 * value, it must also observe all the text changes.
+		 * Ensure that when static_key_fast_inc_not_disabled() or
+		 * static_key_slow_try_dec() observe the positive value,
+		 * they must also observe all the text changes.
 		 */
 		atomic_set_release(&key->enabled, 1);
 	} else {
-		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
-			jump_label_unlock();
+		/*
+		 * While holding the mutex this should never observe
+		 * anything else than a value >= 1 and succeed
+		 */
+		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
 			return false;
-		}
 	}
-	jump_label_unlock();
 	return true;
 }
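The rewritten slow path marks the key with -1 ("enabling in progress") via
cmpxchg, patches the code, and only then publishes enabled == 1 with a release
store, while guard(mutex) keeps everything serialized. A self-contained
user-space sketch of that protocol, with invented names and a pthread mutex
standing in for jump_label_mutex, follows; it is illustrative only.

/* Sketch only: the "-1 means enable in progress" protocol in user space. */
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t patch_lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_inc(atomic_int *enabled)
{
	pthread_mutex_lock(&patch_lock);

	int zero = 0;
	if (atomic_compare_exchange_strong(enabled, &zero, -1)) {
		/* First enabler: "patch the code" while lock-free fast paths
		 * see -1 and back off, then publish with release ordering. */
		atomic_store_explicit(enabled, 1, memory_order_release);
	} else {
		/* Already enabled by an earlier lock holder; with the lock
		 * held the count can only be >= 1 here, so just increment. */
		atomic_fetch_add(enabled, 1);
	}

	pthread_mutex_unlock(&patch_lock);
}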
@@ -247,20 +252,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 static bool static_key_slow_try_dec(struct static_key *key)
 {
-	int val;
-
-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-	if (val == 1)
-		return false;
+	int v;
 
 	/*
-	 * The negative count check is valid even when a negative
-	 * key->enabled is in use by static_key_slow_inc(); a
-	 * __static_key_slow_dec() before the first static_key_slow_inc()
-	 * returns is unbalanced, because all other static_key_slow_inc()
-	 * instances block while the update is in progress.
+	 * Go into the slow path if key::enabled is less than or equal than
+	 * one. One is valid to shut down the key, anything less than one
+	 * is an imbalance, which is handled at the call site.
+	 *
+	 * That includes the special case of '-1' which is set in
+	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+	 * fully serialized in the slow path below. By the time this task
+	 * acquires the jump label lock the value is back to one and the
+	 * retry under the lock must succeed.
 	 */
-	WARN(val < 0, "jump label: negative count!\n");
+	v = atomic_read(&key->enabled);
+	do {
+		/*
+		 * Warn about the '-1' case though; since that means a
+		 * decrement is concurrent with a first (0->1) increment. IOW
+		 * people are trying to disable something that wasn't yet fully
+		 * enabled. This suggests an ordering problem on the user side.
+		 */
+		WARN_ON_ONCE(v < 0);
+		if (v <= 1)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
 	return true;
 }
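The old atomic_fetch_add_unless() version could race with an in-progress
enable (key->enabled == -1) and decrement it. The new loop never touches
values <= 1 and only warns on a negative value. A user-space analogue, for
illustration only, using C11 atomics:

/* Sketch only: C11-atomics analogue of the new try-dec loop. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool slow_try_dec(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		if (v < 0)
			fprintf(stderr, "disable raced with first enable\n");
		/* 1 -> 0 (and any imbalance) is left to the serialized
		 * slow path, which retries under the lock. */
		if (v <= 1)
			return false;
	} while (!atomic_compare_exchange_weak(enabled, &v, v - 1));

	return true;
}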
@@ -271,10 +288,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 	if (static_key_slow_try_dec(key))
 		return;
 
-	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled))
+	guard(mutex)(&jump_label_mutex);
+	if (atomic_cmpxchg(&key->enabled, 1, 0))
 		jump_label_update(key);
-	jump_label_unlock();
+	else
+		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
...
@@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline int __down_write_common(struct rw_semaphore *sem, int state)
+static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
 	int ret = 0;
 
@@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
 	return ret;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static __always_inline void __down_write(struct rw_semaphore *sem)
 {
 	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static __always_inline int __down_write_killable(struct rw_semaphore *sem)
 {
 	return __down_write_common(sem, TASK_KILLABLE);
 }
...
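The __always_inline change matters because a blocked task's wait channel
(WCHAN, visible via /proc/<pid>/wchan) reports the first non-inlined function
on the stack; if the compiler emits __down_write_common() out of line, every
blocked writer shows that shared helper instead of the caller such as
down_write() or down_write_killable(). In the kernel, __always_inline expands
roughly as in the sketch below; blocking_helper() and demo() are invented
names for illustration.

/* Sketch only: the attribute behind __always_inline and a toy caller. */
#define __always_inline	inline __attribute__((__always_inline__))

static __always_inline int blocking_helper(int state)
{
	/* Forced into each caller, so backtraces and WCHAN attribute the
	 * wait to the caller rather than to this shared helper. */
	return state;
}

int demo(void)
{
	return blocking_helper(2);
}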