Commit 1c19b68a authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking changes from Ingo Molnar:
 "The main changes in this cycle were:

   - pvqspinlock statistics fixes (Davidlohr Bueso)

   - flip atomic_fetch_or() arguments (Peter Zijlstra)

   - locktorture simplification (Paul E. McKenney)

   - documentation updates (SeongJae Park, David Howells, Davidlohr
     Bueso, Paul E McKenney, Peter Zijlstra, Will Deacon)

   - various fixes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomics: Flip atomic_fetch_or() arguments
  locking/pvqspinlock: Robustify init_qspinlock_stat()
  locking/pvqspinlock: Avoid double resetting of stats
  locking/locktorture: Simplify the torture_runnable computation
  locking/Documentation: Clarify that ACQUIRE applies to loads, RELEASE applies to stores
  locking/Documentation: State purpose of memory-barriers.txt
  locking/Documentation: Add disclaimer
  locking/Documentation/lockdep: Fix spelling mistakes
  locking/lockdep: Deinline register_lock_class(), save 2328 bytes
  locking/locktorture: Fix NULL pointer dereference for cleanup paths
  locking/locktorture: Fix deboosting NULL pointer dereference
  locking/Documentation: Mention smp_cond_acquire()
  locking/Documentation: Insert white spaces consistently
  locking/Documentation: Fix formatting inconsistencies
  locking/Documentation: Add missed subsection in TOC
  locking/Documentation: Fix missed s/lock/acquire renames
  locking/Documentation: Clarify relationship of barrier() to control dependencies
parents 49817c33 a1cc5bcf
@@ -97,7 +97,7 @@ between any two lock-classes:
   <hardirq-safe> -> <hardirq-unsafe>
   <softirq-safe> -> <softirq-unsafe>
 
-The first rule comes from the fact the a hardirq-safe lock could be
+The first rule comes from the fact that a hardirq-safe lock could be
 taken by a hardirq context, interrupting a hardirq-unsafe lock - and
 thus could result in a lock inversion deadlock. Likewise, a softirq-safe
 lock could be taken by an softirq context, interrupting a softirq-unsafe
@@ -220,7 +220,7 @@ calculated, which hash is unique for every lock chain. The hash value,
 when the chain is validated for the first time, is then put into a hash
 table, which hash-table can be checked in a lockfree manner. If the
 locking chain occurs again later on, the hash table tells us that we
-dont have to validate the chain again.
+don't have to validate the chain again.
 
 Troubleshooting:
 ----------------
...
This diff is collapsed.
@@ -560,11 +560,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 /**
  * atomic_fetch_or - perform *p |= mask and return old value of *p
- * @p: pointer to atomic_t
  * @mask: mask to OR on the atomic_t
+ * @p: pointer to atomic_t
  */
 #ifndef atomic_fetch_or
-static inline int atomic_fetch_or(atomic_t *p, int mask)
+static inline int atomic_fetch_or(int mask, atomic_t *p)
 {
         int old, val = atomic_read(p);
...
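For readers who have not seen the primitive before, here is a minimal userspace sketch of what a fetch-or does and of the new operand order (mask first, pointer second). It uses C11 atomics as a stand-in for the kernel's atomic_t, and the helper name my_fetch_or is invented for illustration; it is not part of the kernel API.

/* Userspace sketch only: C11 atomic_int stands in for the kernel's atomic_t. */
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical helper mirroring the flipped kernel signature: mask first. */
static int my_fetch_or(int mask, atomic_int *p)
{
        /* Atomically perform *p |= mask and return the value *p held before. */
        return atomic_fetch_or(p, mask);
}

int main(void)
{
        atomic_int flags = 0x01;
        int old = my_fetch_or(0x04, &flags);    /* mask first, pointer second */

        printf("old=0x%x new=0x%x\n", old, atomic_load(&flags));  /* old=0x1 new=0x5 */
        return 0;
}

The flip is purely about readability: atomic_fetch_or(BIT(bit), dep) now reads like "*dep |= BIT(bit)", which is how the tick-sched callers further down use it.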
@@ -708,7 +708,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
  * yet. Otherwise we look it up. We cache the result in the lock object
  * itself, so actual lookup of the hash should be once per lock object.
  */
-static inline struct lock_class *
+static struct lock_class *
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
         struct lockdep_subclass_key *key;
...
@@ -75,12 +75,7 @@ struct lock_stress_stats {
         long n_lock_acquired;
 };
-#if defined(MODULE)
-#define LOCKTORTURE_RUNNABLE_INIT 1
-#else
-#define LOCKTORTURE_RUNNABLE_INIT 0
-#endif
-int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
+int torture_runnable = IS_ENABLED(MODULE);
 module_param(torture_runnable, int, 0444);
 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
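The one-line IS_ENABLED() initializer above replaces the five-line preprocessor block it removes. As a rough illustration of the kind of "is this macro defined to 1?" trick that the kernel's kconfig.h builds IS_ENABLED() on, here is a simplified userspace sketch; MY_IS_ENABLED, MODULE_BUILD and the helper macro names are invented for the example, and the real kernel macros are more elaborate.

#include <stdio.h>

/*
 * If cfg expands to 1, PLACEHOLDER_##cfg becomes "0," and the variadic
 * helper picks up the trailing 1; otherwise it sees one junk token
 * followed by 1, and picks up the default 0.
 */
#define PLACEHOLDER_1 0,
#define TAKE_SECOND_ARG(ignored, val, ...) val
#define IS_ONE_(arg_or_junk) TAKE_SECOND_ARG(arg_or_junk 1, 0, 0)
#define IS_ONE(val) IS_ONE_(PLACEHOLDER_##val)
#define MY_IS_ENABLED(cfg) IS_ONE(cfg)

#define MODULE_BUILD 1          /* comment this out and the value flips to 0 */

static const int runnable = MY_IS_ENABLED(MODULE_BUILD);

int main(void)
{
        printf("runnable = %d\n", runnable);    /* prints 1 */
        return 0;
}

The payoff is the same as in the hunk above: the decision collapses into an ordinary C expression that can sit directly in an initializer, with no #if/#else block.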
@@ -394,12 +389,12 @@ static void torture_rtmutex_boost(struct torture_random_state *trsp)
         if (!rt_task(current)) {
                 /*
-                 * (1) Boost priority once every ~50k operations. When the
+                 * Boost priority once every ~50k operations. When the
                  * task tries to take the lock, the rtmutex it will account
                  * for the new priority, and do any corresponding pi-dance.
                  */
-                if (!(torture_random(trsp) %
+                if (trsp && !(torture_random(trsp) %
                       (cxt.nrealwriters_stress * factor))) {
                         policy = SCHED_FIFO;
                         param.sched_priority = MAX_RT_PRIO - 1;
                 } else /* common case, do nothing */
@@ -748,6 +743,15 @@ static void lock_torture_cleanup(void)
         if (torture_cleanup_begin())
                 return;
+        /*
+         * Indicates early cleanup, meaning that the test has not run,
+         * such as when passing bogus args when loading the module. As
+         * such, only perform the underlying torture-specific cleanups,
+         * and avoid anything related to locktorture.
+         */
+        if (!cxt.lwsa)
+                goto end;
+
         if (writer_tasks) {
                 for (i = 0; i < cxt.nrealwriters_stress; i++)
                         torture_stop_kthread(lock_torture_writer,
@@ -776,6 +780,7 @@ static void lock_torture_cleanup(void)
         else
                 lock_torture_print_module_parms(cxt.cur_ops,
                                                 "End of test: SUCCESS");
+end:
         torture_cleanup_end();
 }
@@ -870,6 +875,7 @@ static int __init lock_torture_init(void)
                         VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
                         firsterr = -ENOMEM;
                         kfree(cxt.lwsa);
+                        cxt.lwsa = NULL;
                         goto unwind;
                 }
@@ -878,6 +884,7 @@ static int __init lock_torture_init(void)
                         cxt.lrsa[i].n_lock_acquired = 0;
                 }
         }
+
         lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
         /* Prepare torture context. */
...
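Taken together, the locktorture cleanup/init hunks above implement one pattern: lock_torture_init() NULLs cxt.lwsa immediately after freeing it on an allocation failure, and lock_torture_cleanup() treats a NULL cxt.lwsa as "the test never ran" and jumps straight to the common exit label. A minimal userspace sketch of that shape follows; the names ctx, setup() and teardown() are invented for the example.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for the torture context: a single optional allocation. */
static struct {
        long *stats;
        int nstats;
} ctx;

static int setup(int nstats)
{
        ctx.stats = calloc(nstats, sizeof(*ctx.stats));
        if (!ctx.stats)
                return -1;      /* ctx.stats stays NULL: the test never started */
        ctx.nstats = nstats;
        return 0;
}

static void teardown(void)
{
        /* Early cleanup: nothing was set up, so skip the per-test teardown. */
        if (!ctx.stats)
                goto end;

        printf("tearing down %d stat slots\n", ctx.nstats);
        free(ctx.stats);
        ctx.stats = NULL;       /* no dangling pointer, no double free */
end:
        printf("common cleanup done\n");
}

int main(void)
{
        if (setup(8))
                fprintf(stderr, "setup failed\n");
        teardown();             /* safe whether or not setup() succeeded */
        return 0;
}

Resetting the pointer right after free() is what makes the cleanup-side check reliable; without it the cleanup path could act on a stale pointer.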
@@ -191,8 +191,6 @@ static ssize_t qstat_write(struct file *file, const char __user *user_buf,
                 for (i = 0 ; i < qstat_num; i++)
                         WRITE_ONCE(ptr[i], 0);
-                for (i = 0 ; i < qstat_num; i++)
-                        WRITE_ONCE(ptr[i], 0);
         }
         return count;
 }
@@ -214,10 +212,8 @@ static int __init init_qspinlock_stat(void)
         struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
         int i;
-        if (!d_qstat) {
-                pr_warn("Could not create 'qlockstat' debugfs directory\n");
-                return 0;
-        }
+        if (!d_qstat)
+                goto out;
         /*
          * Create the debugfs files
@@ -227,12 +223,20 @@ static int __init init_qspinlock_stat(void)
          * performance.
          */
         for (i = 0; i < qstat_num; i++)
-                debugfs_create_file(qstat_names[i], 0400, d_qstat,
-                                    (void *)(long)i, &fops_qstat);
+                if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
+                                         (void *)(long)i, &fops_qstat))
+                        goto fail_undo;
+
+        if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
+                                 (void *)(long)qstat_reset_cnts, &fops_qstat))
+                goto fail_undo;
-        debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
-                            (void *)(long)qstat_reset_cnts, &fops_qstat);
         return 0;
+fail_undo:
+        debugfs_remove_recursive(d_qstat);
+out:
+        pr_warn("Could not create 'qlockstat' debugfs entries\n");
+        return -ENOMEM;
 }
 fs_initcall(init_qspinlock_stat);
...
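The init_qspinlock_stat() rework above is the usual kernel error-unwinding shape: create entries one at a time and, on the first failure, tear down everything created so far through a single fail_undo/out exit path. A hedged userspace sketch of the same shape, with invented names (create_entry(), remove_all(), init_entries()) standing in for the debugfs calls, might look like this:

#include <stdio.h>
#include <stdlib.h>

#define NENTRIES 4

static char *entries[NENTRIES];

/* Invented stand-ins for debugfs_create_file()/debugfs_remove_recursive(). */
static char *create_entry(int i)
{
        if (i == 2)             /* simulate a failure on the third entry */
                return NULL;
        return malloc(16);
}

static void remove_all(void)
{
        for (int i = 0; i < NENTRIES; i++) {
                free(entries[i]);
                entries[i] = NULL;
        }
}

static int init_entries(void)
{
        for (int i = 0; i < NENTRIES; i++) {
                entries[i] = create_entry(i);
                if (!entries[i])
                        goto fail_undo; /* roll back everything created so far */
        }
        return 0;

fail_undo:
        remove_all();
        fprintf(stderr, "could not create entries\n");
        return -1;
}

int main(void)
{
        return init_entries() ? 1 : 0;
}

Funneling every failure through one label keeps the rollback in a single place, which is also why the reworked function can report the problem once and return -ENOMEM from a single spot.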
@@ -262,7 +262,7 @@ static void tick_nohz_dep_set_all(atomic_t *dep,
 {
         int prev;
-        prev = atomic_fetch_or(dep, BIT(bit));
+        prev = atomic_fetch_or(BIT(bit), dep);
         if (!prev)
                 tick_nohz_full_kick_all();
 }
@@ -292,7 +292,7 @@ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
         ts = per_cpu_ptr(&tick_cpu_sched, cpu);
-        prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
+        prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
         if (!prev) {
                 preempt_disable();
                 /* Perf needs local kick that is NMI safe */
...
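These two call sites also show why the primitive returns the old value: only the caller that takes the dependency mask from empty to non-empty (prev == 0) pays for the kick. Below is a small pthread sketch of that "only the first setter acts" pattern, again using C11 atomics rather than the kernel API (build with -pthread); all names are invented for the example.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dep_mask;
static atomic_int kicks;        /* counts how many threads actually "kicked" */

static void *set_dep(void *arg)
{
        int bit = (int)(long)arg;
        int prev = atomic_fetch_or(&dep_mask, 1 << bit);

        if (!prev)              /* mask was empty: this caller does the kick */
                atomic_fetch_add(&kicks, 1);
        return NULL;
}

int main(void)
{
        pthread_t tid[4];

        for (long i = 0; i < 4; i++)
                pthread_create(&tid[i], NULL, set_dep, (void *)i);
        for (int i = 0; i < 4; i++)
                pthread_join(tid[i], NULL);

        /* Exactly one thread observed prev == 0, so kicks is always 1. */
        printf("mask=0x%x kicks=%d\n", atomic_load(&dep_mask), atomic_load(&kicks));
        return 0;
}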