Commit 6fedc280 authored by Linus Torvalds

Merge tag 'rcu.2021.11.01a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull RCU updates from Paul McKenney:

 - Miscellaneous fixes

 - Torture-test updates for smp_call_function(), most notably improved
   checking of module parameters.

 - Tasks-trace RCU updates that fix a number of rare but important
   race-condition bugs.

 - Other torture-test updates, most notably better checking of module
   parameters. In addition, rcutorture may once again be run on
   CONFIG_PREEMPT_RT kernels.

 - Torture-test scripting updates, most notably specifying the new
   CONFIG_KCSAN_STRICT kconfig option rather than maintaining an
   ever-changing list of individual KCSAN kconfig options.

* tag 'rcu.2021.11.01a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (46 commits)
  rcu: Fix rcu_dynticks_curr_cpu_in_eqs() vs noinstr
  rcu: Always inline rcu_dynticks_task*_{enter,exit}()
  torture: Make kvm-remote.sh print size of downloaded tarball
  torture: Allot 1G of memory for scftorture runs
  tools/rcu: Add an extract-stall script
  scftorture: Warn on individual scf_torture_init() error conditions
  scftorture: Count reschedule IPIs
  scftorture: Account for weight_resched when checking for all zeroes
  scftorture: Shut down if nonsensical arguments given
  scftorture: Allow zero weight to exclude an smp_call_function*() category
  rcu: Avoid unneeded function call in rcu_read_unlock()
  rcu-tasks: Update comments to cond_resched_tasks_rcu_qs()
  rcu-tasks: Fix IPI failure handling in trc_wait_for_one_reader
  rcu-tasks: Fix read-side primitives comment for call_rcu_tasks_trace
  rcu-tasks: Clarify read side section info for rcu_tasks_rude GP primitives
  rcu-tasks: Correct comparisons for CPU numbers in show_stalled_task_trace
  rcu-tasks: Correct firstreport usage in check_all_holdout_tasks_trace
  rcu-tasks: Fix s/rcu_add_holdout/trc_add_holdout/ typo in comment
  rcu-tasks: Move RTGS_WAIT_CBS to beginning of rcu_tasks_kthread() loop
  rcu-tasks: Fix s/instruction/instructions/ typo in comment
  ...
parents 79ef0c00 dd1277d2
@@ -202,49 +202,44 @@ newly arrived RCU callbacks against future grace periods:
  1 static void rcu_prepare_for_idle(void)
  2 {
  3   bool needwake;
- 4   struct rcu_data *rdp;
- 5   struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- 6   struct rcu_node *rnp;
- 7   struct rcu_state *rsp;
- 8   int tne;
- 9
-10   if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
-11       rcu_is_nocb_cpu(smp_processor_id()))
-12     return;
+ 4   struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ 5   struct rcu_node *rnp;
+ 6   int tne;
+ 7
+ 8   lockdep_assert_irqs_disabled();
+ 9   if (rcu_rdp_is_offloaded(rdp))
+10     return;
+11
+12   /* Handle nohz enablement switches conservatively. */
 13   tne = READ_ONCE(tick_nohz_active);
-14   if (tne != rdtp->tick_nohz_enabled_snap) {
-15     if (rcu_cpu_has_callbacks(NULL))
-16       invoke_rcu_core();
-17     rdtp->tick_nohz_enabled_snap = tne;
+14   if (tne != rdp->tick_nohz_enabled_snap) {
+15     if (!rcu_segcblist_empty(&rdp->cblist))
+16       invoke_rcu_core(); /* force nohz to see update. */
+17     rdp->tick_nohz_enabled_snap = tne;
 18     return;
 19   }
 20   if (!tne)
 21     return;
-22   if (rdtp->all_lazy &&
-23       rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
-24     rdtp->all_lazy = false;
-25     rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
-26     invoke_rcu_core();
-27     return;
-28   }
-29   if (rdtp->last_accelerate == jiffies)
-30     return;
-31   rdtp->last_accelerate = jiffies;
-32   for_each_rcu_flavor(rsp) {
-33     rdp = this_cpu_ptr(rsp->rda);
-34     if (rcu_segcblist_pend_cbs(&rdp->cblist))
-35       continue;
-36     rnp = rdp->mynode;
-37     raw_spin_lock_rcu_node(rnp);
-38     needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-39     raw_spin_unlock_rcu_node(rnp);
-40     if (needwake)
-41       rcu_gp_kthread_wake(rsp);
-42   }
-43 }
+22
+23   /*
+24    * If we have not yet accelerated this jiffy, accelerate all
+25    * callbacks on this CPU.
+26    */
+27   if (rdp->last_accelerate == jiffies)
+28     return;
+29   rdp->last_accelerate = jiffies;
+30   if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
+31     rnp = rdp->mynode;
+32     raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+33     needwake = rcu_accelerate_cbs(rnp, rdp);
+34     raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+35     if (needwake)
+36       rcu_gp_kthread_wake();
+37   }
+38 }

 But the only part of ``rcu_prepare_for_idle()`` that really matters for
-this discussion are lines 37–39. We will therefore abbreviate this
+this discussion are lines 32–34. We will therefore abbreviate this
 function as follows:

 .. kernel-figure:: rcu_node-lock.svg
......
@@ -96,6 +96,16 @@ warnings:
   the ``rcu_.*timer wakeup didn't happen for`` console-log message,
   which will include additional debugging information.

+- A low-level kernel issue that either fails to invoke one of the
+  variants of rcu_user_enter(), rcu_user_exit(), rcu_idle_enter(),
+  rcu_idle_exit(), rcu_irq_enter(), or rcu_irq_exit() on the one
+  hand, or that invokes one of them too many times on the other.
+  Historically, the most frequent issue has been an omission
+  of either irq_enter() or irq_exit(), which in turn invoke
+  rcu_irq_enter() or rcu_irq_exit(), respectively. Building your
+  kernel with CONFIG_RCU_EQS_DEBUG=y can help track down these types
+  of issues, which sometimes arise in architecture-specific code.
+
 - A bug in the RCU implementation.

 - A hardware failure. This is quite unlikely, but has occurred
......
@@ -5,7 +5,6 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
-CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
......
@@ -119,7 +119,6 @@ CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_STACKTRACE=y
-# CONFIG_RCU_CPU_STALL_INFO is not set
 CONFIG_RCU_TRACE=y
 # CONFIG_FTRACE is not set
 # CONFIG_LD_NO_RELAX is not set
......
@@ -71,6 +71,7 @@ static inline void __rcu_read_lock(void)
 static inline void __rcu_read_unlock(void)
 {
         preempt_enable();
-        rcu_read_unlock_strict();
+        if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+                rcu_read_unlock_strict();
 }
......
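The hunk above replaces an unconditional call with one gated by IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD), so non-strict builds no longer pay for a function call that immediately returns (the matching runtime check is dropped from rcu_read_unlock_strict() itself further down). As a standalone illustration of that compile-time-gating pattern, here is a minimal userspace sketch; IS_ENABLED() is stubbed and CONFIG_STRICT_SKETCH is a made-up option, not anything from this commit:

#include <stdio.h>

/*
 * Standalone sketch (not kernel code): in the kernel, IS_ENABLED(CONFIG_FOO)
 * folds to a compile-time 0 or 1, so gating the call at the call site lets
 * the compiler drop both the branch and the callee from non-strict builds.
 */
#define CONFIG_STRICT_SKETCH 0  /* flip to 1 to model a strict-GP build */
#define IS_ENABLED(option) (option)

static void read_unlock_strict_callout(void)
{
        puts("strict grace-period callout");
}

static void read_unlock_sketch(void)
{
        /* preempt_enable() would precede this in the real function. */
        if (IS_ENABLED(CONFIG_STRICT_SKETCH))
                read_unlock_strict_callout();   /* dead code when 0 */
}

int main(void)
{
        read_unlock_sketch();
        return 0;
}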
@@ -31,7 +31,7 @@ static inline int rcu_read_lock_trace_held(void)
 #ifdef CONFIG_TASKS_TRACE_RCU

-void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
+void rcu_read_unlock_trace_special(struct task_struct *t);

 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -80,7 +80,8 @@ static inline void rcu_read_unlock_trace(void)
                 WRITE_ONCE(t->trc_reader_nesting, nesting);
                 return;  // We assume shallow reader nesting.
         }
-        rcu_read_unlock_trace_special(t, nesting);
+        WARN_ON_ONCE(nesting != 0);
+        rcu_read_unlock_trace_special(t);
 }

 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
......
@@ -47,6 +47,14 @@ do { \
 } while (0)
 void verbose_torout_sleep(void);

+#define torture_init_error(firsterr)					\
+({									\
+	int ___firsterr = (firsterr);					\
+									\
+	WARN_ONCE(!IS_MODULE(CONFIG_RCU_TORTURE_TEST) && ___firsterr < 0, "Torture-test initialization failed with error code %d\n", ___firsterr); \
+	___firsterr < 0;						\
+})
+
 /* Definitions for online/offline exerciser. */
 #ifdef CONFIG_HOTPLUG_CPU
 int torture_num_online_cpus(void);
......
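The torture_init_error() macro added above centralizes handling of failed torture-test initialization steps: it warns (for built-in tests) when the error code is negative and evaluates to true exactly when the caller should unwind. The hunks that follow convert the individual tests' "if (firsterr)" checks to it. As a standalone illustration, a minimal userspace sketch of the same control flow, with WARN_ONCE() and IS_MODULE() stubbed out; BUILT_IN and fake_torture_onoff_init() are made up for this sketch:

#include <stdio.h>

/*
 * Userspace mock-up, not kernel code: the real macro (in the hunk above)
 * uses WARN_ONCE() and IS_MODULE(CONFIG_RCU_TORTURE_TEST); both are
 * stubbed here so the "if (torture_init_error(firsterr)) goto unwind;"
 * pattern can be run standalone.
 */
#define BUILT_IN 1      /* stand-in for !IS_MODULE(CONFIG_RCU_TORTURE_TEST) */

#define torture_init_error(firsterr)					\
({									\
	int ___firsterr = (firsterr);					\
									\
	if (BUILT_IN && ___firsterr < 0)				\
		fprintf(stderr, "Torture-test initialization failed "	\
			"with error code %d\n", ___firsterr);		\
	___firsterr < 0;						\
})

static int fake_torture_onoff_init(void)
{
        return -22;     /* -EINVAL: simulate a failed sub-initialization */
}

int main(void)
{
        int firsterr;

        firsterr = fake_torture_onoff_init();
        if (torture_init_error(firsterr))
                goto unwind;    /* same shape as the hunks that follow */
        puts("init complete");
        return 0;
unwind:
        puts("unwinding partial initialization");
        return 1;
}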
@@ -1022,23 +1022,23 @@ static int __init lock_torture_init(void)
 	if (onoff_interval > 0) {
 		firsterr = torture_onoff_init(onoff_holdoff * HZ,
 					      onoff_interval * HZ, NULL);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (shuffle_interval > 0) {
 		firsterr = torture_shuffle_init(shuffle_interval);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (shutdown_secs > 0) {
 		firsterr = torture_shutdown_init(shutdown_secs,
 						 lock_torture_cleanup);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (stutter > 0) {
 		firsterr = torture_stutter_init(stutter, stutter);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
@@ -1082,7 +1082,7 @@ static int __init lock_torture_init(void)
 		/* Create writer. */
 		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
 						  writer_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;

 	create_reader:
@@ -1091,13 +1091,13 @@ static int __init lock_torture_init(void)
 		/* Create reader. */
 		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
 						  reader_tasks[j]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (stat_interval > 0) {
 		firsterr = torture_create_kthread(lock_torture_stats, NULL,
 						  stats_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	torture_init_end();
......
@@ -758,7 +758,7 @@ kfree_scale_init(void)
 		init_waitqueue_head(&shutdown_wq);
 		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
 						  shutdown_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 		schedule_timeout_uninterruptible(1);
 	}
@@ -775,7 +775,7 @@ kfree_scale_init(void)
 	for (i = 0; i < kfree_nrealthreads; i++) {
 		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
 						  kfree_reader_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
@@ -838,7 +838,7 @@ rcu_scale_init(void)
 		init_waitqueue_head(&shutdown_wq);
 		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
 						  shutdown_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 		schedule_timeout_uninterruptible(1);
 	}
@@ -852,7 +852,7 @@ rcu_scale_init(void)
 	for (i = 0; i < nrealreaders; i++) {
 		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
 						  reader_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
@@ -879,7 +879,7 @@ rcu_scale_init(void)
 		}
 		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
 						  writer_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	torture_init_end();
......
@@ -1432,28 +1432,34 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 	/* First, put new protection in place to avoid critical-section gap. */
 	if (statesnew & RCUTORTURE_RDR_BH)
 		local_bh_disable();
+	if (statesnew & RCUTORTURE_RDR_RBH)
+		rcu_read_lock_bh();
 	if (statesnew & RCUTORTURE_RDR_IRQ)
 		local_irq_disable();
 	if (statesnew & RCUTORTURE_RDR_PREEMPT)
 		preempt_disable();
-	if (statesnew & RCUTORTURE_RDR_RBH)
-		rcu_read_lock_bh();
 	if (statesnew & RCUTORTURE_RDR_SCHED)
 		rcu_read_lock_sched();
 	if (statesnew & RCUTORTURE_RDR_RCU)
 		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

-	/* Next, remove old protection, irq first due to bh conflict. */
+	/*
+	 * Next, remove old protection, in decreasing order of strength
+	 * to avoid unlock paths that aren't safe in the stronger
+	 * context. Namely: BH can not be enabled with disabled interrupts.
+	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
+	 * context.
+	 */
 	if (statesold & RCUTORTURE_RDR_IRQ)
 		local_irq_enable();
-	if (statesold & RCUTORTURE_RDR_BH)
-		local_bh_enable();
 	if (statesold & RCUTORTURE_RDR_PREEMPT)
 		preempt_enable();
-	if (statesold & RCUTORTURE_RDR_RBH)
-		rcu_read_unlock_bh();
 	if (statesold & RCUTORTURE_RDR_SCHED)
 		rcu_read_unlock_sched();
+	if (statesold & RCUTORTURE_RDR_BH)
+		local_bh_enable();
+	if (statesold & RCUTORTURE_RDR_RBH)
+		rcu_read_unlock_bh();
 	if (statesold & RCUTORTURE_RDR_RCU) {
 		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
@@ -1496,6 +1502,9 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
 	int mask = rcutorture_extend_mask_max();
 	unsigned long randmask1 = torture_random(trsp) >> 8;
 	unsigned long randmask2 = randmask1 >> 3;
+	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
+	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
+	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
@@ -1503,11 +1512,26 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
 		mask = mask & randmask2;
 	else
 		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
-	/* Can't enable bh w/irq disabled. */
-	if ((mask & RCUTORTURE_RDR_IRQ) &&
-	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
-	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
-		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
+
+	/*
+	 * Can't enable bh w/irq disabled.
+	 */
+	if (mask & RCUTORTURE_RDR_IRQ)
+		mask |= oldmask & bhs;
+
+	/*
+	 * Ideally these sequences would be detected in debug builds
+	 * (regardless of RT), but until then don't stop testing
+	 * them on non-RT.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		/* Can't modify BH in atomic context */
+		if (oldmask & preempts_irq)
+			mask &= ~bhs;
+		if ((oldmask | mask) & preempts_irq)
+			mask |= oldmask & bhs;
+	}
+
 	return mask ?: RCUTORTURE_RDR_RCU;
 }
@@ -2449,7 +2473,7 @@ static int __init rcu_torture_fwd_prog_init(void)
 	}
 	if (stall_cpu > 0) {
 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
-		if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS))
+		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
 			return -EINVAL; /* In module, can fail back to user. */
 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
 		return 0;
@@ -2741,7 +2765,7 @@ static int rcu_torture_read_exit(void *unused)
 static int rcu_torture_read_exit_init(void)
 {
 	if (read_exit_burst <= 0)
-		return -EINVAL;
+		return 0;
 	init_waitqueue_head(&read_exit_wq);
 	read_exit_child_stop = false;
 	read_exit_child_stopped = false;
@@ -2819,7 +2843,7 @@ rcu_torture_cleanup(void)
 			rcutorture_seq_diff(gp_seq, start_gp_seq));
 	torture_stop_kthread(rcu_torture_stats, stats_task);
 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
-	if (rcu_torture_can_boost())
+	if (rcu_torture_can_boost() && rcutor_hp >= 0)
 		cpuhp_remove_state(rcutor_hp);

 	/*
@@ -3037,7 +3061,7 @@ rcu_torture_init(void)
 	rcu_torture_write_types();
 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
 					  writer_task);
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	if (nfakewriters > 0) {
 		fakewriter_tasks = kcalloc(nfakewriters,
@@ -3052,7 +3076,7 @@ rcu_torture_init(void)
 	for (i = 0; i < nfakewriters; i++) {
 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
 						  NULL, fakewriter_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
@@ -3068,7 +3092,7 @@ rcu_torture_init(void)
 		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
 						  reader_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	nrealnocbers = nocbs_nthreads;
@@ -3088,18 +3112,18 @@ rcu_torture_init(void)
 	}
 	for (i = 0; i < nrealnocbers; i++) {
 		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (stat_interval > 0) {
 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
 						  stats_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (test_no_idle_hz && shuffle_interval > 0) {
 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (stutter < 0)
@@ -3109,7 +3133,7 @@ rcu_torture_init(void)
 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
 		firsterr = torture_stutter_init(stutter * HZ, t);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (fqs_duration < 0)
@@ -3118,7 +3142,7 @@ rcu_torture_init(void)
 		/* Create the fqs thread */
 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
 						  fqs_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (test_boost_interval < 1)
@@ -3132,9 +3156,9 @@ rcu_torture_init(void)
 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
 					     rcutorture_booster_init,
 					     rcutorture_booster_cleanup);
-		if (firsterr < 0)
-			goto unwind;
 		rcutor_hp = firsterr;
+		if (torture_init_error(firsterr))
+			goto unwind;

 		// Testing RCU priority boosting requires rcutorture do
 		// some serious abuse. Counter this by running ksoftirqd
@@ -3153,23 +3177,23 @@ rcu_torture_init(void)
 	}
 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
 				      rcutorture_sync);
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	firsterr = rcu_torture_stall_init();
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	firsterr = rcu_torture_fwd_prog_init();
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	firsterr = rcu_torture_barrier_init();
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	firsterr = rcu_torture_read_exit_init();
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	if (object_debug)
 		rcu_test_debug_objects();
......
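Most of the rcutorture.c changes above either adopt torture_init_error() or rework reader-state nesting for PREEMPT_RT. The key invariant in rcutorture_extend_mask() is that a candidate mask which keeps interrupts disabled must also keep whatever BH-style protection the old mask held, since BH cannot be re-enabled with interrupts disabled. A standalone sketch of that bit manipulation, reusing the RCUTORTURE_RDR_* values from kernel/rcu/rcutorture.c (copied here on the assumption they are unchanged by this series):

#include <stdio.h>

/*
 * Standalone sketch (not kernel code) of the "mask |= oldmask & bhs"
 * fixup in the rcutorture_extend_mask() hunk above: a candidate mask
 * that disables interrupts is forced to inherit any BH protection the
 * old mask held.
 */
#define RCUTORTURE_RDR_BH       0x01
#define RCUTORTURE_RDR_IRQ      0x02
#define RCUTORTURE_RDR_PREEMPT  0x04
#define RCUTORTURE_RDR_RBH      0x08
#define RCUTORTURE_RDR_SCHED    0x10

int main(void)
{
        unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
        unsigned long oldmask = RCUTORTURE_RDR_BH;      /* reader holds BH */
        unsigned long mask = RCUTORTURE_RDR_IRQ;        /* wants IRQs off */

        /* Can't enable bh w/irq disabled (from the hunk above). */
        if (mask & RCUTORTURE_RDR_IRQ)
                mask |= oldmask & bhs;

        printf("mask = %#lx\n", mask);  /* 0x3: IRQ plus inherited BH */
        return 0;
}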
@@ -824,7 +824,7 @@ ref_scale_init(void)
 		init_waitqueue_head(&shutdown_wq);
 		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
 						  shutdown_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 		schedule_timeout_uninterruptible(1);
 	}
@@ -851,7 +851,7 @@ ref_scale_init(void)
 	for (i = 0; i < nreaders; i++) {
 		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
 						  reader_tasks[i].task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 		init_waitqueue_head(&(reader_tasks[i].wq));
@@ -860,7 +860,7 @@ ref_scale_init(void)
 	// Main Task
 	init_waitqueue_head(&main_wq);
 	firsterr = torture_create_kthread(main_func, NULL, main_task);
-	if (firsterr)
+	if (torture_init_error(firsterr))
 		goto unwind;
 	torture_init_end();
......
@@ -327,7 +327,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
+	return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
 }

 /*
@@ -1219,8 +1219,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
 	unsigned long jtsq;
-	bool *rnhqp;
-	bool *ruqp;
 	struct rcu_node *rnp = rdp->mynode;

 	/*
@@ -1285,17 +1283,15 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * is set way high.
 	 */
 	jtsq = READ_ONCE(jiffies_to_sched_qs);
-	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
-	rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu);
-	if (!READ_ONCE(*rnhqp) &&
+	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
 	     time_after(jiffies, rcu_state.jiffies_resched) ||
 	     rcu_state.cbovld)) {
-		WRITE_ONCE(*rnhqp, true);
+		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
-		smp_store_release(ruqp, true);
+		smp_store_release(&rdp->rcu_urgent_qs, true);
 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
-		WRITE_ONCE(*ruqp, true);
+		WRITE_ONCE(rdp->rcu_urgent_qs, true);
 	}

 	/*
@@ -1309,7 +1305,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (tick_nohz_full_cpu(rdp->cpu) &&
 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
 	     rcu_state.cbovld)) {
-		WRITE_ONCE(*ruqp, true);
+		WRITE_ONCE(rdp->rcu_urgent_qs, true);
 		resched_cpu(rdp->cpu);
 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
 	}
@@ -1779,6 +1775,8 @@ static noinline_for_stack bool rcu_gp_init(void)
 	 */
 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
 	rcu_for_each_leaf_node(rnp) {
+		// Wait for CPU-hotplug operations that might have
+		// started before this grace period did.
 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
 		firstseq = READ_ONCE(rnp->ofl_seq);
 		if (firstseq & 0x1)
@@ -1907,7 +1905,7 @@ static void rcu_gp_fqs(bool first_time)
 	struct rcu_node *rnp = rcu_get_root();

 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
-	rcu_state.n_force_qs++;
+	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
 	if (first_time) {
 		/* Collect dyntick-idle snapshots. */
 		force_qs_rnp(dyntick_save_progress_counter);
@@ -2358,7 +2356,7 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
 int rcutree_dying_cpu(unsigned int cpu)
 {
 	bool blkd;
-	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;

 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -2550,7 +2548,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
 		rdp->qlen_last_fqs_check = 0;
-		rdp->n_force_qs_snap = rcu_state.n_force_qs;
+		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
 		rdp->qlen_last_fqs_check = count;
@@ -2898,10 +2896,10 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
 	} else {
 		/* Give the grace period a kick. */
 		rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
-		if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
+		if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
 		    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
 			rcu_force_quiescent_state();
-		rdp->n_force_qs_snap = rcu_state.n_force_qs;
+		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
 	}
 }
@@ -4128,10 +4126,9 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->qlen_last_fqs_check = 0;
-	rdp->n_force_qs_snap = rcu_state.n_force_qs;
+	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
 	rdp->blimit = blimit;
 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
-	rcu_dynticks_eqs_online();
 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */

 	/*
@@ -4251,6 +4248,7 @@ void rcu_cpu_starting(unsigned int cpu)
 	mask = rdp->grpmask;
 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
+	rcu_dynticks_eqs_online();
 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
@@ -4296,9 +4294,7 @@ void rcu_report_dead(unsigned int cpu)
 	do_nocb_deferred_wakeup(rdp);

 	/* QS for any half-done expedited grace period. */
-	preempt_disable();
-	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
-	preempt_enable();
+	rcu_report_exp_rdp(rdp);
 	rcu_preempt_deferred_qs(current);

 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
......
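Several of the tree.c hunks above convert rcu_state.n_force_qs from plain accesses to READ_ONCE()/WRITE_ONCE(), because the counter is snapshotted locklessly (in rcu_do_batch() and __call_rcu_core()) while rcu_gp_fqs() updates it. A minimal userspace sketch of the marked-access idiom, mimicking the kernel macros with volatile casts (a simplification of the real READ_ONCE()/WRITE_ONCE(), shown here only to illustrate the pattern):

#include <stdio.h>

/*
 * Standalone sketch (not kernel code): volatile-qualified accesses keep
 * the compiler from tearing, fusing, or refetching loads and stores of
 * a shared scalar, which is what the hunks above rely on for
 * rcu_state.n_force_qs.
 */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static unsigned long n_force_qs;	/* stand-in for rcu_state.n_force_qs */

int main(void)
{
        unsigned long snap;

        /* Updater side: a marked increment cannot be torn or refetched. */
        WRITE_ONCE(n_force_qs, READ_ONCE(n_force_qs) + 1);

        /* Reader side: snapshot once, compare later. */
        snap = READ_ONCE(n_force_qs);
        printf("snapshot = %lu\n", snap);
        return 0;
}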
@@ -512,7 +512,6 @@ static void synchronize_rcu_expedited_wait(void)
 		j = READ_ONCE(jiffies_till_first_fqs);
 		if (synchronize_rcu_expedited_wait_once(j + HZ))
 			return;
-		WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
 	}

 	for (;;) {
@@ -760,7 +759,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
 	my_cpu = get_cpu();
 	/* Quiescent state either not needed or already requested, leave. */
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
-	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
+	    rdp->cpu_no_qs.b.exp) {
 		put_cpu();
 		return;
 	}
......
@@ -549,7 +549,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 			rcu_nocb_unlock_irqrestore(rdp, flags);
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
 		}
-		return;
 	}

 	/*
@@ -767,6 +766,7 @@ static int rcu_nocb_gp_kthread(void *arg)
 static inline bool nocb_cb_can_run(struct rcu_data *rdp)
 {
 	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
+
 	return rcu_segcblist_test_flags(&rdp->cblist, flags);
 }
......
@@ -814,8 +814,7 @@ void rcu_read_unlock_strict(void)
 {
 	struct rcu_data *rdp;

-	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
-	    irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
+	if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
 		return;
 	rdp = this_cpu_ptr(&rcu_data);
 	rcu_report_qs_rdp(rdp);
@@ -1480,7 +1479,7 @@ static void rcu_bind_gp_kthread(void)
 }

 /* Record the current task on dyntick-idle entry. */
-static void noinstr rcu_dynticks_task_enter(void)
+static __always_inline void rcu_dynticks_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
 	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
@@ -1488,7 +1487,7 @@ static void noinstr rcu_dynticks_task_enter(void)
 }

 /* Record no current task on dyntick-idle exit. */
-static void noinstr rcu_dynticks_task_exit(void)
+static __always_inline void rcu_dynticks_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
 	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
@@ -1496,7 +1495,7 @@ static void noinstr rcu_dynticks_task_exit(void)
 }

 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
-static void rcu_dynticks_task_trace_enter(void)
+static __always_inline void rcu_dynticks_task_trace_enter(void)
 {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -1505,7 +1504,7 @@ static void rcu_dynticks_task_trace_enter(void)
 }

 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
-static void rcu_dynticks_task_trace_exit(void)
+static __always_inline void rcu_dynticks_task_trace_exit(void)
 {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
......
@@ -54,11 +54,11 @@
 #define MODULE_PARAM_PREFIX "rcupdate."

 #ifndef CONFIG_TINY_RCU
-module_param(rcu_expedited, int, 0);
-module_param(rcu_normal, int, 0);
+module_param(rcu_expedited, int, 0444);
+module_param(rcu_normal, int, 0444);
 static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
-#ifndef CONFIG_PREEMPT_RT
-module_param(rcu_normal_after_boot, int, 0);
+#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
+module_param(rcu_normal_after_boot, int, 0444);
 #endif
 #endif /* #ifndef CONFIG_TINY_RCU */
......
@@ -341,6 +341,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			cpu = torture_random(trsp) % nr_cpu_ids;
 			scfp->n_resched++;
 			resched_cpu(cpu);
+			this_cpu_inc(scf_invoked_count);
 		}
 		break;
 	case SCF_PRIM_SINGLE:
@@ -553,18 +554,18 @@ static int __init scf_torture_init(void)
 	scftorture_print_module_parms("Start of test");

-	if (weight_resched == -1 &&
-	    weight_single == -1 && weight_single_rpc == -1 && weight_single_wait == -1 &&
-	    weight_many == -1 && weight_many_wait == -1 &&
-	    weight_all == -1 && weight_all_wait == -1) {
-		weight_resched1 = 2 * nr_cpu_ids;
-		weight_single1 = 2 * nr_cpu_ids;
-		weight_single_rpc1 = 2 * nr_cpu_ids;
-		weight_single_wait1 = 2 * nr_cpu_ids;
-		weight_many1 = 2;
-		weight_many_wait1 = 2;
-		weight_all1 = 1;
-		weight_all_wait1 = 1;
+	if (weight_resched <= 0 &&
+	    weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 &&
+	    weight_many <= 0 && weight_many_wait <= 0 &&
+	    weight_all <= 0 && weight_all_wait <= 0) {
+		weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;
+		weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;
+		weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;
+		weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;
+		weight_many1 = weight_many == 0 ? 0 : 2;
+		weight_many_wait1 = weight_many_wait == 0 ? 0 : 2;
+		weight_all1 = weight_all == 0 ? 0 : 1;
+		weight_all_wait1 = weight_all_wait == 0 ? 0 : 1;
 	} else {
 		if (weight_resched == -1)
 			weight_resched1 = 0;
@@ -583,8 +584,8 @@ static int __init scf_torture_init(void)
 		if (weight_all_wait == -1)
 			weight_all_wait1 = 0;
 	}
-	if (weight_single1 == 0 && weight_single_rpc1 == 0 && weight_single_wait1 == 0 &&
-	    weight_many1 == 0 && weight_many_wait1 == 0 &&
+	if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 &&
+	    weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 &&
 	    weight_all1 == 0 && weight_all_wait1 == 0) {
 		VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
 		firsterr = -EINVAL;
@@ -605,17 +606,17 @@ static int __init scf_torture_init(void)
 	if (onoff_interval > 0) {
 		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (shutdown_secs > 0) {
 		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (stutter > 0) {
 		firsterr = torture_stutter_init(stutter, stutter);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
@@ -636,12 +637,12 @@ static int __init scf_torture_init(void)
 		scf_stats_p[i].cpu = i;
 		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
 						  scf_stats_p[i].task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
 	if (stat_interval > 0) {
 		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
-		if (firsterr)
+		if (torture_init_error(firsterr))
 			goto unwind;
 	}
@@ -651,6 +652,10 @@ static int __init scf_torture_init(void)
 unwind:
 	torture_init_end();
 	scf_torture_cleanup();
+	if (shutdown_secs) {
+		WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST));
+		kernel_power_off();
+	}
 	return firsterr;
 }
......
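The scftorture defaulting change above distinguishes a parameter left at -1 from one explicitly set to 0: defaults are applied only to unset weights, so a zero weight now excludes its smp_call_function*() category even when no other weight was supplied. A standalone sketch of that rule for a single parameter; resolve_weight(), the nr_cpu_ids value, and the example inputs are made up for illustration:

#include <stdio.h>

/*
 * Standalone sketch (not kernel code) of the defaulting rule in
 * scf_torture_init(): -1 means "unset" and picks up the default when
 * no weights were given at all, while an explicit 0 keeps that
 * category excluded.
 */
static int nr_cpu_ids = 4;

static int resolve_weight(int param, int all_unset_or_zero, int dflt)
{
        if (all_unset_or_zero)                  /* no positive weights given... */
                return param == 0 ? 0 : dflt;   /* ...but 0 still excludes */
        return param == -1 ? 0 : param;         /* unset counts as zero here */
}

int main(void)
{
        /* weight_single left at -1 with every other weight <= 0: */
        printf("weight_single1 = %d\n",
               resolve_weight(-1, 1, 2 * nr_cpu_ids));  /* -> 8 */
        /* weight_single=0 passed explicitly: */
        printf("weight_single1 = %d\n",
               resolve_weight(0, 1, 2 * nr_cpu_ids));   /* -> 0 */
        return 0;
}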
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Extract any RCU CPU stall warnings present in specified file.
# Filter out clocksource lines. Note that preceding-lines excludes the
# initial line of the stall warning but trailing-lines includes it.
#
# Usage: extract-stall.sh dmesg-file [ preceding-lines [ trailing-lines ] ]
echo $1
preceding_lines="${2-3}"
trailing_lines="${3-10}"
awk -v preceding_lines="$preceding_lines" -v trailing_lines="$trailing_lines" '
suffix <= 0 {
for (i = preceding_lines; i > 0; i--)
last[i] = last[i - 1];
last[0] = $0;
}
suffix > 0 {
print $0;
suffix--;
if (suffix <= 0)
print "";
}
suffix <= 0 && /detected stall/ {
for (i = preceding_lines; i >= 0; i--)
if (last[i] != "")
print last[i];
suffix = trailing_lines;
}' < "$1" | tr -d '\015' | grep -v clocksource
@@ -149,6 +149,7 @@ do
 done

 # Download and expand the tarball on all systems.
+echo Build-products tarball: `du -h $T/binres.tgz` | tee -a "$oldrun/remote-log"
 for i in $systems
 do
 	echo Downloading tarball to $i `date` | tee -a "$oldrun/remote-log"
......
@@ -184,7 +184,7 @@ do
 		TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG
 		;;
 	--kcsan)
-		TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_INTERRUPT_WATCHER=y CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG
+		TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_STRICT=y CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG
 		;;
 	--kmake-arg|--kmake-args)
 		checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$'
......
@@ -351,7 +351,7 @@ fi
 if test "$do_scftorture" = "yes"
 then
 	torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot"
-	torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make
+	torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
 fi

 if test "$do_refscale" = yes
@@ -434,7 +434,12 @@ then
 	batchno=1
 	if test -s $T/xz-todo
 	then
-		echo Size before compressing: `du -sh $tdir | awk '{ print $1 }'` `date` 2>&1 | tee -a "$tdir/log-xz" | tee -a $T/log
+		for i in `cat $T/xz-todo`
+		do
+			find $i -name 'vmlinux*' -print
+		done | wc -l | awk '{ print $1 }' > $T/xz-todo-count
+		n2compress="`cat $T/xz-todo-count`"
+		echo Size before compressing $n2compress files: `du -sh $tdir | awk '{ print $1 }'` `date` 2>&1 | tee -a "$tdir/log-xz" | tee -a $T/log
 		for i in `cat $T/xz-todo`
 		do
 			echo Compressing vmlinux files in ${i}: `date` >> "$tdir/log-xz" 2>&1
@@ -456,7 +461,7 @@ then
 		echo Waiting for final batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" | tee -a $T/log
 	fi
 	wait
-	echo Size after compressing: `du -sh $tdir | awk '{ print $1 }'` `date` 2>&1 | tee -a "$tdir/log-xz" | tee -a $T/log
+	echo Size after compressing $n2compress files: `du -sh $tdir | awk '{ print $1 }'` `date` 2>&1 | tee -a "$tdir/log-xz" | tee -a $T/log
 	echo Total duration `get_starttime_duration $starttime`. | tee -a $T/log
 else
 	echo No compression needed: `date` >> "$tdir/log-xz" 2>&1
......