Commit 81b4a7bc authored by Paul E. McKenney

rcu-tasks: Disable CPU hotplug across RCU tasks trace scans

This commit disables CPU hotplug across RCU tasks trace scans, which
is a first step towards correctly recognizing idle tasks "running" on
offline CPUs.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent b38f57c1
...@@ -910,16 +910,16 @@ static void rcu_tasks_trace_pregp_step(void) ...@@ -910,16 +910,16 @@ static void rcu_tasks_trace_pregp_step(void)
{ {
int cpu; int cpu;
// Wait for CPU-hotplug paths to complete.
cpus_read_lock();
cpus_read_unlock();
// Allow for fast-acting IPIs. // Allow for fast-acting IPIs.
atomic_set(&trc_n_readers_need_end, 1); atomic_set(&trc_n_readers_need_end, 1);
// There shouldn't be any old IPIs, but... // There shouldn't be any old IPIs, but...
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
// Disable CPU hotplug across the tasklist scan.
// This also waits for all readers in CPU-hotplug code paths.
cpus_read_lock();
} }
/* Do first-round processing for the specified task. */ /* Do first-round processing for the specified task. */
...@@ -935,6 +935,9 @@ static void rcu_tasks_trace_pertask(struct task_struct *t, ...@@ -935,6 +935,9 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
/* Do intermediate processing between task and holdout scans. */ /* Do intermediate processing between task and holdout scans. */
static void rcu_tasks_trace_postscan(void) static void rcu_tasks_trace_postscan(void)
{ {
// Re-enable CPU hotplug now that the tasklist scan has completed.
cpus_read_unlock();
// Wait for late-stage exiting tasks to finish exiting. // Wait for late-stage exiting tasks to finish exiting.
// These might have passed the call to exit_tasks_rcu_finish(). // These might have passed the call to exit_tasks_rcu_finish().
synchronize_rcu(); synchronize_rcu();
...@@ -979,6 +982,9 @@ static void check_all_holdout_tasks_trace(struct list_head *hop, ...@@ -979,6 +982,9 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
{ {
struct task_struct *g, *t; struct task_struct *g, *t;
// Disable CPU hotplug across the holdout list scan.
cpus_read_lock();
list_for_each_entry_safe(t, g, hop, trc_holdout_list) { list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
// If safe and needed, try to check the current task. // If safe and needed, try to check the current task.
if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
...@@ -991,6 +997,10 @@ static void check_all_holdout_tasks_trace(struct list_head *hop, ...@@ -991,6 +997,10 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
else if (needreport) else if (needreport)
show_stalled_task_trace(t, firstreport); show_stalled_task_trace(t, firstreport);
} }
// Re-enable CPU hotplug now that the holdout list scan has completed.
cpus_read_unlock();
if (needreport) { if (needreport) {
if (firstreport) if (firstreport)
pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment