Commit 90450a06 authored by Linus Torvalds

Merge tag 'rcu-fixes-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks

Pull RCU fixes from Frederic Weisbecker:

 - Fix a lock inversion between scheduler and RCU introduced in
   v6.2-rc4. The scenario could trigger on any user of RCU_NOCB
   (mostly Android but also nohz_full)

 - Fix PF_IDLE semantic changes introduced in v6.6-rc3 breaking
   some RCU-Tasks and RCU-Tasks-Trace expectations as to what
   exactly is an idle task. This resulted in potential spurious
   stalls and warnings.

* tag 'rcu-fixes-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks:
  rcu/tasks-trace: Handle new PF_IDLE semantics
  rcu/tasks: Handle new PF_IDLE semantics
  rcu: Introduce rcu_cpu_online()
  rcu: Break rcu_node_0 --> &rq->__lock order
parents 447cec03 a80712b9
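The lock-inversion fix changes rcu_implicit_dynticks_qs() so it no longer calls resched_cpu() directly while the caller holds the rcu_node lock; instead it reports "needs a resched" through its return value, and force_qs_rnp() kicks the CPU only after the lock has been released. A minimal userspace sketch of that defer-until-unlocked pattern is shown below; check_cpu(), kick_cpu() and node_lock are illustrative stand-ins, not kernel APIs.

/*
 * Illustrative model only: the per-CPU check reports "needs resched"
 * via its return value, and the caller acts on it after dropping the
 * lock, so no wakeup/resched work happens under node_lock.
 */
#include <pthread.h>
#include <stdio.h>

#define NCPU 4

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the checker: >0 quiescent, <0 needs resched, 0 otherwise. */
static int check_cpu(int cpu)
{
	return (cpu % 2) ? -1 : 1;	/* fake result for the demo */
}

/* Stand-in for resched_cpu(); in this model it must not run under node_lock. */
static void kick_cpu(int cpu)
{
	printf("resched CPU %d\n", cpu);
}

static void scan_cpus(void)
{
	unsigned long qsmask = 0, rsmask = 0;
	int cpu;

	pthread_mutex_lock(&node_lock);
	for (cpu = 0; cpu < NCPU; cpu++) {
		int ret = check_cpu(cpu);

		if (ret > 0)
			qsmask |= 1UL << cpu;	/* quiescent, report later */
		if (ret < 0)
			rsmask |= 1UL << cpu;	/* remember: kick after unlock */
	}
	pthread_mutex_unlock(&node_lock);

	/* Only now, with the lock dropped, kick the CPUs that need it. */
	for (cpu = 0; cpu < NCPU; cpu++)
		if (rsmask & (1UL << cpu))
			kick_cpu(cpu);

	printf("quiescent mask: %#lx\n", qsmask);
}

int main(void)
{
	scan_cpus();
	return 0;
}

The tree.c hunks below implement the same idea: rdp->grpmask bits are collected into rsmask under the rnp lock, and resched_cpu() runs only after the lock is released.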
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -500,6 +500,7 @@ static inline void rcu_expedite_gp(void) { }
 static inline void rcu_unexpedite_gp(void) { }
 static inline void rcu_async_hurry(void) { }
 static inline void rcu_async_relax(void) { }
+static inline bool rcu_cpu_online(int cpu) { return true; }
 #else /* #ifdef CONFIG_TINY_RCU */
 bool rcu_gp_is_normal(void); /* Internal RCU use. */
 bool rcu_gp_is_expedited(void); /* Internal RCU use. */
@@ -509,6 +510,7 @@ void rcu_unexpedite_gp(void);
 void rcu_async_hurry(void);
 void rcu_async_relax(void);
 void rcupdate_announce_bootup_oddness(void);
+bool rcu_cpu_online(int cpu);
 #ifdef CONFIG_TASKS_RCU_GENERIC
 void show_rcu_tasks_gp_kthreads(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -895,10 +895,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop)
 	synchronize_rcu();
 }
 
+/* Check for quiescent states since the pregp's synchronize_rcu() */
+static bool rcu_tasks_is_holdout(struct task_struct *t)
+{
+	int cpu;
+
+	/* Has the task been seen voluntarily sleeping? */
+	if (!READ_ONCE(t->on_rq))
+		return false;
+
+	/*
+	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
+	 * quiescent states. But CPU boot code performed by the idle task
+	 * isn't a quiescent state.
+	 */
+	if (is_idle_task(t))
+		return false;
+
+	cpu = task_cpu(t);
+
+	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
+	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
+		return false;
+
+	return true;
+}
+
 /* Per-task initial processing. */
 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 {
-	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+	if (t != current && rcu_tasks_is_holdout(t)) {
 		get_task_struct(t);
 		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
 		WRITE_ONCE(t->rcu_tasks_holdout, true);
@@ -947,7 +973,7 @@ static void check_holdout_task(struct task_struct *t,
 
 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
-	    !READ_ONCE(t->on_rq) ||
+	    !rcu_tasks_is_holdout(t) ||
 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
 		WRITE_ONCE(t->rcu_tasks_holdout, false);
@@ -1525,7 +1551,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
 	} else {
 		// The task is not running, so C-language access is safe.
 		nesting = t->trc_reader_nesting;
-		WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
+		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
 		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
 			n_heavy_reader_ofl_updates++;
 	}
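For context on the tasks.h changes above: is_idle_task() tests the PF_IDLE flag, which is set while a task runs the idle loop (including idle-injection threads), whereas idle_task(cpu) returns the CPU's dedicated idle task, so after the v6.6-rc3 PF_IDLE rework the two checks are no longer interchangeable. A small userspace model of that distinction follows; the struct and helpers are hypothetical stand-ins, not kernel definitions.

/*
 * Illustrative model only: one task can be flag-idle without being
 * the CPU's dedicated idle task (e.g. an idle-injection thread).
 */
#include <stdbool.h>
#include <stdio.h>

#define PF_IDLE	0x2
#define NCPU	2

struct task {
	const char *comm;
	unsigned int flags;
	int cpu;
};

/* One dedicated idle task per CPU, plus an idle-injection thread on CPU 0. */
static struct task idle_tasks[NCPU] = {
	{ "swapper/0", PF_IDLE, 0 },
	{ "swapper/1", PF_IDLE, 1 },
};
static struct task idle_inject = { "idle_inject/0", PF_IDLE, 0 };

/* Analogous to is_idle_task(): purely flag-based. */
static bool task_flagged_idle(struct task *t)
{
	return t->flags & PF_IDLE;
}

/* Analogous to t == idle_task(cpu): identity of the per-CPU idle task. */
static bool task_is_cpu_idle_task(struct task *t)
{
	return t == &idle_tasks[t->cpu];
}

int main(void)
{
	struct task *tasks[] = { &idle_tasks[0], &idle_inject };

	for (unsigned int i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
		printf("%-14s flagged-idle=%d per-cpu-idle-task=%d\n",
		       tasks[i]->comm, task_flagged_idle(tasks[i]),
		       task_is_cpu_idle_task(tasks[i]));
	return 0;
}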
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -755,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 }
 
 /*
- * Return true if the specified CPU has passed through a quiescent
- * state by virtue of being in or having passed through an dynticks
- * idle state since the last call to dyntick_save_progress_counter()
- * for this same CPU, or by virtue of having been offline.
+ * Returns positive if the specified CPU has passed through a quiescent state
+ * by virtue of being in or having passed through an dynticks idle state since
+ * the last call to dyntick_save_progress_counter() for this same CPU, or by
+ * virtue of having been offline.
+ *
+ * Returns negative if the specified CPU needs a force resched.
+ *
+ * Returns zero otherwise.
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
 	unsigned long jtsq;
+	int ret = 0;
 	struct rcu_node *rnp = rdp->mynode;
 
 	/*
@@ -848,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
 	     rcu_state.cbovld)) {
 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
-		resched_cpu(rdp->cpu);
 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+		ret = -1;
 	}
 
 	/*
@@ -862,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
 		if (time_after(jiffies,
 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
-			resched_cpu(rdp->cpu);
 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+			ret = -1;
 		}
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
@@ -892,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		}
 	}
 
-	return 0;
+	return ret;
 }
 
 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
@@ -2271,15 +2276,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 {
 	int cpu;
 	unsigned long flags;
-	unsigned long mask;
-	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
 	rcu_state.cbovld = rcu_state.cbovldnext;
 	rcu_state.cbovldnext = false;
 	rcu_for_each_leaf_node(rnp) {
+		unsigned long mask = 0;
+		unsigned long rsmask = 0;
+
 		cond_resched_tasks_rcu_qs();
-		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
 		if (rnp->qsmask == 0) {
@@ -2297,11 +2302,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 			continue;
 		}
 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
+			struct rcu_data *rdp;
+			int ret;
+
 			rdp = per_cpu_ptr(&rcu_data, cpu);
-			if (f(rdp)) {
+			ret = f(rdp);
+			if (ret > 0) {
 				mask |= rdp->grpmask;
 				rcu_disable_urgency_upon_qs(rdp);
 			}
+			if (ret < 0)
+				rsmask |= rdp->grpmask;
 		}
 		if (mask != 0) {
 			/* Idle/offline CPUs, report (releases rnp->lock). */
@@ -2310,6 +2321,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 			/* Nothing to do here, so just drop the lock. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
+
+		for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
+			resched_cpu(cpu);
 	}
 }
 
@@ -4195,6 +4209,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
 	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
 }
 
+bool rcu_cpu_online(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+	return rcu_rdp_cpu_online(rdp);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*