Commit fced9c8c authored by Paul E. McKenney

rcu: Avoid resched_cpu() when rescheduling the current CPU

The resched_cpu() interface is quite handy, but it does acquire the
specified CPU's runqueue lock, which does not come for free.  This
commit therefore substitutes the following when directing resched_cpu()
at the current CPU:

	set_tsk_need_resched(current);
	set_preempt_need_resched();

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
parent d3052109
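
The substitution only applies when the CPU being rescheduled is the one currently executing; a remote CPU still needs resched_cpu() and its runqueue lock. A minimal sketch of that distinction is shown below; the helper name resched_local_or_remote() is hypothetical and not part of this commit:

	#include <linux/preempt.h>
	#include <linux/sched.h>
	#include <linux/smp.h>

	/*
	 * Hypothetical illustration of the pattern this commit introduces:
	 * rescheduling the local CPU only requires setting the need-resched
	 * flags, while a remote CPU still goes through resched_cpu() and its
	 * runqueue lock.  Callers must have preemption disabled so that
	 * smp_processor_id() is stable.
	 */
	static void resched_local_or_remote(int cpu)
	{
		if (cpu == smp_processor_id()) {
			set_tsk_need_resched(current);	/* Mark current task. */
			set_preempt_need_resched();	/* Arm the preemption check. */
		} else {
			resched_cpu(cpu);		/* Remote CPU: takes its rq lock. */
		}
	}

The call sites changed below already know they are running on the CPU in question, so they invoke the two flag-setting calls directly.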
@@ -1354,7 +1354,8 @@ static void print_cpu_stall(void)
 	 * progress and it could be we're stuck in kernel space without context
 	 * switches for an entirely unreasonable amount of time.
 	 */
-	resched_cpu(smp_processor_id());
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
 }
 
 static void check_cpu_stall(struct rcu_data *rdp)
@@ -2675,10 +2676,12 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
 	WARN_ON_ONCE(!rdp->beenonline);
 
 	/* Report any deferred quiescent states if preemption enabled. */
-	if (!(preempt_count() & PREEMPT_MASK))
+	if (!(preempt_count() & PREEMPT_MASK)) {
 		rcu_preempt_deferred_qs(current);
-	else if (rcu_preempt_need_deferred_qs(current))
-		resched_cpu(rdp->cpu); /* Provoke future context switch. */
+	} else if (rcu_preempt_need_deferred_qs(current)) {
+		set_tsk_need_resched(current);
+		set_preempt_need_resched();
+	}
 
 	/* Update RCU state based on any recent quiescent states. */
 	rcu_check_quiescent_state(rdp);
...
@@ -672,7 +672,8 @@ static void sync_rcu_exp_handler(void *unused)
 			rcu_report_exp_rdp(rdp);
 		} else {
 			rdp->deferred_qs = true;
-			resched_cpu(rdp->cpu);
+			set_tsk_need_resched(t);
+			set_preempt_need_resched();
 		}
 		return;
 	}
@@ -710,15 +711,16 @@ static void sync_rcu_exp_handler(void *unused)
 	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
 	 *
-	 * Otherwise, use resched_cpu() to force a context switch after
-	 * the CPU enables everything.
+	 * Otherwise, force a context switch after the CPU enables everything.
 	 */
 	rdp->deferred_qs = true;
 	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()))
+	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
 		rcu_preempt_deferred_qs(t);
-	else
-		resched_cpu(rdp->cpu);
+	} else {
+		set_tsk_need_resched(t);
+		set_preempt_need_resched();
+	}
 }
 
 /* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
@@ -779,7 +781,8 @@ static void sync_sched_exp_handler(void *unused)
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 	/* Store .exp before .rcu_urgent_qs. */
 	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
-	resched_cpu(smp_processor_id());
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
 }
 
 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
...
@@ -791,8 +791,10 @@ static void rcu_flavor_check_callbacks(int user)
 	if (t->rcu_read_lock_nesting > 0 ||
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
-		if (rcu_preempt_need_deferred_qs(t))
-			resched_cpu(smp_processor_id());
+		if (rcu_preempt_need_deferred_qs(t)) {
+			set_tsk_need_resched(t);
+			set_preempt_need_resched();
+		}
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
...
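
The hunks above share one shape: report the deferred quiescent state immediately when it is safe to do so, otherwise set the local need-resched flags so that the eventual context switch reports it. A condensed sketch of that control flow, modeled on the rcu_process_callbacks() hunk; the helper name rcu_report_or_provoke_qs() is hypothetical and this is not a literal copy of the kernel source:

	/*
	 * Sketch: if preemption is enabled, the deferred quiescent state can
	 * be reported right away; otherwise provoke a future context switch
	 * by setting the local need-resched flags (no runqueue lock needed).
	 */
	static void rcu_report_or_provoke_qs(void)
	{
		if (!(preempt_count() & PREEMPT_MASK)) {
			rcu_preempt_deferred_qs(current);
		} else if (rcu_preempt_need_deferred_qs(current)) {
			set_tsk_need_resched(current);
			set_preempt_need_resched();
		}
	}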