Commit e015a341 authored by Paul E. McKenney

rcu: Avoid self-IPI in sync_sched_exp_online_cleanup()

The sync_sched_exp_online_cleanup() function is invoked at online time to
handle the case where the start of an expedited grace period ran
concurrently with a CPU being taken offline and then immediately being
placed online.  It checks to see if RCU needs an expedited quiescent
state from the incoming CPU, sending it an IPI if so.  However, it is
quite possible that sync_sched_exp_online_cleanup() is running on that
very CPU, in which case requesting the quiescent state locally incurs
considerably less overhead than simulating a self-IPI.

This commit therefore places the last few lines of rcu_exp_handler()
into a new rcu_exp_need_qs() function, which is invoked both by
rcu_exp_handler() and by sync_sched_exp_online_cleanup() in the self-IPI
case.

This also reduces the rcu_exp_handler() function's state space by
removing the direct-call path that smp_call_function_single() uses to
emulate the requested self-IPI.  This in turn will allow tighter error
checking in rcu_is_cpu_rrupt_from_idle().
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
parent b9ad4d6e
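Before the diff itself, a hypothetical sketch of the dispatch pattern this change applies may help: handle the request locally when the target is the current CPU, and fall back to a real IPI otherwise. The function name request_exp_qs_on() is invented purely for this illustration and does not exist in the kernel tree; the local_irq_save()/local_irq_restore() pair mimics the interrupts-disabled context in which the IPI handler would otherwise run.

/*
 * Hypothetical illustration only (not in the kernel tree): the shape of
 * "request locally when possible, IPI otherwise" that this commit applies
 * to sync_sched_exp_online_cleanup().
 */
static void request_exp_qs_on(int cpu)
{
	unsigned long flags;
	int my_cpu = get_cpu();	/* Disable preemption, note current CPU. */

	if (my_cpu == cpu) {
		/* Run the handler's core with IRQs off, as an IPI would. */
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
	} else {
		/* Final 0: don't wait for the remote handler to finish. */
		WARN_ON_ONCE(smp_call_function_single(cpu, rcu_exp_handler,
						      NULL, 0));
	}
	put_cpu();
}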
@@ -699,6 +699,16 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
+/* Request an expedited quiescent state. */
+static void rcu_exp_need_qs(void)
+{
+	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
+	/* Store .exp before .rcu_urgent_qs. */
+	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
+}
+
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
 static void rcu_exp_handler(void *unused)
 {
@@ -714,25 +724,38 @@ static void rcu_exp_handler(void *unused)
 		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 		return;
 	}
-	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
-	/* Store .exp before .rcu_urgent_qs. */
-	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
-	set_tsk_need_resched(current);
-	set_preempt_need_resched();
+	rcu_exp_need_qs();
 }
 
 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
 static void sync_sched_exp_online_cleanup(int cpu)
 {
+	unsigned long flags;
+	int my_cpu;
 	struct rcu_data *rdp;
 	int ret;
 	struct rcu_node *rnp;
 
 	rdp = per_cpu_ptr(&rcu_data, cpu);
 	rnp = rdp->mynode;
-	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
+	my_cpu = get_cpu();
+	/* Quiescent state either not needed or already requested, leave. */
+	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
+		put_cpu();
 		return;
+	}
+	/* Quiescent state needed on current CPU, so set it up locally. */
+	if (my_cpu == cpu) {
+		local_irq_save(flags);
+		rcu_exp_need_qs();
+		local_irq_restore(flags);
+		put_cpu();
+		return;
+	}
+	/* Quiescent state needed on some other CPU, send IPI. */
 	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
+	put_cpu();
 	WARN_ON_ONCE(ret);
 }
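A note on the "/* Store .exp before .rcu_urgent_qs. */" comment in rcu_exp_need_qs(): smp_store_release() orders the two per-CPU stores so that, when paired with an acquire load on the reader side, any CPU observing rcu_urgent_qs as true is guaranteed to also observe the earlier cpu_no_qs.b.exp store. A minimal sketch of that release/acquire pairing, using the hypothetical variables payload and flag:

/* Writer: publish the payload, then release-store the flag. */
WRITE_ONCE(payload, 1);			/* plays the role of cpu_no_qs.b.exp */
smp_store_release(&flag, true);		/* plays the role of rcu_urgent_qs */

/* Reader: acquire-load the flag; a set flag guarantees the payload. */
if (smp_load_acquire(&flag))
	WARN_ON_ONCE(READ_ONCE(payload) != 1);	/* cannot trigger */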