Commit cc631fb7 authored by Paul E. McKenney, committed by Tejun Heo

sched: correctly place paranoia memory barriers in synchronize_sched_expedited()

The memory barriers must be in the SMP case, not in the !SMP case.
Also add a barrier after the atomic_inc() in order to ensure that
other CPUs see post-synchronize_sched_expedited() actions as following
the expedited grace period.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 94458d5e
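
Editor's note: as an illustration of the ordering the message describes (not part of the commit), the sketch below uses C11 atomics in user space in place of the kernel's smp_mb() / smp_mb__after_atomic_inc() primitives; the names expedited_count, post_gp_flag, updater() and reader() are hypothetical. The point is that a full barrier after the counter increment guarantees that any CPU which observes a post-grace-period action, and which executes its own full barrier, also observes the incremented counter.

/*
 * Editor's sketch (not kernel code): a user-space analogue of the
 * ordering this commit enforces, written with C11 atomics.  The
 * identifiers below are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int expedited_count = 0; /* plays the role of synchronize_sched_expedited_count */
static atomic_int post_gp_flag = 0;    /* stands in for a post-grace-period store */

/* Updater side: increment the counter, then act after the grace period. */
static void updater(void)
{
	atomic_fetch_add_explicit(&expedited_count, 1, memory_order_relaxed);
	/* Analogue of smp_mb__after_atomic_inc(): full fence after the increment. */
	atomic_thread_fence(memory_order_seq_cst);
	/* Post-grace-period action: must appear to follow the counter update. */
	atomic_store_explicit(&post_gp_flag, 1, memory_order_relaxed);
}

/* Reader side: any CPU that observes the post-GP action... */
static void reader(void)
{
	if (atomic_load_explicit(&post_gp_flag, memory_order_relaxed)) {
		/* Analogue of the reader-side smp_mb(). */
		atomic_thread_fence(memory_order_seq_cst);
		/* ...is guaranteed to also observe the counter increment. */
		printf("expedited_count = %d\n",
		       atomic_load_explicit(&expedited_count, memory_order_relaxed));
	}
}

int main(void)
{
	updater();
	reader(); /* single-threaded demo; the fences matter when these run on different CPUs */
	return 0;
}

In the patch itself, the same pattern is the atomic_inc() followed by smp_mb__after_atomic_inc() in synchronize_sched_expedited(), paired with the smp_mb() that synchronize_sched_expedited_cpu_stop() executes on each affected CPU.
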
@@ -8931,6 +8931,15 @@ struct cgroup_subsys cpuacct_subsys = {
 #ifndef CONFIG_SMP
 
 void synchronize_sched_expedited(void)
 {
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
 	/*
 	 * There must be a full memory barrier on each affected CPU
@@ -8943,16 +8952,7 @@ void synchronize_sched_expedited(void)
 	 * necessary.  Do smp_mb() anyway for documentation and
 	 * robustness against future implementation changes.
 	 */
-	smp_mb();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
+	smp_mb(); /* See above comment block. */
 	return 0;
 }
 
@@ -8990,6 +8990,7 @@ void synchronize_sched_expedited(void)
 		get_online_cpus();
 	}
 	atomic_inc(&synchronize_sched_expedited_count);
+	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);