Commit e0ecfa79 authored by Paul E. McKenney, committed by Ingo Molnar

Preempt-RCU: fix rcu_barrier for preemptive environment.

Fix rcu_barrier() to work properly in a preemptive kernel environment.
Also, the ordering of callbacks must be preserved while moving
callbacks to another CPU during CPU hotplug.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 01c1c660
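
For context, a minimal sketch of the per-CPU barrier machinery that rcu_barrier() drives. The global names match those used in the hunks below, but the bodies are paraphrased from the RCU code of this kernel generation and should be treated as approximate, not as the verbatim source:

static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
static atomic_t rcu_barrier_cpu_count;
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head);

/* Runs after a grace period; the last CPU's callback wakes the waiter. */
static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/* Executed on every CPU by on_each_cpu(): queue one local callback. */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}

Each CPU contributes one callback, and the last callback to run signals the completion. On a preemptible kernel, nothing in this sequence by itself prevents a grace period from ending between the first and the last call_rcu(); that is the race the second hunk below closes.
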
@@ -371,9 +371,9 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
 	spin_unlock_bh(&rcp->lock);
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 }
 
 static void rcu_offline_cpu(int cpu)
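
The reordering in this hunk matters because rcu_move_batch() splices a batch onto the tail of the surviving CPU's nxtlist, so batches must be moved oldest-first: donelist (callbacks whose grace period has already elapsed), then curlist, then nxtlist. A sketch of rcu_move_batch() from the same kernel generation, paraphrased and therefore approximate:

static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
			   struct rcu_head **tail)
{
	local_irq_disable();
	/* Append the whole donated batch at this CPU's nxtlist tail. */
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

Because every batch lands at the tail of nxtlist, moving donelist last, as the old code did, would have placed the dead CPU's oldest callbacks behind its newest ones and reordered their invocation.
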
@@ -115,7 +115,17 @@ void rcu_barrier(void)
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
+	/*
+	 * The queueing of callbacks in all CPUs must be atomic with
+	 * respect to RCU, otherwise one CPU may queue a callback,
+	 * wait for a grace period, decrement barrier count and call
+	 * complete(), while other CPUs have not yet queued anything.
+	 * So, we need to make sure that grace periods cannot complete
+	 * until all the callbacks are queued.
+	 */
+	rcu_read_lock();
 	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
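
The rcu_read_lock()/rcu_read_unlock() pair is the heart of the fix: with preemptible RCU, a grace period cannot complete while any pre-existing read-side critical section is still running, so holding one across on_each_cpu() guarantees that rcu_barrier_cpu_count cannot drain to zero until every CPU has queued its barrier callback. To illustrate the guarantee this restores, here is a hedged usage sketch; struct obj, free_obj_cb(), and example_teardown() are hypothetical names, not part of this patch:

struct obj {
	struct rcu_head rcu;
	/* payload ... */
};

static void free_obj_cb(struct rcu_head *head)	/* hypothetical callback */
{
	kfree(container_of(head, struct obj, rcu));
}

static void example_teardown(void)		/* hypothetical caller */
{
	/* Earlier call_rcu(&obj->rcu, free_obj_cb) requests may still be
	 * pending on any CPU here... */
	rcu_barrier();
	/* ...but every one of them has been invoked once rcu_barrier()
	 * returns, so the code behind free_obj_cb() may safely go away. */
}

This is the property module-unload paths rely on, and the one that silently broke on preemptible kernels before this change.
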