Commit 62ab7072 authored by Paul E. McKenney, committed by Thomas Gleixner

rcu: Use smp_hotplug_thread facility for RCUs per-CPU kthread

Bring RCU into the new-age CPU-hotplug fold by modifying RCU's per-CPU
kthread code to use the new smp_hotplug_thread facility.

[ tglx: Adapted it to use callbacks and to the simplified rcu yield ]
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20120716103948.673354828@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent bcd951cf
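
For readers new to the facility: registering a struct smp_hotplug_thread hands the create/bind/park/unpark lifecycle over to the smpboot core, so clients only supply callbacks. Below is a minimal sketch of the pattern, using only the <linux/smpboot.h> entry points that appear in the diff itself; the demo_* names and the trivial work flag are hypothetical, for illustration only.

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task); /* filled in by smpboot */
static DEFINE_PER_CPU(char, demo_has_work);

/* Called with preemption disabled; nonzero means "run thread_fn now". */
static int demo_should_run(unsigned int cpu)
{
	return __get_cpu_var(demo_has_work);
}

/* One unit of work per call; the smpboot core owns the sleep/wake loop. */
static void demo_thread_fn(unsigned int cpu)
{
	__get_cpu_var(demo_has_work) = 0;
}

static struct smp_hotplug_thread demo_thread_spec = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	/*
	 * Spawns one bound "demo/N" kthread per CPU and parks/unparks
	 * them automatically across CPU-hotplug transitions.
	 */
	return smpboot_register_percpu_thread(&demo_thread_spec);
}
early_initcall(demo_init);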
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -133,7 +133,6 @@ static int rcu_scheduler_fully_active __read_mostly;
  */
 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
@@ -1468,7 +1467,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
 	/* Adjust any no-longer-needed kthreads. */
-	rcu_stop_cpu_kthread(cpu);
 	rcu_boost_kthread_setaffinity(rnp, -1);
 
 	/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
@@ -2595,11 +2593,9 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		rcu_boost_kthread_setaffinity(rnp, -1);
-		rcu_cpu_kthread_setrt(cpu, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 		rcu_boost_kthread_setaffinity(rnp, cpu);
-		rcu_cpu_kthread_setrt(cpu, 0);
 		break;
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -196,12 +196,6 @@ struct rcu_node {
 				/* Refused to boost: not sure why, though. */
 				/*  This can happen due to race conditions. */
 #endif /* #ifdef CONFIG_RCU_BOOST */
-	struct task_struct *node_kthread_task;
-				/* kthread that takes care of this rcu_node */
-				/*  structure, for example, awakening the */
-				/*  per-CPU kthreads as needed. */
-	unsigned int node_kthread_status;
-				/* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -468,7 +462,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 				      unsigned long flags);
-static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
@@ -494,7 +487,6 @@ static void rcu_preempt_do_callbacks(void);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
 static void __cpuinit rcu_prepare_kthreads(int cpu);
 static void rcu_prepare_for_idle_init(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/smpboot.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1292,25 +1293,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Stop the RCU's per-CPU kthread when its CPU goes offline,.
- */
-static void rcu_stop_cpu_kthread(int cpu)
-{
-	struct task_struct *t;
-
-	/* Stop the CPU's kthread. */
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t != NULL) {
-		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-		kthread_stop(t);
-	}
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1318,59 +1300,22 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-	int policy;
 	struct sched_param sp;
-	struct task_struct *t;
 
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t == NULL)
-		return;
-	if (to_rt) {
-		policy = SCHED_FIFO;
-		sp.sched_priority = RCU_KTHREAD_PRIO;
-	} else {
-		policy = SCHED_NORMAL;
-		sp.sched_priority = 0;
-	}
-	sched_setscheduler_nocheck(t, policy, &sp);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
+static void rcu_cpu_kthread_park(unsigned int cpu)
 {
-	while (cpu_is_offline(cpu) ||
-	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-	       smp_processor_id() != cpu) {
-		if (kthread_should_stop())
-			return 1;
-		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-		local_bh_enable();
-		schedule_timeout_uninterruptible(1);
-		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
-		local_bh_disable();
-	}
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	return 0;
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+	return __get_cpu_var(rcu_cpu_has_work);
 }
 
 /*
@@ -1378,96 +1323,35 @@ static int rcu_cpu_kthread_should_stop(int cpu)
  * RCU softirq used in flavors and configurations of RCU that do not
  * support RCU priority boosting.
  */
-static int rcu_cpu_kthread(void *arg)
+static void rcu_cpu_kthread(unsigned int cpu)
 {
-	int cpu = (int)(long)arg;
-	unsigned long flags;
-	int spincnt = 0;
-	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	char work;
-	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	int spincnt;
 
-	trace_rcu_utilization("Start CPU kthread@init");
-	for (;;) {
-		*statusp = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End CPU kthread@rcu_wait");
-		rcu_wait(*workp != 0 || kthread_should_stop());
+	for (spincnt = 0; spincnt < 10; spincnt++) {
 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
 		local_bh_disable();
-		if (rcu_cpu_kthread_should_stop(cpu)) {
-			local_bh_enable();
-			break;
-		}
 		*statusp = RCU_KTHREAD_RUNNING;
-		per_cpu(rcu_cpu_kthread_loops, cpu)++;
-		local_irq_save(flags);
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
 		work = *workp;
 		*workp = 0;
-		local_irq_restore(flags);
+		local_irq_enable();
 		if (work)
 			rcu_kthread_do_work();
 		local_bh_enable();
-		if (*workp != 0)
-			spincnt++;
-		else
-			spincnt = 0;
-		if (spincnt > 10) {
-			*statusp = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End CPU kthread@rcu_yield");
-			schedule_timeout_interruptible(2);
-			trace_rcu_utilization("Start CPU kthread@rcu_yield");
-			spincnt = 0;
+		if (*workp == 0) {
+			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
 		}
 	}
-	*statusp = RCU_KTHREAD_STOPPED;
-	trace_rcu_utilization("End CPU kthread@term");
-	return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online.  We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
- * is online.  If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-		return 0;
-	t = kthread_create_on_node(rcu_cpu_kthread,
-				   (void *)(long)cpu,
-				   cpu_to_node(cpu),
-				   "rcuc/%d", cpu);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-	if (cpu_online(cpu))
-		kthread_bind(t, cpu);
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-	return 0;
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	*statusp = RCU_KTHREAD_WAITING;
 }
 
 /*
@@ -1503,6 +1387,15 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	free_cpumask_var(cm);
 }
 
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
+
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1512,11 +1405,9 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 
 	rcu_scheduler_fully_active = 1;
-	for_each_possible_cpu(cpu) {
+	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu))
-			(void)rcu_spawn_one_cpu_kthread(cpu);
-	}
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rnp = rcu_get_root(rcu_state);
 	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	if (NUM_RCU_NODES > 1) {
@@ -1533,10 +1424,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_scheduler_fully_active) {
-		(void)rcu_spawn_one_cpu_kthread(cpu);
+	if (rcu_scheduler_fully_active)
 		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
-	}
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1560,22 +1449,10 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_cpu_kthread(int cpu)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-}
-
 static int __init rcu_scheduler_really_started(void)
 {
 	rcu_scheduler_fully_active = 1;
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -108,11 +108,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 			rdp->nxttail[RCU_WAIT_TAIL]],
 		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
 #ifdef CONFIG_RCU_BOOST
-	seq_printf(m, " kt=%d/%c/%d ktl=%x",
+	seq_printf(m, " kt=%d/%c ktl=%x",
 		   per_cpu(rcu_cpu_has_work, rdp->cpu),
 		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
 					  rdp->cpu)),
-		   per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
 		   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 	seq_printf(m, " b=%ld", rdp->blimit);
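
Two points about the shape of the conversion are worth noting. First, the new rcu_cpu_kthread() no longer owns a for (;;) loop: the smpboot core calls it only when rcu_cpu_kthread_should_run() reports pending work, so it makes at most ten passes, yields for two jiffies via schedule_timeout_interruptible(2) if work keeps arriving, and then returns to the core loop; this is the simplified rcu yield mentioned in the changelog. Second, thread lifetime moves entirely into generic code: the .setup callback raises the thread to SCHED_FIFO once, replacing the notifier-driven priority toggling of rcu_cpu_kthread_setrt(), and the .park callback merely records RCU_KTHREAD_OFFCPU for tracing. That is what allows the patch to delete rcu_spawn_one_cpu_kthread(), rcu_stop_cpu_kthread(), rcu_cpu_kthread_should_stop(), and the rcu_cpu_kthread_cpu per-CPU variable.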