Commit 3d36aebc authored by Frederic Weisbecker

nohz: Support nohz full remote kick

Remotely kicking a full nohz CPU in order to make it re-evaluate its
next tick is currently implemented using the scheduler IPI.

However, this bloats a scheduler fast path with an off-topic feature.
The scheduler IPI was abused here for its cool "callable
anywhere/anytime" properties.

But now that the irq work subsystem can queue remote callbacks, it's
a perfect fit to safely queue IPIs when interrupts are disabled
without worrying about concurrent callers.

So let's implement remote kick on top of irq work. This is going to
be used when a new event requires the next tick to be recalculated:
more than 1 task competing on the CPU, timer armed, ...

Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
parent 47885016
include/linux/tick.h
@@ -181,7 +181,13 @@ static inline bool tick_nohz_full_cpu(int cpu)
 
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
-extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
+
+static inline void tick_nohz_full_kick(void)
+{
+	tick_nohz_full_kick_cpu(smp_processor_id());
+}
+
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -189,6 +195,7 @@ static inline void tick_nohz_init(void) { }
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
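As a minimal illustrative sketch (not part of the commit) of how the reworked header API is meant to be called: tick_nohz_full_kick() keeps its old local-CPU meaning, now as an inline wrapper, while tick_nohz_full_kick_cpu() takes an arbitrary target. The function name example_kick() and the target_cpu parameter below are hypothetical.

#include <linux/tick.h>

/* Hypothetical caller, for illustration only. */
static void example_kick(int target_cpu)
{
	/* Local kick: inline wrapper around tick_nohz_full_kick_cpu(smp_processor_id()). */
	tick_nohz_full_kick();

	/* Remote kick: queues the per-CPU kick irq work on target_cpu. */
	tick_nohz_full_kick_cpu(target_cpu);
}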
kernel/time/tick-sched.c
@@ -224,13 +224,15 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 };
 
 /*
- * Kick the current CPU if it's full dynticks in order to force it to
+ * Kick the CPU if it's full dynticks in order to force it to
  * re-evaluate its dependency on the tick and restart it if necessary.
  */
-void tick_nohz_full_kick(void)
+void tick_nohz_full_kick_cpu(int cpu)
 {
-	if (tick_nohz_full_cpu(smp_processor_id()))
-		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+	if (!tick_nohz_full_cpu(cpu))
+		return;
+
+	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 }
 
 static void nohz_full_kick_ipi(void *info)
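As a hedged sketch of the use case the changelog anticipates (more than one task competing for a nohz full CPU), an event source could now kick the remote CPU directly instead of riding the scheduler IPI. The helper name and the nr_running argument below are illustrative, not part of this commit.

#include <linux/tick.h>

/* Hypothetical: called after enqueueing a task on a remote nohz full CPU. */
static void example_event_on_remote_cpu(int cpu, unsigned int nr_running)
{
	/*
	 * A second runnable task means the CPU needs its tick back for
	 * preemption, so force it to re-evaluate. The kick is an irq work
	 * queued on the target CPU and raised via IPI, so it is safe to
	 * call even with interrupts disabled here.
	 */
	if (nr_running > 1)
		tick_nohz_full_kick_cpu(cpu);
}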