Commit 76c24fb0 authored by Frederic Weisbecker

nohz: New APIs to re-evaluate the tick on full dynticks CPUs

Provide two new helpers to notify full dynticks CPUs about internal system
changes that may require them to re-evaluate the state of their tick.
Practical examples include posix cpu timers, the perf tick and the sched
clock tick.

For now the notification handler, driven through IPIs, is a stub; it will be
filled in once the tick stop/restart infrastructure lands. (An illustrative
usage sketch follows the commit metadata below.)
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
parent 65d798f0
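
Illustration (editorial addition, not part of this commit): a minimal sketch of how a subsystem could use the two new helpers, assuming CONFIG_NO_HZ_FULL is available and <linux/tick.h> provides the declarations added below. The example_* function names are hypothetical.

/* Hypothetical callers, sketching the intended use of the new helpers. */
#include <linux/tick.h>

static void example_arm_local_tick_dependency(void)
{
	/*
	 * Some per-CPU state changed (say, a posix cpu timer was armed)
	 * and the local CPU may need its tick back. Ask it to re-evaluate;
	 * this compiles to a no-op when CONFIG_NO_HZ_FULL is off and does
	 * nothing on CPUs that are not full dynticks.
	 */
	tick_nohz_full_kick();
}

static void example_change_global_tick_dependency(void)
{
	/*
	 * Some system-wide state changed (say, the sched clock tick is
	 * needed again), so every full dynticks CPU must reconsider
	 * whether it can keep its tick stopped.
	 */
	tick_nohz_full_kick_all();
}
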
@@ -159,8 +159,12 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#ifdef CONFIG_NO_HZ_FULL
extern int tick_nohz_full_cpu(int cpu);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_all(void);
#else
static inline int tick_nohz_full_cpu(int cpu) { return 0; }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
#endif
@@ -111,6 +111,7 @@ config NO_HZ_FULL
	select RCU_USER_QS
	select RCU_NOCB_CPU
	select CONTEXT_TRACKING_FORCE
	select IRQ_WORK
	help
	  Adaptively try to shutdown the tick whenever possible, even when
	  the CPU is running tasks. Typically this requires running a single
@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
static cpumask_var_t nohz_full_mask;
bool have_nohz_full_mask;

/*
 * Re-evaluate the need for the tick on the current CPU
 * and restart it if necessary.
 */
static void tick_nohz_full_check(void)
{
	/*
	 * STUB for now, will be filled with the full tick stop/restart
	 * infrastructure patches
	 */
}

static void nohz_full_kick_work_func(struct irq_work *work)
{
	tick_nohz_full_check();
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_work_func,
};

/*
 * Kick the current CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
}

static void nohz_full_kick_ipi(void *info)
{
	tick_nohz_full_check();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_all(void)
{
	if (!have_nohz_full_mask)
		return;

	preempt_disable();
	smp_call_function_many(nohz_full_mask,
			       nohz_full_kick_ipi, NULL, false);
	preempt_enable();
}

int tick_nohz_full_cpu(int cpu)
{
	if (!have_nohz_full_mask)
......
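
Editorial note on the design: both kick paths funnel into tick_nohz_full_check(), which is still a stub here. The local kick goes through irq_work, presumably so it can be requested from contexts where running the check directly or sending an IPI is not possible, while the global kick uses smp_call_function_many() IPIs under preempt_disable() and so must be issued from a context where cross-CPU IPIs are allowed.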