Commit 265f22a9 authored by Frederic Weisbecker

sched: Keep at least 1 tick per second for active dynticks tasks

The scheduler doesn't yet fully support environments
with a single task running without a periodic tick.

In order to ensure we still maintain the duties of scheduler_tick(),
keep at least 1 tick per second.

This makes sure that we keep the progression of various scheduler
accounting and background maintenance even with a very low granularity.
Examples include cpu load, sched average, CFS entity vruntime,
avenrun and events such as load balancing, amongst other details
handled in sched_class::task_tick().

This limitation will be removed in the future once we get
these individual items to work in full dynticks CPUs.
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
parent 73c30828
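For orientation (an editorial sketch, not part of the commit): the patch below computes how long the tick may stay stopped as "one second after the last scheduler tick, expressed in nanoseconds". A minimal user-space model of that arithmetic, where HZ, the jiffy values and the helper names are illustrative stand-ins for the kernel's own, is:

/*
 * Illustrative sketch only: models the "defer the tick by at most one
 * second" calculation from scheduler_tick_max_deferment() below, using
 * plain integers instead of the kernel's jiffies machinery.
 * All constants are assumptions for the demo.
 */
#include <stdio.h>

#define HZ            1000UL     /* assumed tick rate: 1000 jiffies per second */
#define NSEC_PER_USEC 1000ULL

static unsigned long jiffies_to_usecs(unsigned long j)
{
        return j * (1000000UL / HZ);    /* exact only when HZ divides 10^6 */
}

/* How long (in ns) may the tick stay stopped, given the jiffy stamp of
 * the last scheduler tick and the current jiffy counter? */
static unsigned long long max_deferment(unsigned long last_sched_tick,
                                        unsigned long now)
{
        unsigned long next = last_sched_tick + HZ;   /* one second after the last tick */

        if (next <= now)        /* the kernel uses time_before_eq() to cope with wraparound */
                return 0;       /* already overdue: don't defer the tick at all */

        return (unsigned long long)jiffies_to_usecs(next - now) * NSEC_PER_USEC;
}

int main(void)
{
        /* Last tick fired 250 jiffies (250 ms) ago: 750 ms of deferment remain. */
        printf("%llu ns\n", max_deferment(10000UL, 10250UL));
        return 0;
}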
@@ -1862,6 +1862,7 @@ static inline void wake_up_nohz_cpu(int cpu) { }
 #ifdef CONFIG_NO_HZ_FULL
 extern bool sched_can_stop_tick(void);
+extern u64 scheduler_tick_max_deferment(void);
 #else
 static inline bool sched_can_stop_tick(void) { return false; }
 #endif
...
@@ -2736,8 +2736,35 @@ void scheduler_tick(void)
         rq->idle_balance = idle_cpu(cpu);
         trigger_load_balance(rq, cpu);
 #endif
+        rq_last_tick_reset(rq);
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+/**
+ * scheduler_tick_max_deferment
+ *
+ * Keep at least one tick per second when a single
+ * active task is running because the scheduler doesn't
+ * yet completely support full dynticks environment.
+ *
+ * This makes sure that uptime, CFS vruntime, load
+ * balancing, etc... continue to move forward, even
+ * with a very low granularity.
+ */
+u64 scheduler_tick_max_deferment(void)
+{
+        struct rq *rq = this_rq();
+        unsigned long next, now = ACCESS_ONCE(jiffies);
+
+        next = rq->last_sched_tick + HZ;
+
+        if (time_before_eq(next, now))
+                return 0;
+
+        return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+}
+#endif
+
 notrace unsigned long get_parent_ip(unsigned long addr)
 {
         if (in_lock_functions(addr)) {
...
@@ -6993,6 +7020,9 @@ void __init sched_init(void)
 #ifdef CONFIG_NO_HZ_COMMON
                 rq->nohz_flags = 0;
 #endif
+#ifdef CONFIG_NO_HZ_FULL
+                rq->last_sched_tick = 0;
+#endif
 #endif
                 init_rq_hrtick(rq);
                 atomic_set(&rq->nr_iowait, 0);
...
@@ -17,6 +17,7 @@ select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
 {
         idle_exit_fair(rq);
+        rq_last_tick_reset(rq);
 }
 
 static void post_schedule_idle(struct rq *rq)
...
@@ -409,6 +409,9 @@ struct rq {
 #ifdef CONFIG_NO_HZ_COMMON
         u64 nohz_stamp;
         unsigned long nohz_flags;
 #endif
+#ifdef CONFIG_NO_HZ_FULL
+        unsigned long last_sched_tick;
+#endif
         int skip_clock_update;
...
@@ -1090,6 +1093,13 @@ static inline void dec_nr_running(struct rq *rq)
         rq->nr_running--;
 }
 
+static inline void rq_last_tick_reset(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_FULL
+        rq->last_sched_tick = jiffies;
+#endif
+}
+
 extern void update_rq_clock(struct rq *rq);
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
...
@@ -600,6 +600,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                         time_delta = KTIME_MAX;
         }
 
+#ifdef CONFIG_NO_HZ_FULL
+        if (!ts->inidle) {
+                time_delta = min(time_delta,
+                                 scheduler_tick_max_deferment());
+        }
+#endif
+
         /*
          * calculate the expiry time for the next timer wheel
          * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
...
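As a usage note (again an illustrative sketch, not from the commit): in the tick_nohz_stop_sched_tick() hunk above, the scheduler cap is simply one more upper bound on how long the tick may stay stopped, applied only when the CPU is not idle. A toy model of that clamp, with ktime-style values reduced to plain 64-bit nanosecond counters and made-up numbers:

/*
 * Illustrative sketch only: mirrors the min(time_delta, cap) clamp from
 * the tick_nohz_stop_sched_tick() hunk, using plain int64_t nanoseconds
 * in place of ktime_t.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define KTIME_MAX INT64_MAX

static int64_t min_ns(int64_t a, int64_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        int64_t time_delta = KTIME_MAX;     /* no timer pending: could sleep "forever" */
        int64_t sched_cap  = 750000000LL;   /* e.g. 750 ms left before the 1-second limit */
        int in_idle        = 0;             /* an active task is running, not the idle task */

        if (!in_idle)                       /* idle CPUs are not capped by the scheduler */
                time_delta = min_ns(time_delta, sched_cap);

        printf("tick may stay stopped for at most %" PRId64 " ns\n", time_delta);
        return 0;
}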