Commit 209a0cbd authored by Luca Abeni, committed by Ingo Molnar

sched/deadline: Improve the tracking of active utilization

This patch implements a more theoretically sound algorithm for
tracking active utilization: instead of decreasing it when a
task blocks, use a timer (the "inactive timer", named after the
"Inactive" task state of the GRUB algorithm) to decrease the
active utilization at the so-called "0-lag time".

Tested-by: Claudio Scordino <claudio@evidence.eu.com>
Tested-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Link: http://lkml.kernel.org/r/1495138417-6203-3-git-send-email-luca.abeni@santannapisa.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e36d8677
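For reference, the "0-lag time" named in the commit message is the instant at which a blocked task's lag reaches zero: if the task consumed its remaining runtime at its reserved bandwidth (dl_runtime/dl_period), it would finish exactly then, so its contribution to the active utilization can be removed at that point without endangering other tasks' guarantees. A minimal standalone sketch of the computation (function name and plain integer types are illustrative, not the kernel's):

```c
#include <stdint.h>

/*
 * 0-lag time of a blocked -deadline task:
 *
 *   t0 = deadline - runtime / (dl_runtime / dl_period)
 *      = deadline - (runtime * dl_period) / dl_runtime
 *
 * Removing the task's bandwidth from the active utilization before
 * t0 could break other tasks' guarantees; removing it later is
 * needlessly pessimistic.
 */
static uint64_t zerolag_time(uint64_t deadline,   /* absolute deadline */
                             uint64_t runtime,    /* remaining runtime */
                             uint64_t dl_runtime, /* runtime per period */
                             uint64_t dl_period)  /* reservation period */
{
	return deadline - (runtime * dl_period) / dl_runtime;
}
```

For example, a task with dl_runtime = 10 ms and dl_period = 100 ms (bandwidth 0.1) that blocks with 5 ms of runtime left and its deadline at t = 200 ms has its 0-lag time at 200 - 5/0.1 = 150 ms.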
```diff
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -445,16 +445,33 @@ struct sched_dl_entity {
 	 *
 	 * @dl_yielded tells if task gave up the CPU before consuming
 	 * all its available runtime during the last job.
+	 *
+	 * @dl_non_contending tells if the task is inactive while still
+	 * contributing to the active utilization. In other words, it
+	 * indicates if the inactive timer has been armed and its handler
+	 * has not been executed yet. This flag is useful to avoid race
+	 * conditions between the inactive timer handler and the wakeup
+	 * code.
 	 */
 	int				dl_throttled;
 	int				dl_boosted;
 	int				dl_yielded;
+	int				dl_non_contending;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
 	 * own bandwidth to be enforced, thus we need one timer per task.
 	 */
 	struct hrtimer			dl_timer;
+
+	/*
+	 * Inactive timer, responsible for decreasing the active utilization
+	 * at the "0-lag time". When a -deadline task blocks, it contributes
+	 * to GRUB's active utilization until the "0-lag time", hence a
+	 * timer is needed to decrease the active utilization at the correct
+	 * time.
+	 */
+	struct hrtimer			inactive_timer;
 };
 
 union rcu_special {
```
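The @dl_non_contending comment above describes a race between the inactive timer handler and the wakeup code; below is a sketch of how a wakeup path can consume the flag. The helper name add_running_bw() and the omission of task reference counting are assumptions here; the real code lives in the collapsed kernel/sched/deadline.c part of this diff.

```c
#include <linux/hrtimer.h>
#include <linux/sched.h>

/* Sketch only: add_running_bw() is an assumed accounting helper. */
static void task_contending(struct sched_dl_entity *dl_se)
{
	if (dl_se->dl_non_contending) {
		/*
		 * Woken before the 0-lag time: the task never stopped
		 * contributing to the active utilization, so only the
		 * pending timer has to go. If the handler is already
		 * running and cannot be cancelled, it will observe the
		 * cleared flag and leave the accounting alone.
		 */
		dl_se->dl_non_contending = 0;
		hrtimer_try_to_cancel(&dl_se->inactive_timer);
	} else {
		/* The 0-lag time passed while blocked: count it again. */
		add_running_bw(dl_se);
	}
}
```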
```diff
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2153,6 +2153,7 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_throttled = 0;
 	dl_se->dl_yielded = 0;
+	dl_se->dl_non_contending = 0;
 }
 
 /*
@@ -2184,6 +2185,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
+	init_dl_inactive_task_timer(&p->dl);
 	__dl_clear_params(p);
 
 	INIT_LIST_HEAD(&p->rt.run_list);
@@ -2506,6 +2508,7 @@ static int dl_overflow(struct task_struct *p, int policy,
 	    !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
 		__dl_clear(dl_b, p->dl.dl_bw);
 		__dl_add(dl_b, new_bw);
+		dl_change_utilization(p, new_bw);
 		err = 0;
 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
 		__dl_clear(dl_b, p->dl.dl_bw);
```
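The dl_change_utilization() call added to dl_overflow() ensures that changing the bandwidth of a task that is blocked, but still counted as contending, also fixes up the active-utilization accounting. A sketch of what such a hook plausibly does (the actual body is in the collapsed kernel/sched/deadline.c diff; sub_running_bw() is an assumed helper):

```c
#include <linux/hrtimer.h>
#include <linux/sched.h>

void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	if (task_on_rq_queued(p))
		return;	/* the enqueue/dequeue paths handle queued tasks */

	if (p->dl.dl_non_contending) {
		/* The old bandwidth is still counted: take it out now. */
		sub_running_bw(&p->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the handler is already running it will see the
		 * cleared flag and not touch the accounting again.
		 */
		hrtimer_try_to_cancel(&p->dl.inactive_timer);
	}
}
```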
[diff for kernel/sched/deadline.c is collapsed in this view]
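In outline, the collapsed file implements the block side of the scheme: instead of decreasing the active utilization when a task blocks, it computes the 0-lag time and arms the inactive timer to perform the decrease there. A sketch of that path, with assumed helper names (grub_clock(), sub_running_bw()):

```c
#include <linux/hrtimer.h>
#include <linux/math64.h>
#include <linux/sched.h>

static void task_non_contending(struct sched_dl_entity *dl_se)
{
	u64 now = grub_clock(dl_se);	/* assumed: current rq clock */
	u64 zerolag;

	/* 0-lag time: deadline - runtime / (dl_runtime / dl_period) */
	zerolag = dl_se->deadline -
		  div64_u64(dl_se->runtime * dl_se->dl_period,
			    dl_se->dl_runtime);

	if (zerolag <= now) {
		/* Already past the 0-lag time: decrease synchronously. */
		sub_running_bw(dl_se);
		return;
	}

	/* Keep contributing until the timer fires at the 0-lag time. */
	dl_se->dl_non_contending = 1;
	hrtimer_start(&dl_se->inactive_timer,
		      ns_to_ktime(zerolag - now), HRTIMER_MODE_REL);
}
```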
```diff
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -244,6 +244,7 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 }
 
+void dl_change_utilization(struct task_struct *p, u64 new_bw);
 extern void init_dl_bw(struct dl_bw *dl_b);
 
 #ifdef CONFIG_CGROUP_SCHED
@@ -1493,6 +1494,7 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
 
 unsigned long to_ratio(u64 period, u64 runtime);
```
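Finally, the newly declared init_dl_inactive_task_timer() presumably mirrors init_dl_task_timer() in setting up the hrtimer. A sketch of the setup together with a handler consistent with the dl_non_contending protocol (both bodies are assumptions; the real ones, including task reference counting, are in the collapsed diff):

```c
#include <linux/hrtimer.h>
#include <linux/sched.h>

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);

	/*
	 * The wakeup path may have cleared dl_non_contending after this
	 * handler was armed; in that case the task is contending again
	 * and the accounting must not be touched.
	 */
	if (dl_se->dl_non_contending) {
		sub_running_bw(dl_se);	/* assumed accounting helper */
		dl_se->dl_non_contending = 0;
	}
	return HRTIMER_NORESTART;
}

void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = inactive_task_timer;
}
```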