Commit 72f9f3fd authored by Luca Abeni, committed by Ingo Molnar

sched/deadline: Remove dl_new from struct sched_dl_entity

The dl_new field of struct sched_dl_entity is currently used to
identify new deadline tasks, so that their deadline and runtime
can be properly initialised.

However, these tasks can be easily identified by checking if
their deadline is smaller than the current time when they switch
to SCHED_DEADLINE. So, dl_new can be removed by introducing this
check in switched_to_dl(); this simplifies the
SCHED_DEADLINE code.
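
For illustration only, here is a minimal user-space sketch of that check
(the struct, function names and numbers are made-up stand-ins, not the
kernel's sched_dl_entity or rq_clock(); only the dl_time_before() idiom
matches the kernel helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the deadline parameters of one task (values in ns). */
struct toy_dl_entity {
	uint64_t deadline;    /* absolute deadline of the current instance */
	uint64_t runtime;     /* remaining runtime */
	uint64_t dl_deadline; /* relative deadline parameter */
	uint64_t dl_runtime;  /* runtime budget per period */
};

/* Wraparound-safe "a is earlier than b", same idiom as the kernel helper. */
static bool dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/*
 * Roughly what switched_to_dl() plus setup_new_dl_entity() do after this
 * patch (minus the dl_throttled race handling): a task that just became
 * a deadline task still carries a deadline in the past (typically 0), so
 * the comparison alone identifies it and no dl_new flag is needed.
 */
static void toy_switched_to_dl(struct toy_dl_entity *dl_se, uint64_t now)
{
	if (dl_time_before(dl_se->deadline, now)) {
		dl_se->deadline = now + dl_se->dl_deadline;
		dl_se->runtime = dl_se->dl_runtime;
	}
}

int main(void)
{
	struct toy_dl_entity dl_se = {
		.deadline = 0, .runtime = 0,
		.dl_deadline = 10 * 1000 * 1000, /* 10 ms */
		.dl_runtime = 3 * 1000 * 1000,   /*  3 ms */
	};
	uint64_t now = 123456789; /* pretend current clock value */

	toy_switched_to_dl(&dl_se, now);
	printf("deadline=%llu runtime=%llu\n",
	       (unsigned long long)dl_se.deadline,
	       (unsigned long long)dl_se.runtime);
	return 0;
}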
Signed-off-by: Luca Abeni <luca.abeni@unitn.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1457350024-7825-2-git-send-email-luca.abeni@unitn.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ca031745
@@ -1333,10 +1333,6 @@ struct sched_dl_entity {
 	 * task has to wait for a replenishment to be performed at the
 	 * next firing of dl_timer.
 	 *
-	 * @dl_new tells if a new instance arrived. If so we must
-	 * start executing it with full runtime and reset its absolute
-	 * deadline;
-	 *
 	 * @dl_boosted tells if we are boosted due to DI. If so we are
 	 * outside bandwidth enforcement mechanism (but only until we
 	 * exit the critical section);
@@ -1344,7 +1340,7 @@ struct sched_dl_entity {
 	 * @dl_yielded tells if task gave up the cpu before consuming
 	 * all its available runtime during the last job.
 	 */
-	int dl_throttled, dl_new, dl_boosted, dl_yielded;
+	int dl_throttled, dl_boosted, dl_yielded;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
......
@@ -2051,7 +2051,6 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_bw = 0;
 
 	dl_se->dl_throttled = 0;
-	dl_se->dl_new = 1;
 	dl_se->dl_yielded = 0;
 }
......
@@ -352,7 +352,15 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
-	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
+	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
+
+	/*
+	 * We are racing with the deadline timer. So, do nothing because
+	 * the deadline timer handler will take care of properly recharging
+	 * the runtime and postponing the deadline
+	 */
+	if (dl_se->dl_throttled)
+		return;
 
 	/*
 	 * We use the regular wall clock time to set deadlines in the
@@ -361,7 +369,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
 	 */
 	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 	dl_se->runtime = pi_se->dl_runtime;
-	dl_se->dl_new = 0;
 }
 
 /*
@@ -503,15 +510,6 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
 
-	/*
-	 * The arrival of a new instance needs special treatment, i.e.,
-	 * the actual scheduling parameters have to be "renewed".
-	 */
-	if (dl_se->dl_new) {
-		setup_new_dl_entity(dl_se, pi_se);
-		return;
-	}
-
 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
@@ -607,16 +605,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		goto unlock;
 	}
 
-	/*
-	 * This is possible if switched_from_dl() raced against a running
-	 * callback that took the above !dl_task() path and we've since then
-	 * switched back into SCHED_DEADLINE.
-	 *
-	 * There's nothing to do except drop our task reference.
-	 */
-	if (dl_se->dl_new)
-		goto unlock;
-
 	/*
 	 * The task might have been boosted by someone else and might be in the
 	 * boosting/deboosting path, its not throttled.
@@ -925,7 +913,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
+	if (flags & ENQUEUE_WAKEUP)
 		update_dl_entity(dl_se, pi_se);
 	else if (flags & ENQUEUE_REPLENISH)
 		replenish_dl_entity(dl_se, pi_se);
@@ -1726,6 +1714,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
+	if (dl_time_before(p->dl.deadline, rq_clock(rq)))
+		setup_new_dl_entity(&p->dl, &p->dl);
+
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
......