Commit 0c0bd34a authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes, most of them SCHED_DEADLINE fallout"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Prevent rt_time growth to infinity
  sched/deadline: Switch CPU's presence test order
  sched/deadline: Cleanup RT leftovers from {inc/dec}_dl_migration
  sched: Fix double normalization of vruntime
parents 148b59c6 faa59937
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-	WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+	WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
 	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
 		cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	}
 
 out:
-	WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
 	return best_cpu;
 }
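Note: the two reordered WARN_ON() conditions above lean on C's left-to-right short-circuit evaluation: the IDX_INVALID / -1 test now runs first, so cpu_present() can no longer be reached with an invalid index. A minimal user-space sketch of the same pattern; cpu_present(), IDX_INVALID and the mask below are stand-ins, not the kernel implementations:

#include <stdio.h>
#include <stdbool.h>

#define IDX_INVALID (-1)
#define NR_CPUS 4

/* Stand-in for the kernel's cpu_present(); indexing the mask is only
 * safe for 0 <= idx < NR_CPUS. */
static bool cpu_present(int idx)
{
	static const bool present_mask[NR_CPUS] = { true, true, false, true };
	return present_mask[idx];
}

/* '||' stops at the first true operand, so an invalid index never
 * reaches cpu_present(), which is the point of the reordering above. */
static bool bogus_cpu(int idx)
{
	return idx == IDX_INVALID || !cpu_present(idx);
}

int main(void)
{
	printf("idx=-1 bogus? %d\n", bogus_cpu(IDX_INVALID)); /* 1, no out-of-range read */
	printf("idx=2  bogus? %d\n", bogus_cpu(2));           /* 1, CPU not present */
	printf("idx=3  bogus? %d\n", bogus_cpu(3));           /* 0 */
	return 0;
}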
...
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
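Note on the {inc,dec}_dl_migration cleanup: the deleted assignment recomputed the dl_rq pointer the function was already handed. The idiom is a leftover from rt.c, where group scheduling makes the equivalent step meaningful; deadline has no groups, so rq_of_dl_rq() is roughly a container_of() and the roundtrip is an identity. A stand-alone sketch, with miniature struct definitions standing in for the kernel's:

#include <stdio.h>
#include <stddef.h>

/* Miniature stand-ins: dl_rq is embedded in rq, as in the kernel. */
struct dl_rq { unsigned long dl_nr_migratory; };
struct rq { struct dl_rq dl; };

/* container_of in the usual Linux style */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
	struct rq rq = { .dl = { .dl_nr_migratory = 0 } };
	struct dl_rq *dl_rq = &rq.dl;

	/* The removed line computed exactly the pointer it started with: */
	printf("roundtrip is identity: %d\n", &rq_of_dl_rq(dl_rq)->dl == dl_rq);
	return 0;
}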
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
...
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/*
-	 * Ensure the task's vruntime is normalized, so that when its
+	 * Ensure the task's vruntime is normalized, so that when it's
 	 * switched back to the fair class the enqueue_entity(.flags=0) will
 	 * do the right thing.
 	 *
-	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
-	 * have normalized the vruntime, if it was !on_rq, then only when
+	 * If it's on_rq, then the dequeue_entity(.flags=0) will already
+	 * have normalized the vruntime, if it's !on_rq, then only when
 	 * the task is sleeping will it still have non-normalized vruntime.
 	 */
-	if (!se->on_rq && p->state != TASK_RUNNING) {
+	if (!p->on_rq && p->state != TASK_RUNNING) {
 		/*
 		 * Fix up our vruntime so that the current sleep doesn't
 		 * cause 'unlimited' sleep bonus.
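Note: the fair.c change tests the task-level p->on_rq rather than the entity's se->on_rq. The diff suggests the failure window is a setscheduler-style class change: the entity has just been dequeued, dequeue_entity(.flags=0) has already normalized vruntime and cleared se->on_rq, so the old test could subtract min_vruntime a second time, while p->on_rq still reports the task as queued and skips it. A toy user-space model of that window; all names and numbers here are illustrative, not kernel code:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical toy model of the two on_rq flags behind this fix. */
struct task_model {
	long long vruntime;  /* absolute while queued */
	bool se_on_rq;       /* sched_entity queued on its cfs_rq */
	bool p_on_rq;        /* task-level flag; a plain dequeue in a
	                      * setscheduler-style path leaves it set */
	bool sleeping;       /* p->state != TASK_RUNNING */
};

static const long long min_vruntime = 1000;

/* dequeue_entity(.flags=0) normalizes vruntime exactly once */
static void dequeue_entity(struct task_model *t)
{
	t->vruntime -= min_vruntime;
	t->se_on_rq = false;        /* p_on_rq deliberately stays set */
}

/* The fixed check keys off the task-level flag, which still shows the
 * task as queued, so the just-normalized vruntime is left alone. */
static void switched_from_fair(struct task_model *t)
{
	if (!t->p_on_rq && t->sleeping)
		t->vruntime -= min_vruntime;
}

int main(void)
{
	/* A queued task that has already set its state to sleep, then
	 * has its class changed: the window where the flags disagree. */
	struct task_model t = {
		.vruntime = 1500, .se_on_rq = true, .p_on_rq = true, .sleeping = true,
	};

	dequeue_entity(&t);      /* normalizes once: 1500 -> 500 */
	switched_from_fair(&t);  /* !p_on_rq is false: no second pass */

	printf("vruntime=%lld (the old !se_on_rq test gave -500)\n", t.vruntime);
	return 0;
}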
...
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+	return (hrtimer_active(&rt_b->rt_period_timer) ||
+		rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
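Note: sched_rt_bandwidth_account(), added above and called from update_curr_dl(), lets -deadline runtime be charged to rt_time only while the period timer is active to decay it, or while the budget still has room; without the guard, rt_time grew without bound. A user-space simulation of the guard, with a hypothetical struct modeling just the fields involved (the numbers are made up):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical model of the bandwidth state the new helper reads. */
struct rt_bw_model {
	uint64_t rt_time;     /* runtime charged so far this period */
	uint64_t rt_runtime;  /* budget per period */
	bool timer_active;    /* would the period timer decay rt_time? */
};

/* Same shape as the sched_rt_bandwidth_account() logic above. */
static bool bw_account(const struct rt_bw_model *bw)
{
	return bw->timer_active || bw->rt_time < bw->rt_runtime;
}

int main(void)
{
	struct rt_bw_model bw = { .rt_time = 0, .rt_runtime = 95, .timer_active = false };

	/* Charge 10 ticks of 20 units each, as update_curr_dl() would. */
	for (int tick = 0; tick < 10; tick++) {
		uint64_t delta_exec = 20;
		if (bw_account(&bw))
			bw.rt_time += delta_exec;
	}

	/* Unconditional accounting would reach 200 and keep growing;
	 * the guard stops at 100, just past the 95-unit budget. */
	printf("rt_time=%llu (budget %llu)\n",
	       (unsigned long long)bw.rt_time,
	       (unsigned long long)bw.rt_runtime);
	return 0;
}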
...