Commit 7cb36b6c authored by Linus Torvalds


Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: sched_slice() fixlet
  sched: fix update_min_vruntime
  sched: SCHED_OTHER vs SCHED_IDLE isolation
  sched: SCHED_IDLE weight change
  sched: fix bandwidth validation for UID grouping
  Revert "sched: improve preempt debugging"
parents a9f8d25b 6272d68c
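
Several of the fixes below concern SCHED_IDLE, the policy for tasks that should only consume CPU time nobody else wants. For context (not part of the diff), a task opts into the policy through the standard sched_setscheduler() syscall; a minimal userspace sketch, relying on glibc exposing SCHED_IDLE under _GNU_SOURCE:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            /* SCHED_IDLE carries no realtime priority; it must be 0. */
            struct sched_param param = { .sched_priority = 0 };

            if (sched_setscheduler(0 /* self */, SCHED_IDLE, &param) == -1) {
                    perror("sched_setscheduler");
                    return 1;
            }
            /* From here on this task runs only when the CPU is otherwise idle. */
            return 0;
    }

The same effect is available from the shell with util-linux's chrt --idle 0 <command>.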
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
-#define WEIGHT_IDLEPRIO        2
-#define WMULT_IDLEPRIO         (1 << 31)
+#define WEIGHT_IDLEPRIO        3
+#define WMULT_IDLEPRIO         1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
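
A consistency note on the new constants: for each scheduler weight the kernel also caches an inverse multiplier, wmult ~= 2^32 / weight, so that dividing by a weight becomes a multiply and a shift. Both the old and the new pair satisfy that relation; a small standalone check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* new pair: WEIGHT_IDLEPRIO 3 pairs with 2^32 / 3 */
            assert(((uint64_t)1 << 32) / 3 == 1431655765);
            /* old pair: WEIGHT_IDLEPRIO 2 paired with 1 << 31 == 2^32 / 2 */
            assert(((uint64_t)1 << 32) / 2 == (uint64_t)1 << 31);
            return 0;
    }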
@@ -4440,7 +4440,7 @@ void __kprobes sub_preempt_count(int val)
         /*
          * Underflow?
          */
-        if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
                 return;
         /*
          * Is the spinlock portion underflowing?
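
The revert restores the plain underflow test: sub_preempt_count() warns whenever a caller tries to drop more preempt count than is currently held, without the extra `- (!!kernel_locked())` slack for BKL holders. An illustrative kernel-context fragment (not standalone code) of the imbalance the check catches:

    preempt_disable();   /* preempt_count(): 0 -> 1 */
    preempt_enable();    /* sub_preempt_count(1): 1 > 1 is false, balanced */
    preempt_enable();    /* sub_preempt_count(1): 1 > 0, DEBUG_LOCKS_WARN_ON fires */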
@@ -9050,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
                 runtime = d->rt_runtime;
         }
 
+#ifdef CONFIG_USER_SCHED
+        if (tg == &root_task_group) {
+                period = global_rt_period();
+                runtime = global_rt_runtime();
+        }
+#endif
+
         /*
          * Cannot have more runtime than the period.
          */
...
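
Under CONFIG_USER_SCHED the root task group's own rt_period/rt_runtime fields do not track the global sysctls, so the schedulability check could validate against stale values at the root; the added branch substitutes the global knobs there. The elided comparison that follows is, in this kernel, roughly this (a sketch reconstructed from context, not the verbatim continuation):

    if (runtime > period && runtime != RUNTIME_INF)
            return -EINVAL;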
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                                    struct sched_entity,
                                                    run_node);
 
-                if (vruntime == cfs_rq->min_vruntime)
+                if (!cfs_rq->curr)
                         vruntime = se->vruntime;
                 else
                         vruntime = min_vruntime(vruntime, se->vruntime);
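
For context, the post-fix function reads roughly as below (reconstructed, so treat as a sketch). min_vruntime must creep forward toward the smallest vruntime still in play: the current task's if the tree is empty, the leftmost waiter's if nothing is running, otherwise the minimum of the two. The old `vruntime == cfs_rq->min_vruntime` test could adopt the leftmost waiter's value even while the running task lagged behind it:

    static void update_min_vruntime(struct cfs_rq *cfs_rq)
    {
            u64 vruntime = cfs_rq->min_vruntime;

            if (cfs_rq->curr)
                    vruntime = cfs_rq->curr->vruntime;

            if (cfs_rq->rb_leftmost) {
                    struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                       struct sched_entity,
                                                       run_node);

                    if (!cfs_rq->curr)
                            vruntime = se->vruntime;
                    else
                            vruntime = min_vruntime(vruntime, se->vruntime);
            }

            /* never let min_vruntime move backwards */
            cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
    }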
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
         for_each_sched_entity(se) {
-                struct load_weight *load = &cfs_rq->load;
+                struct load_weight *load;
+
+                cfs_rq = cfs_rq_of(se);
+                load = &cfs_rq->load;
 
                 if (unlikely(!se->on_rq)) {
                         struct load_weight lw = cfs_rq->load;
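
The fixlet's point: for_each_sched_entity() walks se up the group hierarchy, but the old code captured &cfs_rq->load once from the function argument, so every level of a nested group was scaled by the bottom queue's weight. With cfs_rq re-derived each iteration, the slice composes per level, schematically:

    slice(se) = __sched_period(nr_running) *
                product over hierarchy levels i of
                (se_i->load.weight / cfs_rq_i->load.weight)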
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                 unsigned long thresh = sysctl_sched_latency;
 
                 /*
-                 * convert the sleeper threshold into virtual time
+                 * Convert the sleeper threshold into virtual time.
+                 * SCHED_IDLE is a special sub-class.  We care about
+                 * fairness only relative to other SCHED_IDLE tasks,
+                 * all of which have the same weight.
                  */
-                if (sched_feat(NORMALIZED_SLEEPER))
+                if (sched_feat(NORMALIZED_SLEEPER) &&
+                                task_of(se)->policy != SCHED_IDLE)
                         thresh = calc_delta_fair(thresh, se);
 
                 vruntime -= thresh;
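
Why SCHED_IDLE is excluded from the normalized sleeper credit: calc_delta_fair() scales the threshold by NICE_0_LOAD / se->load.weight. With NICE_0_LOAD = 1024 and the new idle weight of 3, a sleeping SCHED_IDLE task would be placed

    20 ms * 1024 / 3 ~= 6827 ms

behind min_vruntime (taking the default sysctl_sched_latency of 20 ms), i.e. it would wake with seconds of virtual-time credit and run ahead of normal tasks, exactly what an idle-class task must never do.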
@@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 static void set_last_buddy(struct sched_entity *se)
 {
-        for_each_sched_entity(se)
-                cfs_rq_of(se)->last = se;
+        if (likely(task_of(se)->policy != SCHED_IDLE)) {
+                for_each_sched_entity(se)
+                        cfs_rq_of(se)->last = se;
+        }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-        for_each_sched_entity(se)
-                cfs_rq_of(se)->next = se;
+        if (likely(task_of(se)->policy != SCHED_IDLE)) {
+                for_each_sched_entity(se)
+                        cfs_rq_of(se)->next = se;
+        }
 }
 /*
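
The buddy hints matter because pick_next_entity() prefers cfs_rq->next and cfs_rq->last over the leftmost (fairest) entity whenever they are close enough in vruntime, so a SCHED_IDLE buddy could cut ahead of normal tasks. A sketch of the picker, assuming the layout of this kernel series:

    static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
    {
            struct sched_entity *se = __pick_next_entity(cfs_rq);

            /* prefer the freshly woken "next" buddy when not too unfair */
            if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
                    return cfs_rq->next;

            /* otherwise prefer the cache-hot "last" buddy */
            if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
                    return cfs_rq->last;

            return se;
    }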
@@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                 return;
 
         /*
-         * Batch tasks do not preempt (their preemption is driven by
+         * Batch and idle tasks do not preempt (their preemption is driven by
          * the tick):
          */
-        if (unlikely(p->policy == SCHED_BATCH))
+        if (unlikely(p->policy != SCHED_NORMAL))
                 return;
 
+        /* Idle tasks are by definition preempted by everybody. */
+        if (unlikely(curr->policy == SCHED_IDLE)) {
+                resched_task(curr);
+                return;
+        }
+
         if (!sched_feat(WAKEUP_PREEMPT))
                 return;
...