Commit fd7a6d2b authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: a (rare) PSI crash fix, a CPU affinity related balancing
  fix, and a toning down of active migration attempts"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cfs: fix spurious active migration
  sched/fair: Fix find_idlest_group() to handle CPU affinity
  psi: Fix a division error in psi poll()
  sched/psi: Fix sampling error and rare div0 crashes with cgroups and high uptime
parents c4ff10ef 6cf82d55
kernel/sched/fair.c
@@ -7328,7 +7328,14 @@ static int detach_tasks(struct lb_env *env)
 		    load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
-		if (load/2 > env->imbalance)
+		/*
+		 * Make sure that we don't migrate too much load.
+		 * Nevertheless, relax the constraint if the
+		 * scheduler fails to find a good waiting task
+		 * to migrate.
+		 */
+		if (load/2 > env->imbalance &&
+		    env->sd->nr_balance_failed <= env->sd->cache_nice_tries)
 			goto next;
 
 		env->imbalance -= load;
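
The detach_tasks() change keeps the existing guard against migrating a task whose load is more than twice the remaining imbalance, but relaxes it once the domain has failed to balance more often than cache_nice_tries, so balancing can still make progress. A minimal userspace sketch of that decision; the helper name skip_task() and struct env_sketch are made up, standing in for the detach_tasks() logic on struct lb_env:

#include <stdbool.h>
#include <stdio.h>

struct env_sketch {
	unsigned long imbalance;         /* load still to be moved */
	unsigned int nr_balance_failed;  /* consecutive failed balance runs */
	unsigned int cache_nice_tries;   /* domain's patience threshold */
};

/* Skip a task whose load overshoots the imbalance by more than 2x,
 * unless balancing has already failed too many times in a row. */
static bool skip_task(const struct env_sketch *env, unsigned long load)
{
	return load / 2 > env->imbalance &&
	       env->nr_balance_failed <= env->cache_nice_tries;
}

int main(void)
{
	struct env_sketch env = { .imbalance = 100, .cache_nice_tries = 2 };

	env.nr_balance_failed = 0;
	printf("fresh domain, load 300: skip=%d\n", skip_task(&env, 300));
	env.nr_balance_failed = 3;  /* constraint relaxed: migrate anyway */
	printf("failing domain, load 300: skip=%d\n", skip_task(&env, 300));
	return 0;
}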
@@ -8417,6 +8424,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	if (!idlest)
 		return NULL;
 
+	/* The local group has been skipped because of CPU affinity */
+	if (!local)
+		return idlest;
+
 	/*
 	 * If the local group is idler than the selected idlest group
 	 * don't try and push the task.
...
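
find_idlest_group() previously assumed a local group was always found, but when the task's CPU affinity mask excludes every CPU in the local group, local is NULL and the local-vs-idlest comparison cannot be made; the new check returns the idlest group directly in that case. A sketch of the guard; pick_idlest() and the one-field group struct are hypothetical stand-ins for find_idlest_group() and struct sched_group:

#include <stddef.h>
#include <stdio.h>

struct group { unsigned int idle_cpus; };

static struct group *pick_idlest(struct group *local, struct group *idlest)
{
	if (!idlest)
		return NULL;

	/* The local group was skipped (CPU affinity excluded all of its
	 * CPUs): nothing to compare against, so the idlest remote group
	 * wins outright. */
	if (!local)
		return idlest;

	/* Otherwise prefer staying local when it is at least as idle. */
	return local->idle_cpus >= idlest->idle_cpus ? local : idlest;
}

int main(void)
{
	struct group remote = { .idle_cpus = 4 };

	/* Before the fix, a NULL local group led to a bogus comparison. */
	printf("picked group has %u idle CPUs\n",
	       pick_idlest(NULL, &remote)->idle_cpus);
	return 0;
}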
kernel/sched/psi.c
@@ -185,7 +185,8 @@ static void group_init(struct psi_group *group)
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
-	group->avg_next_update = sched_clock() + psi_period;
+	group->avg_last_update = sched_clock();
+	group->avg_next_update = group->avg_last_update + psi_period;
 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
 	mutex_init(&group->avgs_lock);
 
 	/* Init trigger-related members */
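
Without seeding avg_last_update, a PSI group created long after boot starts with avg_last_update == 0, so its first averaging period appears to span the machine's entire uptime rather than one psi_period, skewing the averages and feeding the downstream division with nonsense. An illustrative userspace sketch of the arithmetic (not kernel code; the 2-second psi_period is an assumption for the example):

#include <stdint.h>
#include <stdio.h>

#define PSI_PERIOD_NS (2ULL * 1000000000)  /* assumed averaging period */

int main(void)
{
	/* A cgroup created after ~30 days of uptime, times in ns. */
	uint64_t now = 30ULL * 24 * 3600 * 1000000000ULL;
	uint64_t last_buggy = 0;    /* old code: left at zero */
	uint64_t last_fixed = now;  /* new code: seeded in group_init() */

	/* The first sample period is "time since the last update": with
	 * the bug it covers the whole uptime, not one psi_period. */
	printf("buggy first period: %llu ns\n",
	       (unsigned long long)(now + PSI_PERIOD_NS - last_buggy));
	printf("fixed first period: %llu ns\n",
	       (unsigned long long)(now + PSI_PERIOD_NS - last_fixed));
	return 0;
}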
@@ -481,7 +482,7 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value)
 		u32 remaining;
 
 		remaining = win->size - elapsed;
-		growth += div_u64(win->prev_growth * remaining, win->size);
+		growth += div64_u64(win->prev_growth * remaining, win->size);
 	}
 
 	return growth;
...
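
The one-word change matters because the kernel's div_u64() takes only a 32-bit divisor: the PSI window size is a u64 in nanoseconds and can be as large as 10 seconds (10^10 ns), which no longer fits in 32 bits, so the divisor is silently truncated, producing garbage growth values and, for unlucky window sizes, a divide-by-zero. div64_u64() accepts the full 64-bit divisor. A userspace sketch of the truncation (div_u64_sketch mimics div_u64(); the implicit u64-to-u32 conversion at the call is the bug):

#include <stdint.h>
#include <stdio.h>

static uint64_t div_u64_sketch(uint64_t dividend, uint32_t divisor)
{
	/* The u64 argument was already truncated to 32 bits at the call. */
	return dividend / divisor;
}

int main(void)
{
	uint64_t win_size = 10ULL * 1000000000; /* 10s window in ns > 2^32 */

	/* Truncating 10^10 to 32 bits gives 10^10 mod 2^32 = 1410065408,
	 * so the quotient is wrong; a divisor whose low 32 bits are zero
	 * would even truncate to 0 and crash. */
	printf("truncated divisor: %u\n", (uint32_t)win_size);
	printf("wrong  : %llu\n",
	       (unsigned long long)div_u64_sketch(1ULL << 40, (uint32_t)win_size));
	printf("correct: %llu\n",
	       (unsigned long long)((1ULL << 40) / win_size));
	return 0;
}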