Commit 328fefad authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two fixes: a guest-cputime accounting fix, and a cgroup bandwidth
  quota precision fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/vtime: Fix guest/system mis-accounting on task switch
  sched/fair: Scale bandwidth quota and period without losing quota/period ratio precision
parents 465a7e29 68e7a4d6
kernel/sched/cputime.c:

@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	/* We might have scheduled out from guest path */
-	if (current->flags & PF_VCPU)
+	if (tsk->flags & PF_VCPU)
 		vtime_account_guest(tsk, vtime);
 	else
 		__vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 */
 	write_seqcount_begin(&vtime->seqcount);
 	__vtime_account_system(tsk, vtime);
-	current->flags |= PF_VCPU;
+	tsk->flags |= PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	vtime_account_guest(tsk, vtime);
-	current->flags &= ~PF_VCPU;
+	tsk->flags &= ~PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
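Why the tsk change matters: vtime_account_system() is not only called for the running task. The task-switch path calls it as vtime_account_system(prev) at a point where `current` already points at the scheduling-in task, so checking current->flags inspects the wrong task, and time belonging to a vCPU task that is switching out can be misrouted between guest and system. Below is a minimal userspace sketch of that misrouting, not kernel code; the task struct, the PF_VCPU value, and the helper names are simplified stand-ins.

#include <stdio.h>

#define PF_VCPU 0x1			/* simplified stand-in for the kernel flag */

struct task {
	const char *comm;
	unsigned int flags;
};

static struct task *current_task;	/* models the kernel's `current` */

/* Buggy check: looks at whoever is current, not the task being accounted. */
static const char *account_buggy(struct task *tsk)
{
	(void)tsk;			/* the time belongs to tsk, but... */
	return (current_task->flags & PF_VCPU) ? "guest" : "system";
}

/* Fixed check: looks at the task the time actually belongs to. */
static const char *account_fixed(struct task *tsk)
{
	return (tsk->flags & PF_VCPU) ? "guest" : "system";
}

int main(void)
{
	struct task vcpu  = { "kvm-vcpu", PF_VCPU };	/* scheduling out */
	struct task other = { "other",    0 };		/* scheduling in */

	/* On a task switch, `current` is already the incoming task by the
	 * time the outgoing task's pending vtime is flushed. */
	current_task = &other;

	printf("buggy: %s time charged as %s\n", vcpu.comm, account_buggy(&vcpu));
	printf("fixed: %s time charged as %s\n", vcpu.comm, account_fixed(&vcpu));
	return 0;
}

With a vCPU task switching out while a regular task switches in, the buggy check charges the time as system while the fixed check charges it as guest.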
kernel/sched/fair.c:

@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 		if (++count > 3) {
 			u64 new, old = ktime_to_ns(cfs_b->period);
 
-			new = (old * 147) / 128; /* ~115% */
-			new = min(new, max_cfs_quota_period);
-
-			cfs_b->period = ns_to_ktime(new);
-
-			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
-			cfs_b->quota *= new;
-			cfs_b->quota = div64_u64(cfs_b->quota, old);
-
-			pr_warn_ratelimited(
-	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
-				smp_processor_id(),
-				div_u64(new, NSEC_PER_USEC),
-				div_u64(cfs_b->quota, NSEC_PER_USEC));
+			/*
+			 * Grow period by a factor of 2 to avoid losing precision.
+			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
+			 * to fail.
+			 */
+			new = old * 2;
+			if (new < max_cfs_quota_period) {
+				cfs_b->period = ns_to_ktime(new);
+				cfs_b->quota *= 2;
+
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(new, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			} else {
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(old, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			}
 
 			/* reset count so we don't come right back in here */
 			count = 0;
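Why doubling instead of scaling by 147/128: the bandwidth controller compares parent and child task groups by a normalized quota/period ratio (the kernel's to_ratio() computes roughly (quota << 20) / period). The old code rescaled quota with integer divisions, which can nudge that ratio, so a child group configured at exactly its parent's ratio could suddenly fail the __cfs_schedulable() check. Multiplying both values by a power of two keeps the ratio bit-exact. A small userspace demonstration follows; the shift width and the sample period/quota values are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

/* Normalized bandwidth, modeled on the kernel's to_ratio():
 * ratio = (quota << 20) / period. */
static uint64_t to_ratio(uint64_t period, uint64_t quota)
{
	return (quota << 20) / period;
}

int main(void)
{
	uint64_t period = 100000, quota = 50000;	/* 100us / 50us, in ns */

	/* Old scheme: period *= 147/128, then quota rescaled by the same
	 * factor, losing bits to two integer divisions along the way. */
	uint64_t new_period = period * 147 / 128;
	uint64_t new_quota  = quota * new_period / period;

	/* New scheme: double both; a power of two keeps the ratio exact. */
	uint64_t dbl_period = period * 2;
	uint64_t dbl_quota  = quota * 2;

	printf("original ratio:  %llu\n",
	       (unsigned long long)to_ratio(period, quota));
	printf("147/128 scaling: %llu (drifted)\n",
	       (unsigned long long)to_ratio(new_period, new_quota));
	printf("x2 scaling:      %llu (exact)\n",
	       (unsigned long long)to_ratio(dbl_period, dbl_quota));
	return 0;
}

With these inputs the 147/128 path drops the normalized ratio from 524288 to 524283, while doubling leaves it at exactly 524288.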