Commit e75c8b47 authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'sched/core' of...

Merge branch 'sched/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into sched/core

Pull CPU runtime stats/accounting fixes from Frederic Weisbecker:

 " Some users are complaining that their threadgroup's runtime accounting
   freezes after a week or so of intense cpu-bound workload. This set tries
   to fix the issue by reducing the risk of multiplication overflow in the
   cputime scaling code. "

Stanislaw Gruszka further explained the historic context and impact of the
bug:

 " Commit 0cf55e1e started to use scaling
   for the whole thread group, which increases the chances of hitting
   multiplication overflow, depending on how many CPUs are on the system.

   We have multiplication utime * rtime for one thread since commit
   b27f03d4.

   Overflow will happen after:

   rtime * utime > 0xffffffffffffffff jiffies

   if the thread utilizes 100% of CPU time, that gives:

   rtime > sqrt(0xffffffffffffffff) jiffies

   rtime > sqrt(0xffffffffffffffff) / (24 * 60 * 60 * HZ) days

   For HZ 100 it will be 497 days for HZ 1000 it will be 49 days.

   The bug affects only users who run a CPU-intensive application for that
   long a period. They also have to be interested in the utime/stime values,
   as the bug has no visible effect other than making those values incorrect. "
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents 1bf08230 d9a3c982
...@@ -29,6 +29,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) ...@@ -29,6 +29,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
return dividend / divisor; return dividend / divisor;
} }
/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor,
 *                 also returning the 64bit remainder
 *
 * Native 64-bit division path: the hardware gives us the quotient
 * directly, and the remainder is recovered without a second divide.
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u64 quot = dividend / divisor;

	*remainder = dividend - quot * divisor;
	return quot;
}
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
...@@ -61,8 +70,16 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) ...@@ -61,8 +70,16 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif #endif
#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif
#ifndef div64_u64 #ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor); static inline u64 div64_u64(u64 dividend, u64 divisor)
{
u64 remainder;
return div64_u64_rem(dividend, divisor, &remainder);
}
#endif #endif
#ifndef div64_s64 #ifndef div64_s64
......
...@@ -521,18 +521,36 @@ void account_idle_ticks(unsigned long ticks) ...@@ -521,18 +521,36 @@ void account_idle_ticks(unsigned long ticks)
account_idle_time(jiffies_to_cputime(ticks)); account_idle_time(jiffies_to_cputime(ticks));
} }
static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total) /*
* Perform (stime * rtime) / total with reduced chances
* of multiplication overflows by using smaller factors
* like quotient and remainders of divisions between
* rtime and total.
*/
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{ {
u64 temp = (__force u64) rtime; u64 rem, res, scaled;
temp *= (__force u64) stime; if (rtime >= total) {
/*
if (sizeof(cputime_t) == 4) * Scale up to rtime / total then add
temp = div_u64(temp, (__force u32) total); * the remainder scaled to stime / total.
else */
temp = div64_u64(temp, (__force u64) total); res = div64_u64_rem(rtime, total, &rem);
scaled = stime * res;
scaled += div64_u64(stime * rem, total);
} else {
/*
* Same in reverse: scale down to total / rtime
* then substract that result scaled to
* to the remaining part.
*/
res = div64_u64_rem(total, rtime, &rem);
scaled = div64_u64(stime, res);
scaled -= div64_u64(scaled * rem, total);
}
return (__force cputime_t) temp; return (__force cputime_t) scaled;
} }
/* /*
...@@ -566,10 +584,14 @@ static void cputime_adjust(struct task_cputime *curr, ...@@ -566,10 +584,14 @@ static void cputime_adjust(struct task_cputime *curr,
*/ */
rtime = nsecs_to_cputime(curr->sum_exec_runtime); rtime = nsecs_to_cputime(curr->sum_exec_runtime);
if (total) if (!rtime) {
stime = scale_stime(stime, rtime, total); stime = 0;
else } else if (!total) {
stime = rtime; stime = rtime;
} else {
stime = scale_stime((__force u64)stime,
(__force u64)rtime, (__force u64)total);
}
/* /*
* If the tick based count grows faster than the scheduler one, * If the tick based count grows faster than the scheduler one,
......
...@@ -79,9 +79,10 @@ EXPORT_SYMBOL(div_s64_rem); ...@@ -79,9 +79,10 @@ EXPORT_SYMBOL(div_s64_rem);
#endif #endif
/** /**
* div64_u64 - unsigned 64bit divide with 64bit divisor * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
* @dividend: 64bit dividend * @dividend: 64bit dividend
* @divisor: 64bit divisor * @divisor: 64bit divisor
* @remainder: 64bit remainder
* *
* This implementation is a modified version of the algorithm proposed * This implementation is a modified version of the algorithm proposed
* by the book 'Hacker's Delight'. The original source and full proof * by the book 'Hacker's Delight'. The original source and full proof
...@@ -89,27 +90,33 @@ EXPORT_SYMBOL(div_s64_rem); ...@@ -89,27 +90,33 @@ EXPORT_SYMBOL(div_s64_rem);
* *
* 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt' * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
*/ */
#ifndef div64_u64 #ifndef div64_u64_rem
u64 div64_u64(u64 dividend, u64 divisor) u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{ {
u32 high = divisor >> 32; u32 high = divisor >> 32;
u64 quot; u64 quot;
if (high == 0) { if (high == 0) {
quot = div_u64(dividend, divisor); u32 rem32;
quot = div_u64_rem(dividend, divisor, &rem32);
*remainder = rem32;
} else { } else {
int n = 1 + fls(high); int n = 1 + fls(high);
quot = div_u64(dividend >> n, divisor >> n); quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0) if (quot != 0)
quot--; quot--;
if ((dividend - quot * divisor) >= divisor)
*remainder = dividend - quot * divisor;
if (*remainder >= divisor) {
quot++; quot++;
*remainder -= divisor;
}
} }
return quot; return quot;
} }
EXPORT_SYMBOL(div64_u64); EXPORT_SYMBOL(div64_u64_rem);
#endif #endif
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment