Commit a8c1de3a authored by Peter Zijlstra, committed by Greg Kroah-Hartman

sched/cpufreq: Fix 32-bit math overflow

[ Upstream commit a23314e9 ]

Vincent Wang reported that get_next_freq() has a multiplication overflow bug
on 32-bit platforms in the IOWAIT boost case, since in that case {util, max}
are in frequency units instead of capacity units.

Solve this by moving the IOWAIT boost to capacity units. Since this means
@max is constant, also simplify the code.
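
To see the failure mode concretely, here is a standalone user-space sketch
(not part of the patch; uint32_t stands in for a 32-bit unsigned long, the
kHz values are hypothetical, and get_next_freq() is reduced to its
freq * util / max core):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Emulate a 32-bit unsigned long with uint32_t. */
	uint32_t max_freq = 2000000;	/* cpuinfo.max_freq in kHz (hypothetical) */
	uint32_t util = max_freq;	/* IOWAIT-boosted util, left in freq units */
	uint32_t max = max_freq;	/* max, also in freq units in this case */

	/* The intermediate product 2000000 * 2000000 wraps around 2^32. */
	uint32_t wrapped = max_freq * util / max;
	uint64_t widened = (uint64_t)max_freq * util / max;

	printf("32-bit result: %u kHz, expected: %llu kHz\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}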
Reported-by: Vincent Wang <vincent.wang@unisoc.com>
Tested-by: Vincent Wang <vincent.wang@unisoc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190305083202.GU32494@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent a629c32a
@@ -50,10 +50,10 @@ struct sugov_cpu {
 
 	bool			iowait_boost_pending;
 	unsigned int		iowait_boost;
-	unsigned int		iowait_boost_max;
 	u64			last_update;
 
 	unsigned long		bw_dl;
+	unsigned long		min;
 	unsigned long		max;
 
 	/* The field below is for single-CPU policies only: */
@@ -283,8 +283,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
 	if (delta_ns <= TICK_NSEC)
 		return false;
 
-	sg_cpu->iowait_boost = set_iowait_boost
-				? sg_cpu->sg_policy->policy->min : 0;
+	sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
 	sg_cpu->iowait_boost_pending = set_iowait_boost;
 
 	return true;
@@ -324,14 +323,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 
 	/* Double the boost at each request */
 	if (sg_cpu->iowait_boost) {
-		sg_cpu->iowait_boost <<= 1;
-		if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
-			sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		sg_cpu->iowait_boost =
+			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
 		return;
 	}
 
 	/* First wakeup after IO: start with minimum boost */
-	sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+	sg_cpu->iowait_boost = sg_cpu->min;
 }
 
 /**
@@ -353,47 +351,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  * This mechanism is designed to boost high frequently IO waiting tasks, while
  * being more conservative on tasks which does sporadic IO operations.
  */
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
-			       unsigned long *util, unsigned long *max)
+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+					unsigned long util, unsigned long max)
 {
-	unsigned int boost_util, boost_max;
+	unsigned long boost;
 
 	/* No boost currently required */
 	if (!sg_cpu->iowait_boost)
-		return;
+		return util;
 
 	/* Reset boost if the CPU appears to have been idle enough */
 	if (sugov_iowait_reset(sg_cpu, time, false))
-		return;
+		return util;
 
-	/*
-	 * An IO waiting task has just woken up:
-	 * allow to further double the boost value
-	 */
-	if (sg_cpu->iowait_boost_pending) {
-		sg_cpu->iowait_boost_pending = false;
-	} else {
+	if (!sg_cpu->iowait_boost_pending) {
 		/*
-		 * Otherwise: reduce the boost value and disable it when we
-		 * reach the minimum.
+		 * No boost pending; reduce the boost value.
 		 */
 		sg_cpu->iowait_boost >>= 1;
-		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+		if (sg_cpu->iowait_boost < sg_cpu->min) {
 			sg_cpu->iowait_boost = 0;
-			return;
+			return util;
 		}
 	}
 
+	sg_cpu->iowait_boost_pending = false;
+
 	/*
-	 * Apply the current boost value: a CPU is boosted only if its current
-	 * utilization is smaller then the current IO boost level.
+	 * @util is already in capacity scale; convert iowait_boost
+	 * into the same scale so we can compare.
 	 */
-	boost_util = sg_cpu->iowait_boost;
-	boost_max = sg_cpu->iowait_boost_max;
-	if (*util * boost_max < *max * boost_util) {
-		*util = boost_util;
-		*max = boost_max;
-	}
+	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
+	return max(boost, util);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -440,7 +429,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
-	sugov_iowait_apply(sg_cpu, time, &util, &max);
+	util = sugov_iowait_apply(sg_cpu, time, util, max);
 	next_f = get_next_freq(sg_policy, util, max);
 	/*
 	 * Do not reduce the frequency if the CPU has not been idle
@@ -480,7 +469,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 
 		j_util = sugov_get_util(j_sg_cpu);
 		j_max = j_sg_cpu->max;
-		sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
+		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
 
 		if (j_util * max > j_max * util) {
 			util = j_util;
@@ -817,7 +806,9 @@ static int sugov_start(struct cpufreq_policy *policy)
 		memset(sg_cpu, 0, sizeof(*sg_cpu));
 		sg_cpu->cpu			= cpu;
 		sg_cpu->sg_policy		= sg_policy;
-		sg_cpu->iowait_boost_max	= policy->cpuinfo.max_freq;
+		sg_cpu->min			=
+			(SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
+			policy->cpuinfo.max_freq;
 	}
 
 	for_each_cpu(cpu, policy->cpus) {
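
For comparison, a similar standalone sketch of the post-patch arithmetic (again
not part of the patch; SCHED_CAPACITY_SCALE/SHIFT are redefined locally and the
frequency and utilization values are hypothetical). With the boost held in
capacity units, every intermediate value stays far below 2^32, so a 32-bit
unsigned long is sufficient:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1U << SCHED_CAPACITY_SHIFT)

int main(void)
{
	uint32_t min_freq = 500000, max_freq = 2000000;	/* kHz, hypothetical */

	/* sg_cpu->min as set up in sugov_start(): min_freq in capacity units */
	uint32_t min = SCHED_CAPACITY_SCALE * min_freq / max_freq;	/* 256 */

	uint32_t iowait_boost = min;		/* first wakeup after IO */
	uint32_t util = 300;			/* capacity units */
	uint32_t max = SCHED_CAPACITY_SCALE;	/* CPU capacity, constant, <= 1024 */

	/* Two more IO wakeups: double the boost, clamped to the capacity scale */
	for (int i = 0; i < 2; i++) {
		iowait_boost <<= 1;
		if (iowait_boost > SCHED_CAPACITY_SCALE)
			iowait_boost = SCHED_CAPACITY_SCALE;
	}

	/* sugov_iowait_apply(): boost and util are now directly comparable */
	uint32_t boost = (iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	uint32_t eff_util = boost > util ? boost : util;

	printf("min=%u boost=%u effective util=%u\n", min, boost, eff_util);
	return 0;
}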