Commit 8cc90515 authored by Vincent Guittot, committed by Ingo Molnar

cpufreq/schedutil: Use DL utilization tracking

Now that we have both the DL class bandwidth requirement and the DL class
utilization, we can detect when the CPU is fully used, in which case we should
run at the maximum frequency. Otherwise, we keep using the DL bandwidth
requirement to define the utilization of the CPU.
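In outline, the frequency-selection rule this patch introduces works as sketched below. This is a minimal standalone illustration, not the kernel code itself: the struct and helper names are invented for the example, the rt_rq_is_runnable() shortcut is omitted, and only the field names (util_cfs, util_rt, util_dl, bw_dl, max) mirror the diff further down.

/* Illustration only: a simplified stand-in for the sugov_cpu fields this patch touches. */
struct sugov_cpu_sketch {
	unsigned long util_cfs;	/* CFS (fair) PELT utilization */
	unsigned long util_rt;	/* RT PELT utilization */
	unsigned long util_dl;	/* DL PELT utilization (the new tracking) */
	unsigned long bw_dl;	/* DL bandwidth requirement */
	unsigned long max;	/* CPU capacity */
};

static unsigned long aggregate_util_sketch(const struct sugov_cpu_sketch *sg)
{
	unsigned long util = sg->util_cfs + sg->util_rt;

	/* No idle time left once DL utilization is accounted for: run at max. */
	if (util + sg->util_dl >= sg->max)
		return sg->max;

	/* Otherwise, grant the DL bandwidth requirement on top of CFS + RT. */
	util += sg->bw_dl;

	return util < sg->max ? util : sg->max;
}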
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1530200714-4504-6-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3727e0e1
kernel/sched/cpufreq_schedutil.c
@@ -56,6 +56,7 @@ struct sugov_cpu {
 	/* The fields below are only needed when sharing a policy: */
 	unsigned long		util_cfs;
 	unsigned long		util_dl;
+	unsigned long		bw_dl;
 	unsigned long		util_rt;
 	unsigned long		max;
@@ -187,6 +188,7 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 	sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
 	sg_cpu->util_cfs = cpu_util_cfs(rq);
 	sg_cpu->util_dl = cpu_util_dl(rq);
+	sg_cpu->bw_dl = cpu_bw_dl(rq);
 	sg_cpu->util_rt = cpu_util_rt(rq);
 }
@@ -198,20 +200,29 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 	if (rt_rq_is_runnable(&rq->rt))
 		return sg_cpu->max;
 
-	util = sg_cpu->util_dl;
-	util += sg_cpu->util_cfs;
+	util = sg_cpu->util_cfs;
 	util += sg_cpu->util_rt;
 
+	if ((util + sg_cpu->util_dl) >= sg_cpu->max)
+		return sg_cpu->max;
+
 	/*
-	 * Utilization required by DEADLINE must always be granted while, for
-	 * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
-	 * gracefully reduce the frequency when no tasks show up for longer
+	 * As there is still idle time on the CPU, we need to compute the
+	 * utilization level of the CPU.
+	 *
+	 * Bandwidth required by DEADLINE must always be granted while, for
+	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+	 * to gracefully reduce the frequency when no tasks show up for longer
 	 * periods of time.
 	 *
 	 * Ideally we would like to set util_dl as min/guaranteed freq and
 	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
 	 * ready for such an interface. So, we only do the latter for now.
 	 */
+
+	/* Add DL bandwidth requirement */
+	util += sg_cpu->bw_dl;
+
 	return min(sg_cpu->max, util);
 }
@@ -367,7 +378,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
  */
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
-	if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
 		sg_policy->need_freq_update = true;
 }
kernel/sched/sched.h
@@ -2199,11 +2199,16 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
-static inline unsigned long cpu_util_dl(struct rq *rq)
+static inline unsigned long cpu_bw_dl(struct rq *rq)
 {
 	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
 }
 
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+	return READ_ONCE(rq->avg_dl.util_avg);
+}
+
 static inline unsigned long cpu_util_cfs(struct rq *rq)
 {
 	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
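As a side note on cpu_bw_dl(): the DL running bandwidth is tracked in BW_SHIFT (1 << 20) fixed point, so the helper rescales it onto the SCHED_CAPACITY_SCALE (1024) capacity range. Below is a small standalone sketch of that arithmetic; the constants match the kernel definitions, while the helper name and the 50% example value are chosen purely for illustration.

#include <stdio.h>

#define BW_SHIFT		20	/* DL bandwidth fixed-point shift, as in kernel/sched/sched.h */
#define SCHED_CAPACITY_SCALE	1024	/* scheduler capacity scale */

/* Example-only helper mirroring the cpu_bw_dl() arithmetic. */
static unsigned long dl_bw_to_capacity(unsigned long running_bw)
{
	return (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

int main(void)
{
	/* A DL runqueue reserving 50% of the CPU: running_bw = 0.5 in BW_SHIFT fixed point. */
	unsigned long running_bw = 1UL << (BW_SHIFT - 1);

	printf("%lu\n", dl_bw_to_capacity(running_bw));	/* prints 512 on the 0..1024 scale */
	return 0;
}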