Commit 2e62c474 authored by Vincent Guittot, committed by Ingo Molnar

sched/fair: Remove #ifdefs from scale_rt_capacity()

Reuse cpu_util_irq(), which was added for schedutil, and make it return an irq
utilization of 0 when !CONFIG_IRQ_TIME_ACCOUNTING.

But the compiler is not able to optimize the sequence (at least with
aarch64 GCC 7.2.1):

	free *= (max - irq);
	free /= max;

when irq is fixed to 0.

Add a new inline function scale_irq_capacity() that scales utilization when
irq time is accounted. Reuse this function in schedutil, which applies a
similar formula.
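
For illustration, a standalone sketch of the two variants of the helper added
below in kernel/sched/sched.h; the _noirq name, the main() and its numbers are
invented for this example:

	#include <stdio.h>

	/* Variant when irq time accounting is built in. */
	static inline unsigned long
	scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
	{
		util *= (max - irq);
		util /= max;

		return util;
	}

	/*
	 * Variant when !CONFIG_IRQ_TIME_ACCOUNTING: irq is known to be 0, so
	 * the mul/div pair is removed outright instead of hoping the compiler
	 * proves that util * (max - 0) / max == util.
	 */
	static inline unsigned long
	scale_irq_capacity_noirq(unsigned long util, unsigned long irq, unsigned long max)
	{
		(void)irq;
		(void)max;
		return util;
	}

	int main(void)
	{
		/* 1024 plays the role of SCHED_CAPACITY_SCALE; 100 is made up. */
		printf("%lu\n", scale_irq_capacity(800, 100, 1024));		/* 721 */
		printf("%lu\n", scale_irq_capacity_noirq(800, 0, 1024));	/* 800 */
		return 0;
	}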
Suggested-by: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: rjw@rjwysocki.net
Link: http://lkml.kernel.org/r/1532001606-6689-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4765096f
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -177,7 +177,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
 	rq->clock_task += delta;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		update_irq_load_avg(rq, irq_delta + steal);
 #endif
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -247,8 +247,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
	 *   U' = irq + ------- * U
	 *                 max
	 */
-	util *= (max - irq);
-	util /= max;
+	util = scale_irq_capacity(util, irq, max);
 	util += irq;
 
 	/*
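To make the helper concrete: with invented values max = 1024 (the usual
SCHED_CAPACITY_SCALE), irq = 128 and util = 512, the two lines above compute

	util = scale_irq_capacity(512, 128, 1024);	/* 512 * (1024 - 128) / 1024 = 448 */
	util += irq;					/* 448 + 128 = 576  ->  U' */

i.e. the task utilization is first compressed into the capacity left over
after IRQ time, then the IRQ utilization itself is added back on top.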
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7551,16 +7551,12 @@ static unsigned long scale_rt_capacity(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
 	unsigned long used, free;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
 	unsigned long irq;
-#endif
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	irq = READ_ONCE(rq->avg_irq.util_avg);
+	irq = cpu_util_irq(rq);
 
 	if (unlikely(irq >= max))
 		return 1;
-#endif
 
 	used = READ_ONCE(rq->avg_rt.util_avg);
 	used += READ_ONCE(rq->avg_dl.util_avg);
@@ -7569,11 +7565,8 @@ static unsigned long scale_rt_capacity(int cpu)
 		return 1;
 
 	free = max - used;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	free *= (max - irq);
-	free /= max;
-#endif
-	return free;
+
+	return scale_irq_capacity(free, irq, max);
 }
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
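Reassembled from the hunks above, scale_rt_capacity() now reads straight
through with no #ifdef (the used >= max check sits in the elided context
between the two hunks and is restored here; comments added):

	static unsigned long scale_rt_capacity(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
		unsigned long used, free;
		unsigned long irq;

		irq = cpu_util_irq(rq);

		/* IRQ time already consumes (nearly) all capacity. */
		if (unlikely(irq >= max))
			return 1;

		used = READ_ONCE(rq->avg_rt.util_avg);
		used += READ_ONCE(rq->avg_dl.util_avg);

		if (unlikely(used >= max))
			return 1;

		free = max - used;

		/* Scale what is left by the fraction of time not spent in IRQ. */
		return scale_irq_capacity(free, irq, max);
	}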
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -856,6 +856,7 @@ struct rq {
 	struct sched_avg	avg_rt;
 	struct sched_avg	avg_dl;
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#define HAVE_SCHED_AVG_IRQ
 	struct sched_avg	avg_irq;
 #endif
 	u64			idle_stamp;
@@ -2210,17 +2211,32 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 {
 	return READ_ONCE(rq->avg_rt.util_avg);
 }
 #endif
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
 	return rq->avg_irq.util_avg;
 }
 
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+	util *= (max - irq);
+	util /= max;
+
+	return util;
+}
 #else
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
 	return 0;
 }
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+	return util;
+}
 #endif
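The sched.h change also shows the trick that enables the other hunks: the
compound config test is evaluated once, next to the data it guards, and
exported as a single short macro. A minimal sketch of the pattern (names taken
from this patch, the comments standing in for the surrounding code):

	/* Decide once, where the irq PELT signal itself is declared ... */
	#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	#define HAVE_SCHED_AVG_IRQ
	#endif

	/* ... then every user tests one symbol instead of repeating the pair. */
	#ifdef HAVE_SCHED_AVG_IRQ
	/* irq utilization exists: track it and scale capacity by it */
	#else
	/* no irq accounting: cpu_util_irq() is 0, scale_irq_capacity() is a no-op */
	#endif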