Commit dfbca41f authored by Peter Zijlstra, committed by Ingo Molnar

sched: Optimize freq invariant accounting

Currently the freq invariant accounting (in
__update_entity_runnable_avg() and sched_rt_avg_update()) gets the
scale factor from a weak function call; this means that even for archs
that use the default implementation the compiler cannot see into the
function and optimize the extra scaling math away.

This is sad, especially since it's a 64-bit multiplication, which can be
quite costly on some platforms.
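
For illustration, a stand-alone sketch of the scaling math in question follows;
everything except arch_scale_freq_capacity() and the SCHED_CAPACITY_* constants
is made up for the example and is not the kernel's exact code:

#include <stdint.h>
#include <stddef.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

struct sched_domain;	/* opaque here; only the pointer type is needed */

/* The always-inlined default this patch adds: a constant the compiler can see. */
static inline __attribute__((always_inline))
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	(void)sd; (void)cpu;
	return SCHED_CAPACITY_SCALE;
}

/* Illustrative stand-in for the scaling done by the load-tracking code. */
uint64_t scale_delta(uint32_t delta, int cpu)
{
	/*
	 * With the constant visible at the call site the compiler can prove
	 * ((u64)delta * 1024) >> 10 == delta and drop the multiply entirely;
	 * behind a __weak out-of-line call it must emit the 64-bit multiplication.
	 */
	return ((uint64_t)delta * arch_scale_freq_capacity(NULL, cpu))
			>> SCHED_CAPACITY_SHIFT;
}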

So replace the weak function with #ifdef and __always_inline goo. This
is not quite as nice from an arch support PoV but should at least
result in compile-time errors if done wrong.
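
Under the new scheme an architecture overrides the default by defining the
arch_scale_freq_capacity macro before kernel/sched/sched.h reaches the #ifndef
fallback. A minimal sketch, with a hypothetical arch function name and assuming
the arch header is pulled in ahead of the fallback:

/* In an arch header seen before the fallback in kernel/sched/sched.h.
 * The function name below is hypothetical, not an existing kernel symbol. */
unsigned long xyz_arch_freq_capacity(struct sched_domain *sd, int cpu);
#define arch_scale_freq_capacity xyz_arch_freq_capacity

If the arch function's signature does not match, every scheduler call site now
fails to compile, whereas a mismatched weak-symbol override could slip through
to link time.
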
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Morten.Rasmussen@arm.com
Cc: Paul Turner <pjt@google.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: dietmar.eggemann@arm.com
Cc: efault@gmx.de
Cc: kamalesh@linux.vnet.ibm.com
Cc: nicolas.pitre@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: riel@redhat.com
Link: http://lkml.kernel.org/r/20150323131905.GF23123@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1aaf90a4
@@ -2484,8 +2484,6 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
-
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable

@@ -6010,16 +6008,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_capacity(sd, cpu);
-}
-
 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))

@@ -1387,7 +1387,14 @@ static inline int hrtick_enabled(struct rq *rq)
 
 #ifdef CONFIG_SMP
 extern void sched_avg_update(struct rq *rq);
-extern unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
 
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {