Commit 902d67a2 authored by Tejun Heo

sched: Move update_other_load_avgs() to kernel/sched/pelt.c

96fd6c65 ("sched: Factor out update_other_load_avgs() from
__update_blocked_others()") added update_other_load_avgs() in
kernel/sched/syscalls.c right above effective_cpu_util(). This location
didn't fit that well in the first place, and with 5d871a63 ("sched/fair:
Move effective_cpu_util() and effective_cpu_util() in fair.c") moving
effective_cpu_util() to kernel/sched/fair.c, it looks even more out of
place.

Relocate the function to kernel/sched/pelt.c where all its callees are.

No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
parent 0b1777f0
kernel/sched/pelt.c
@@ -467,3 +467,23 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 	return ret;
 }
 #endif
+
+/*
+ * Load avg and utilization metrics need to be updated periodically and before
+ * consumption. This function updates the metrics for all subsystems except for
+ * the fair class. @rq must be locked and have its clock updated.
+ */
+bool update_other_load_avgs(struct rq *rq)
+{
+	u64 now = rq_clock_pelt(rq);
+	const struct sched_class *curr_class = rq->curr->sched_class;
+	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
+
+	lockdep_assert_rq_held(rq);
+
+	/* hw_pressure doesn't care about invariance */
+	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
+	       update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
+	       update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
+	       update_irq_load_avg(rq, 0);
+}
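The comment above spells out the call contract: @rq must be locked and its clock updated before the helper runs. As a minimal, hypothetical sketch of a caller honoring that contract (example_refresh_other_avgs() is an editor's illustration, not part of this commit or the kernel tree; it only assumes the standard rq_lock_irqsave()/update_rq_clock() helpers from kernel/sched/sched.h):

/*
 * Editor's sketch, not from the commit: a hypothetical caller that satisfies
 * the documented requirement that @rq is locked and its clock is updated
 * before update_other_load_avgs() is invoked.
 */
static bool example_refresh_other_avgs(struct rq *rq)
{
	struct rq_flags rf;
	bool decayed;

	rq_lock_irqsave(rq, &rf);		/* satisfies lockdep_assert_rq_held(rq) */
	update_rq_clock(rq);			/* rq_clock_pelt()/rq_clock_task() need a fresh clock */
	decayed = update_other_load_avgs(rq);	/* nonzero if any RT/DL/HW/IRQ average changed */
	rq_unlock_irqrestore(rq, &rf);

	return decayed;
}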
kernel/sched/pelt.h
@@ -6,6 +6,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+bool update_other_load_avgs(struct rq *rq);
 
 #ifdef CONFIG_SCHED_HW_PRESSURE
 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
kernel/sched/sched.h
@@ -3245,8 +3245,6 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { }
 
 #ifdef CONFIG_SMP
-bool update_other_load_avgs(struct rq *rq);
-
 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
 				 unsigned long *min,
 				 unsigned long *max);
kernel/sched/syscalls.c
@@ -258,28 +258,6 @@ int sched_core_idle_cpu(int cpu)
 }
 #endif
 
-#ifdef CONFIG_SMP
-/*
- * Load avg and utilization metrics need to be updated periodically and before
- * consumption. This function updates the metrics for all subsystems except for
- * the fair class. @rq must be locked and have its clock updated.
- */
-bool update_other_load_avgs(struct rq *rq)
-{
-	u64 now = rq_clock_pelt(rq);
-	const struct sched_class *curr_class = rq->curr->sched_class;
-	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
-
-	lockdep_assert_rq_held(rq);
-
-	/* hw_pressure doesn't care about invariance */
-	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
-	       update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
-	       update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
-	       update_irq_load_avg(rq, 0);
-}
-#endif /* CONFIG_SMP */
-
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.