Commit cee1afce authored by Frederic Weisbecker, committed by Ingo Molnar

sched/fair: Gather CPU load functions under a more conventional namespace

The CPU load update functions currently have a weak naming convention:
they all start with update_cpu_load_*(), which isn't ideal because
"update" is a very generic concept.

Since two of these functions are already public (and a third is to come),
that's enough to justify introducing a more conventional naming scheme.
So let's do the following rename:

	update_cpu_load_*() -> cpu_load_update_*()
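
For quick reference, here is a minimal sketch of the two public entry points as they read after the rename, assembled from the header hunks below; struct rq is only forward-declared so the fragment stands alone, and the comments simply repeat the call-site notes found in the hunks:

        /* Sketch only -- the real declarations live in the header hunks below. */
        struct rq;

        /* Called from scheduler_tick(). */
        extern void cpu_load_update_active(struct rq *this_rq);

        /* Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed. */
        extern void cpu_load_update_nohz(int active);

The internal helpers touched by the same rename (__cpu_load_update(), __cpu_load_update_nohz() and cpu_load_update_idle()) remain static to the fair scheduling class code and follow the same cpu_load_update prefix.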
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1460555812-25375-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a2c6c91f
@@ -1562,12 +1562,12 @@ Doing the same with chrt -r 5 and function-trace set.
 <idle>-0 3dN.1 12us : menu_hrtimer_cancel <-tick_nohz_idle_exit
 <idle>-0 3dN.1 12us : ktime_get <-tick_nohz_idle_exit
 <idle>-0 3dN.1 12us : tick_do_update_jiffies64 <-tick_nohz_idle_exit
-<idle>-0 3dN.1 13us : update_cpu_load_nohz <-tick_nohz_idle_exit
-<idle>-0 3dN.1 13us : _raw_spin_lock <-update_cpu_load_nohz
+<idle>-0 3dN.1 13us : cpu_load_update_nohz <-tick_nohz_idle_exit
+<idle>-0 3dN.1 13us : _raw_spin_lock <-cpu_load_update_nohz
 <idle>-0 3dN.1 13us : add_preempt_count <-_raw_spin_lock
-<idle>-0 3dN.2 13us : __update_cpu_load <-update_cpu_load_nohz
-<idle>-0 3dN.2 14us : sched_avg_update <-__update_cpu_load
-<idle>-0 3dN.2 14us : _raw_spin_unlock <-update_cpu_load_nohz
+<idle>-0 3dN.2 13us : __cpu_load_update <-cpu_load_update_nohz
+<idle>-0 3dN.2 14us : sched_avg_update <-__cpu_load_update
+<idle>-0 3dN.2 14us : _raw_spin_unlock <-cpu_load_update_nohz
 <idle>-0 3dN.2 14us : sub_preempt_count <-_raw_spin_unlock
 <idle>-0 3dN.1 15us : calc_load_exit_idle <-tick_nohz_idle_exit
 <idle>-0 3dN.1 15us : touch_softlockup_watchdog <-tick_nohz_idle_exit
...
@@ -178,9 +178,9 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void calc_global_load(unsigned long ticks);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void update_cpu_load_nohz(int active);
+extern void cpu_load_update_nohz(int active);
 #else
-static inline void update_cpu_load_nohz(int active) { }
+static inline void cpu_load_update_nohz(int active) { }
 #endif
 
 extern void dump_cpu_task(int cpu);
...
@@ -2917,7 +2917,7 @@ void scheduler_tick(void)
         raw_spin_lock(&rq->lock);
         update_rq_clock(rq);
         curr->sched_class->task_tick(rq, curr, 0);
-        update_cpu_load_active(rq);
+        cpu_load_update_active(rq);
         calc_global_load_tick(rq);
         raw_spin_unlock(&rq->lock);
 
...
@@ -4559,7 +4559,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 }
 
 /**
- * __update_cpu_load - update the rq->cpu_load[] statistics
+ * __cpu_load_update - update the rq->cpu_load[] statistics
  * @this_rq: The rq to update statistics for
  * @this_load: The current load
  * @pending_updates: The number of missed updates
@@ -4594,7 +4594,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
  * term. See the @active paramter.
  */
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
                               unsigned long pending_updates, int active)
 {
         unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
@@ -4642,7 +4642,7 @@ static unsigned long weighted_cpuload(const int cpu)
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
-static void __update_cpu_load_nohz(struct rq *this_rq,
+static void __cpu_load_update_nohz(struct rq *this_rq,
                                    unsigned long curr_jiffies,
                                    unsigned long load,
                                    int active)
@@ -4657,7 +4657,7 @@ static void __update_cpu_load_nohz(struct rq *this_rq,
                  * In the NOHZ_FULL case, we were non-idle, we should consider
                  * its weighted load.
                  */
-                __update_cpu_load(this_rq, load, pending_updates, active);
+                __cpu_load_update(this_rq, load, pending_updates, active);
         }
 }
 
@@ -4678,7 +4678,7 @@ static void __update_cpu_load_nohz(struct rq *this_rq,
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
-static void update_cpu_load_idle(struct rq *this_rq)
+static void cpu_load_update_idle(struct rq *this_rq)
 {
         /*
          * bail if there's load or we're actually up-to-date.
@@ -4686,13 +4686,13 @@ static void update_cpu_load_idle(struct rq *this_rq)
         if (weighted_cpuload(cpu_of(this_rq)))
                 return;
 
-        __update_cpu_load_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
+        __cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
 }
 
 /*
  * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
  */
-void update_cpu_load_nohz(int active)
+void cpu_load_update_nohz(int active)
 {
         struct rq *this_rq = this_rq();
         unsigned long curr_jiffies = READ_ONCE(jiffies);
@@ -4702,7 +4702,7 @@ void update_cpu_load_nohz(int active)
                 return;
 
         raw_spin_lock(&this_rq->lock);
-        __update_cpu_load_nohz(this_rq, curr_jiffies, load, active);
+        __cpu_load_update_nohz(this_rq, curr_jiffies, load, active);
         raw_spin_unlock(&this_rq->lock);
 }
 #endif /* CONFIG_NO_HZ */
@@ -4710,14 +4710,14 @@ void update_cpu_load_nohz(int active)
 /*
  * Called from scheduler_tick()
  */
-void update_cpu_load_active(struct rq *this_rq)
+void cpu_load_update_active(struct rq *this_rq)
 {
         unsigned long load = weighted_cpuload(cpu_of(this_rq));
         /*
-         * See the mess around update_cpu_load_idle() / update_cpu_load_nohz().
+         * See the mess around cpu_load_update_idle() / cpu_load_update_nohz().
          */
         this_rq->last_load_update_tick = jiffies;
-        __update_cpu_load(this_rq, load, 1, 1);
+        __cpu_load_update(this_rq, load, 1, 1);
 }
 
 /*
@@ -8031,7 +8031,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
                 if (time_after_eq(jiffies, rq->next_balance)) {
                         raw_spin_lock_irq(&rq->lock);
                         update_rq_clock(rq);
-                        update_cpu_load_idle(rq);
+                        cpu_load_update_idle(rq);
                         raw_spin_unlock_irq(&rq->lock);
                         rebalance_domains(rq, CPU_IDLE);
                 }
...
@@ -31,9 +31,9 @@ extern void calc_global_load_tick(struct rq *this_rq);
 extern long calc_load_fold_active(struct rq *this_rq);
 
 #ifdef CONFIG_SMP
-extern void update_cpu_load_active(struct rq *this_rq);
+extern void cpu_load_update_active(struct rq *this_rq);
 #else
-static inline void update_cpu_load_active(struct rq *this_rq) { }
+static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
 /*
...
@@ -806,7 +806,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now, int
 {
         /* Update jiffies first */
         tick_do_update_jiffies64(now);
-        update_cpu_load_nohz(active);
+        cpu_load_update_nohz(active);
         calc_load_exit_idle();
         touch_softlockup_watchdog_sched();
 
...