Commit 0de7611a authored by Ingo Molnar

timers/nohz: Capitalize 'CPU' consistently

While reviewing another patch I noticed that kernel/time/tick-sched.c
had a charmingly (confusingly, annoyingly) rich set of variants for
spelling 'CPU':

  cpu
  cpus
  CPU
  CPUs
  per CPU
  per-CPU
  per cpu

... sometimes these were mixed even within the same comment block!

Compress these variants down to a single consistent set of:

  CPU
  CPUs
  per-CPU

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6168f8ed
@@ -31,7 +31,7 @@
 #include <trace/events/timer.h>
 
 /*
- * Per cpu nohz control structure
+ * Per-CPU nohz control structure
  */
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
@@ -116,8 +116,8 @@ static void tick_sched_do_timer(ktime_t now)
 #ifdef CONFIG_NO_HZ_COMMON
 	/*
 	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themselves to
+	 * concurrency: This happens only when the CPU in charge went
+	 * into a long sleep. If two CPUs happen to assign themselves to
 	 * this duty, then the jiffies update is still serialized by
 	 * jiffies_lock.
 	 */
@@ -349,7 +349,7 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 /*
  * Re-evaluate the need for the tick as we switch the current task.
  * It might need the tick due to per task/process properties:
- * perf events, posix cpu timers, ...
+ * perf events, posix CPU timers, ...
  */
 void __tick_nohz_task_switch(void)
 {
@@ -509,8 +509,8 @@ int tick_nohz_tick_stopped(void)
  *
  * In case the sched_tick was stopped on this CPU, we have to check if jiffies
  * must be updated. Otherwise an interrupt handler could use a stale jiffy
- * value. We do this unconditionally on any cpu, as we don't know whether the
- * cpu, which has the update task assigned is in a long sleep.
+ * value. We do this unconditionally on any CPU, as we don't know whether the
+ * CPU, which has the update task assigned is in a long sleep.
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
@@ -526,7 +526,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
 }
 
 /*
- * Updates the per cpu time idle statistics counters
+ * Updates the per-CPU time idle statistics counters
  */
 static void
 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
@@ -566,7 +566,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 }
 
 /**
- * get_cpu_idle_time_us - get the total idle time of a cpu
+ * get_cpu_idle_time_us - get the total idle time of a CPU
  * @cpu: CPU number to query
  * @last_update_time: variable to store update time in. Do not update
  * counters if NULL.
@@ -607,7 +607,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
- * get_cpu_iowait_time_us - get the total iowait time of a cpu
+ * get_cpu_iowait_time_us - get the total iowait time of a CPU
  * @cpu: CPU number to query
  * @last_update_time: variable to store update time in. Do not update
  * counters if NULL.
@@ -726,12 +726,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	}
 
 	/*
-	 * If this cpu is the one which updates jiffies, then give up
-	 * the assignment and let it be taken by the cpu which runs
-	 * the tick timer next, which might be this cpu as well. If we
+	 * If this CPU is the one which updates jiffies, then give up
+	 * the assignment and let it be taken by the CPU which runs
+	 * the tick timer next, which might be this CPU as well. If we
 	 * don't drop this here the jiffies might be stale and
 	 * do_timer() never invoked. Keep track of the fact that it
-	 * was the one which had the do_timer() duty last. If this cpu
+	 * was the one which had the do_timer() duty last. If this CPU
 	 * is the one which had the do_timer() duty last, we limit the
 	 * sleep time to the timekeeping max_deferment value.
 	 * Otherwise we can sleep as long as we want.
@@ -841,9 +841,9 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 {
 	/*
-	 * If this cpu is offline and it is the one which updates
+	 * If this CPU is offline and it is the one which updates
 	 * jiffies, then give up the assignment and let it be taken by
-	 * the cpu which runs the tick timer next. If we don't drop
+	 * the CPU which runs the tick timer next. If we don't drop
 	 * this here the jiffies might be stale and do_timer() never
 	 * invoked.
 	 */
@@ -1211,7 +1211,7 @@ void tick_setup_sched_timer(void)
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
 
-	/* Get the next period (per cpu) */
+	/* Get the next period (per-CPU) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
 	/* Offset the tick to avert jiffies_lock contention. */