Commit 2c923e94 authored by Daniel Lezcano, committed by Ingo Molnar

sched/clock: Make local_clock()/cpu_clock() inline

The local_clock()/cpu_clock() functions were changed to avoid a duplicated,
identical test with sched_clock_cpu() when HAVE_UNSTABLE_SCHED_CLOCK
is set. That reduced them to one-line functions.

As these functions are one-line functions in every configuration and sit
on the hot path, it is useful to declare them static inline in order to
give a strong hint to the compiler.

After verification, it appears the compiler does not inline them
without this hint. Change those functions to static inline.

sched_clock_cpu() is called via the inlined local_clock()/cpu_clock()
functions from sched.h. So any module code including sched.h will
reference sched_clock_cpu(). Thus it must be exported with the
EXPORT_SYMBOL_GPL macro.
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1460385514-14700-2-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c78b17e2
...@@ -2303,8 +2303,6 @@ extern unsigned long long notrace sched_clock(void); ...@@ -2303,8 +2303,6 @@ extern unsigned long long notrace sched_clock(void);
/* /*
* See the comment in kernel/sched/clock.c * See the comment in kernel/sched/clock.c
*/ */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 running_clock(void); extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu); extern u64 sched_clock_cpu(int cpu);
...@@ -2323,6 +2321,16 @@ static inline void sched_clock_idle_sleep_event(void) ...@@ -2323,6 +2321,16 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns) static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{ {
} }
/*
 * NOTE(review): this appears to be the branch where sched_clock() is used
 * directly (the guarding #ifdef lies outside this hunk — confirm against
 * the full header). The @cpu argument is intentionally ignored here.
 */
static inline u64 cpu_clock(int cpu)
{
return sched_clock();
}
/*
 * Clock for the current cpu; in this configuration it simply reads
 * sched_clock() with no per-cpu adjustment.
 */
static inline u64 local_clock(void)
{
return sched_clock();
}
#else #else
/* /*
* Architectures can set this to 1 if they have specified * Architectures can set this to 1 if they have specified
...@@ -2337,6 +2345,26 @@ extern void clear_sched_clock_stable(void); ...@@ -2337,6 +2345,26 @@ extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void); extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns); extern void sched_clock_idle_wakeup_event(u64 delta_ns);
/*
* As outlined in clock.c, provides a fast, high resolution, nanosecond
* time source that is monotonic per cpu argument and has bounded drift
* between cpus.
*
* ######################### BIG FAT WARNING ##########################
* # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
* # go backwards !! #
* ####################################################################
*/
/* Per-cpu clock: delegates to sched_clock_cpu() for the given @cpu. */
static inline u64 cpu_clock(int cpu)
{
return sched_clock_cpu(cpu);
}
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 */
static inline u64 local_clock(void)
{
return sched_clock_cpu(raw_smp_processor_id());
}
#endif #endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING #ifdef CONFIG_IRQ_TIME_ACCOUNTING
......
...@@ -318,6 +318,7 @@ u64 sched_clock_cpu(int cpu) ...@@ -318,6 +318,7 @@ u64 sched_clock_cpu(int cpu)
return clock; return clock;
} }
EXPORT_SYMBOL_GPL(sched_clock_cpu);
void sched_clock_tick(void) void sched_clock_tick(void)
{ {
...@@ -363,33 +364,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) ...@@ -363,33 +364,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
} }
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !! #
 * ####################################################################
 */
/* NOTE(review): out-of-line version removed by this commit in favour of
 * the static inline copy added to sched.h. */
u64 cpu_clock(int cpu)
{
return sched_clock_cpu(cpu);
}
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
return sched_clock_cpu(raw_smp_processor_id());
}
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
void sched_clock_init(void) void sched_clock_init(void)
...@@ -404,22 +378,8 @@ u64 sched_clock_cpu(int cpu) ...@@ -404,22 +378,8 @@ u64 sched_clock_cpu(int cpu)
return sched_clock(); return sched_clock();
} }
/* @cpu is unused here: sched_clock() itself is the clock source. */
u64 cpu_clock(int cpu)
{
return sched_clock();
}
/* Identical to cpu_clock() in this configuration: plain sched_clock(). */
u64 local_clock(void)
{
return sched_clock();
}
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);
/* /*
* Running clock - returns the time that has elapsed while a guest has been * Running clock - returns the time that has elapsed while a guest has been
* running. * running.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment