Commit 10b033d4 authored by Peter Zijlstra, committed by Ingo Molnar

sched/clock, x86: Avoid a runtime condition in native_sched_clock()

Use a static_key to avoid touching tsc_disabled and a runtime
condition in native_sched_clock() -- less cachelines touched is always
better.

                        MAINLINE   PRE       POST

    sched_clock_stable: 1          1         1
    (cold) sched_clock: 329841     215295    213039
    (cold) local_clock: 301773     220773    216084
    (warm) sched_clock: 38375      25659     25231
    (warm) local_clock: 100371     27242     27601
    (warm) rdtsc:       27340      24208     24203
    sched_clock_stable: 0          0         0
    (cold) sched_clock: 382634     237019    240055
    (cold) local_clock: 396890     294819    299942
    (warm) sched_clock: 38194      25609     25276
    (warm) local_clock: 143452     71232     73232
    (warm) rdtsc:       27345      24243     24244
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-hrz87bo37qke25bty6pnfy4b@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6577e42a
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <linux/static_key.h>
#include <asm/hpet.h> #include <asm/hpet.h>
#include <asm/timer.h> #include <asm/timer.h>
...@@ -37,6 +38,8 @@ static int __read_mostly tsc_unstable; ...@@ -37,6 +38,8 @@ static int __read_mostly tsc_unstable;
erroneous rdtsc usage on !cpu_has_tsc processors */ erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1; static int __read_mostly tsc_disabled = -1;
static struct static_key __use_tsc = STATIC_KEY_INIT;
int tsc_clocksource_reliable; int tsc_clocksource_reliable;
/* /*
...@@ -282,7 +285,7 @@ u64 native_sched_clock(void) ...@@ -282,7 +285,7 @@ u64 native_sched_clock(void)
* very important for it to be as fast as the platform * very important for it to be as fast as the platform
* can achieve it. ) * can achieve it. )
*/ */
if (unlikely(tsc_disabled)) { if (!static_key_false(&__use_tsc)) {
/* No locking but a rare wrong value is not a big deal: */ /* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
} }
...@@ -1193,7 +1196,9 @@ void __init tsc_init(void) ...@@ -1193,7 +1196,9 @@ void __init tsc_init(void)
return; return;
/* now allow native_sched_clock() to use rdtsc */ /* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0; tsc_disabled = 0;
static_key_slow_inc(&__use_tsc);
if (!no_sched_irq_time) if (!no_sched_irq_time)
enable_sched_clock_irqtime(); enable_sched_clock_irqtime();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment