Commit f94c8d11 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched/clock, x86/tsc: Rework the x86 'unstable' sched_clock() interface

Wanpeng Li reported that since the following commit:

  acb04058 ("sched/clock: Fix hotplug crash")

... KVM always runs with unstable sched-clock even though KVM's
kvm_clock _is_ stable.

The problem is that we've tied clear_sched_clock_stable() to the TSC
state, and overlooked that sched_clock() is a paravirt function.

Solve this by doing two things:

 - tie the sched_clock() stable state more clearly to the TSC stable
   state for the normal (!paravirt) case.

 - only call clear_sched_clock_stable() when we mark TSC unstable
   when we use native_sched_clock().

The first means we can actually run with stable sched_clock in more
situations than before, which is good. And since commit:

  12907fbb ("sched/clock, clocksource: Add optional cs::mark_unstable() method")

... this should be reliable. Since any detection of TSC fail now results
in marking the TSC unstable.
Reported-by: Wanpeng Li <kernellwp@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Fixes: acb04058 ("sched/clock: Fix hotplug crash")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0ba87bb2
...@@ -555,10 +555,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) ...@@ -555,10 +555,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) { if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (check_tsc_unstable())
clear_sched_clock_stable();
} else {
clear_sched_clock_stable();
} }
/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
......
...@@ -104,8 +104,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c) ...@@ -104,8 +104,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32); set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif #endif
clear_sched_clock_stable();
} }
static void init_centaur(struct cpuinfo_x86 *c) static void init_centaur(struct cpuinfo_x86 *c)
......
...@@ -86,7 +86,6 @@ static void default_init(struct cpuinfo_x86 *c) ...@@ -86,7 +86,6 @@ static void default_init(struct cpuinfo_x86 *c)
strcpy(c->x86_model_id, "386"); strcpy(c->x86_model_id, "386");
} }
#endif #endif
clear_sched_clock_stable();
} }
static const struct cpu_dev default_cpu = { static const struct cpu_dev default_cpu = {
...@@ -1075,8 +1074,6 @@ static void identify_cpu(struct cpuinfo_x86 *c) ...@@ -1075,8 +1074,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
*/ */
if (this_cpu->c_init) if (this_cpu->c_init)
this_cpu->c_init(c); this_cpu->c_init(c);
else
clear_sched_clock_stable();
/* Disable the PN if appropriate */ /* Disable the PN if appropriate */
squash_the_stupid_serial_number(c); squash_the_stupid_serial_number(c);
......
...@@ -184,7 +184,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c) ...@@ -184,7 +184,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
break; break;
} }
clear_sched_clock_stable();
} }
static void init_cyrix(struct cpuinfo_x86 *c) static void init_cyrix(struct cpuinfo_x86 *c)
......
...@@ -161,10 +161,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) ...@@ -161,10 +161,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) { if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (check_tsc_unstable())
clear_sched_clock_stable();
} else {
clear_sched_clock_stable();
} }
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
......
...@@ -15,8 +15,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) ...@@ -15,8 +15,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
if (xlvl >= 0x80860001) if (xlvl >= 0x80860001)
c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
} }
clear_sched_clock_stable();
} }
static void init_transmeta(struct cpuinfo_x86 *c) static void init_transmeta(struct cpuinfo_x86 *c)
......
...@@ -326,9 +326,16 @@ unsigned long long sched_clock(void) ...@@ -326,9 +326,16 @@ unsigned long long sched_clock(void)
{ {
return paravirt_sched_clock(); return paravirt_sched_clock();
} }
static inline bool using_native_sched_clock(void)
{
return pv_time_ops.sched_clock == native_sched_clock;
}
#else #else
unsigned long long unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock"))); sched_clock(void) __attribute__((alias("native_sched_clock")));
static inline bool using_native_sched_clock(void) { return true; }
#endif #endif
int check_tsc_unstable(void) int check_tsc_unstable(void)
...@@ -1111,7 +1118,9 @@ static void tsc_cs_mark_unstable(struct clocksource *cs) ...@@ -1111,7 +1118,9 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
{ {
if (tsc_unstable) if (tsc_unstable)
return; return;
tsc_unstable = 1; tsc_unstable = 1;
if (using_native_sched_clock())
clear_sched_clock_stable(); clear_sched_clock_stable();
disable_sched_clock_irqtime(); disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to clocksource watchdog\n"); pr_info("Marking TSC unstable due to clocksource watchdog\n");
...@@ -1134,19 +1143,21 @@ static struct clocksource clocksource_tsc = { ...@@ -1134,19 +1143,21 @@ static struct clocksource clocksource_tsc = {
void mark_tsc_unstable(char *reason) void mark_tsc_unstable(char *reason)
{ {
if (!tsc_unstable) { if (tsc_unstable)
return;
tsc_unstable = 1; tsc_unstable = 1;
if (using_native_sched_clock())
clear_sched_clock_stable(); clear_sched_clock_stable();
disable_sched_clock_irqtime(); disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to %s\n", reason); pr_info("Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */ /* Change only the rating, when not registered */
if (clocksource_tsc.mult) if (clocksource_tsc.mult) {
clocksource_mark_unstable(&clocksource_tsc); clocksource_mark_unstable(&clocksource_tsc);
else { } else {
clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
clocksource_tsc.rating = 0; clocksource_tsc.rating = 0;
} }
}
} }
EXPORT_SYMBOL_GPL(mark_tsc_unstable); EXPORT_SYMBOL_GPL(mark_tsc_unstable);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment