Commit fe282c60 authored by Linus Torvalds

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull clocksource fixes from Thomas Gleixner:
 "The recent addition of the early TSC clocksource breaks on machines
  which have an unstable TSC because in case that TSC is disabled, then
  the clocksource selection logic falls back to the early TSC which is
  obviously bogus.

  That also unearthed a few robustness issues in the clocksource
  derating code which are addressed as well"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: Rework stale comment
  clocksource: Consistent de-rate when marking unstable
  x86/tsc: Fix mark_tsc_unstable()
  clocksource: Initialize cs->wd_list
  clocksource: Allow clocksource_mark_unstable() on unregistered clocksources
  x86/tsc: Always unregister clocksource_tsc_early
parents 03b5f0c1 7dba33c6
...@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = { ...@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
.resume = tsc_resume, .resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable, .mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable, .tick_stable = tsc_cs_tick_stable,
.list = LIST_HEAD_INIT(clocksource_tsc_early.list),
}; };
/* /*
...@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = { ...@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
.resume = tsc_resume, .resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable, .mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable, .tick_stable = tsc_cs_tick_stable,
.list = LIST_HEAD_INIT(clocksource_tsc.list),
}; };
void mark_tsc_unstable(char *reason) void mark_tsc_unstable(char *reason)
...@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason) ...@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
clear_sched_clock_stable(); clear_sched_clock_stable();
disable_sched_clock_irqtime(); disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to %s\n", reason); pr_info("Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult) { clocksource_mark_unstable(&clocksource_tsc_early);
clocksource_mark_unstable(&clocksource_tsc); clocksource_mark_unstable(&clocksource_tsc);
} else {
clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
clocksource_tsc.rating = 0;
}
} }
EXPORT_SYMBOL_GPL(mark_tsc_unstable); EXPORT_SYMBOL_GPL(mark_tsc_unstable);
...@@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work) ...@@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
/* Don't bother refining TSC on unstable systems */ /* Don't bother refining TSC on unstable systems */
if (tsc_unstable) if (tsc_unstable)
return; goto unreg;
/* /*
* Since the work is started early in boot, we may be * Since the work is started early in boot, we may be
...@@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work) ...@@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
out: out:
if (tsc_unstable) if (tsc_unstable)
return; goto unreg;
if (boot_cpu_has(X86_FEATURE_ART)) if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc; art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz); clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
clocksource_unregister(&clocksource_tsc_early); clocksource_unregister(&clocksource_tsc_early);
} }
...@@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void) ...@@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void)
if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz) if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
return 0; return 0;
if (check_tsc_unstable()) if (tsc_unstable)
return 0; goto unreg;
if (tsc_clocksource_reliable) if (tsc_clocksource_reliable)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
...@@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void) ...@@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void)
if (boot_cpu_has(X86_FEATURE_ART)) if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc; art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz); clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
clocksource_unregister(&clocksource_tsc_early); clocksource_unregister(&clocksource_tsc_early);
return 0; return 0;
} }
......
...@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock); ...@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running; static int watchdog_running;
static atomic_t watchdog_reset_pending; static atomic_t watchdog_reset_pending;
static void inline clocksource_watchdog_lock(unsigned long *flags)
{
spin_lock_irqsave(&watchdog_lock, *flags);
}
static void inline clocksource_watchdog_unlock(unsigned long *flags)
{
spin_unlock_irqrestore(&watchdog_lock, *flags);
}
static int clocksource_watchdog_kthread(void *data); static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating); static void __clocksource_change_rating(struct clocksource *cs, int rating);
...@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs) ...@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE; cs->flags |= CLOCK_SOURCE_UNSTABLE;
/*
* If the clocksource is registered clocksource_watchdog_kthread() will
* re-rate and re-select.
*/
if (list_empty(&cs->list)) {
cs->rating = 0;
return;
}
if (cs->mark_unstable) if (cs->mark_unstable)
cs->mark_unstable(cs); cs->mark_unstable(cs);
/* kick clocksource_watchdog_kthread() */
if (finished_booting) if (finished_booting)
schedule_work(&watchdog_work); schedule_work(&watchdog_work);
} }
...@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs) ...@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
* clocksource_mark_unstable - mark clocksource unstable via watchdog * clocksource_mark_unstable - mark clocksource unstable via watchdog
* @cs: clocksource to be marked unstable * @cs: clocksource to be marked unstable
* *
* This function is called instead of clocksource_change_rating from * This function is called by the x86 TSC code to mark clocksources as unstable;
* cpu hotplug code to avoid a deadlock between the clocksource mutex * it defers demotion and re-selection to a kthread.
* and the cpu hotplug mutex. It defers the update of the clocksource
* to the watchdog thread.
*/ */
void clocksource_mark_unstable(struct clocksource *cs) void clocksource_mark_unstable(struct clocksource *cs)
{ {
...@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs) ...@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
spin_lock_irqsave(&watchdog_lock, flags); spin_lock_irqsave(&watchdog_lock, flags);
if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
if (list_empty(&cs->wd_list)) if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
list_add(&cs->wd_list, &watchdog_list); list_add(&cs->wd_list, &watchdog_list);
__clocksource_unstable(cs); __clocksource_unstable(cs);
} }
...@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void) ...@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
static void clocksource_enqueue_watchdog(struct clocksource *cs) static void clocksource_enqueue_watchdog(struct clocksource *cs)
{ {
unsigned long flags; INIT_LIST_HEAD(&cs->wd_list);
spin_lock_irqsave(&watchdog_lock, flags);
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
/* cs is a clocksource to be watched. */ /* cs is a clocksource to be watched. */
list_add(&cs->wd_list, &watchdog_list); list_add(&cs->wd_list, &watchdog_list);
...@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) ...@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
} }
spin_unlock_irqrestore(&watchdog_lock, flags);
} }
static void clocksource_select_watchdog(bool fallback) static void clocksource_select_watchdog(bool fallback)
...@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback) ...@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
static void clocksource_dequeue_watchdog(struct clocksource *cs) static void clocksource_dequeue_watchdog(struct clocksource *cs)
{ {
unsigned long flags;
spin_lock_irqsave(&watchdog_lock, flags);
if (cs != watchdog) { if (cs != watchdog) {
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
/* cs is a watched clocksource. */ /* cs is a watched clocksource. */
...@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs) ...@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
clocksource_stop_watchdog(); clocksource_stop_watchdog();
} }
} }
spin_unlock_irqrestore(&watchdog_lock, flags);
} }
static int __clocksource_watchdog_kthread(void) static int __clocksource_watchdog_kthread(void)
{ {
struct clocksource *cs, *tmp; struct clocksource *cs, *tmp;
unsigned long flags; unsigned long flags;
LIST_HEAD(unstable);
int select = 0; int select = 0;
spin_lock_irqsave(&watchdog_lock, flags); spin_lock_irqsave(&watchdog_lock, flags);
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
if (cs->flags & CLOCK_SOURCE_UNSTABLE) { if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
list_del_init(&cs->wd_list); list_del_init(&cs->wd_list);
list_add(&cs->wd_list, &unstable); __clocksource_change_rating(cs, 0);
select = 1; select = 1;
} }
if (cs->flags & CLOCK_SOURCE_RESELECT) { if (cs->flags & CLOCK_SOURCE_RESELECT) {
...@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void) ...@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
clocksource_stop_watchdog(); clocksource_stop_watchdog();
spin_unlock_irqrestore(&watchdog_lock, flags); spin_unlock_irqrestore(&watchdog_lock, flags);
/* Needs to be done outside of watchdog lock */
list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
list_del_init(&cs->wd_list);
__clocksource_change_rating(cs, 0);
}
return select; return select;
} }
...@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; } ...@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { } void clocksource_mark_unstable(struct clocksource *cs) { }
static void inline clocksource_watchdog_lock(unsigned long *flags) { }
static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
/** /**
...@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale); ...@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
*/ */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{ {
unsigned long flags;
/* Initialize mult/shift and max_idle_ns */ /* Initialize mult/shift and max_idle_ns */
__clocksource_update_freq_scale(cs, scale, freq); __clocksource_update_freq_scale(cs, scale, freq);
/* Add clocksource to the clocksource list */ /* Add clocksource to the clocksource list */
mutex_lock(&clocksource_mutex); mutex_lock(&clocksource_mutex);
clocksource_watchdog_lock(&flags);
clocksource_enqueue(cs); clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs); clocksource_enqueue_watchdog(cs);
clocksource_watchdog_unlock(&flags);
clocksource_select(); clocksource_select();
clocksource_select_watchdog(false); clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex); mutex_unlock(&clocksource_mutex);
...@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating) ...@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
*/ */
void clocksource_change_rating(struct clocksource *cs, int rating) void clocksource_change_rating(struct clocksource *cs, int rating)
{ {
unsigned long flags;
mutex_lock(&clocksource_mutex); mutex_lock(&clocksource_mutex);
clocksource_watchdog_lock(&flags);
__clocksource_change_rating(cs, rating); __clocksource_change_rating(cs, rating);
clocksource_watchdog_unlock(&flags);
clocksource_select(); clocksource_select();
clocksource_select_watchdog(false); clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex); mutex_unlock(&clocksource_mutex);
...@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating); ...@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
*/ */
static int clocksource_unbind(struct clocksource *cs) static int clocksource_unbind(struct clocksource *cs)
{ {
unsigned long flags;
if (clocksource_is_watchdog(cs)) { if (clocksource_is_watchdog(cs)) {
/* Select and try to install a replacement watchdog. */ /* Select and try to install a replacement watchdog. */
clocksource_select_watchdog(true); clocksource_select_watchdog(true);
...@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs) ...@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
if (curr_clocksource == cs) if (curr_clocksource == cs)
return -EBUSY; return -EBUSY;
} }
clocksource_watchdog_lock(&flags);
clocksource_dequeue_watchdog(cs); clocksource_dequeue_watchdog(cs);
list_del_init(&cs->list); list_del_init(&cs->list);
clocksource_watchdog_unlock(&flags);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment