Commit bbf66d89 authored by Vitaly Kuznetsov, committed by Thomas Gleixner

clocksource: Allow unregistering the watchdog

The Hyper-V vmbus module registers a TSC page clocksource when loaded. This is
the clocksource with the highest rating, so it becomes the watchdog, which makes
unloading the vmbus module impossible.
Separate clocksource_select_watchdog() from clocksource_enqueue_watchdog()
and use it on clocksource register/rating change/unregister.
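
For illustration only, here is a minimal, hypothetical sketch of the pattern this
change targets: a loadable module that registers a high-rated clocksource at load
time and unregisters it at unload. The names (dummy_cs, dummy_cs_read, the 10 MHz
rate) are invented and not taken from the vmbus driver, and the read() callback
type differs across kernel versions (cycle_t around this commit, u64 in later
kernels).

/* Hypothetical example module; not part of this commit or the vmbus driver. */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timex.h>

static u64 dummy_cs_read(struct clocksource *cs)
{
        /* Stand-in counter; a real driver would read its hardware (e.g. the TSC page). */
        return (u64)get_cycles();
}

static struct clocksource dummy_cs = {
        .name   = "dummy_example",
        .rating = 400,  /* high rating, comparable to the Hyper-V TSC page clocksource */
        .read   = dummy_cs_read,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init dummy_cs_init(void)
{
        /* Registration may promote dummy_cs to both curr_clocksource and watchdog. */
        return clocksource_register_hz(&dummy_cs, 10000000);   /* hypothetical 10 MHz */
}

static void __exit dummy_cs_exit(void)
{
        /*
         * Before this commit, clocksource_unbind() returned -EBUSY whenever the
         * clocksource had become the watchdog, so module unload was impossible.
         * With clocksource_select_watchdog(true) a replacement watchdog is picked
         * first and the unregister can succeed.
         */
        clocksource_unregister(&dummy_cs);
}

module_init(dummy_cs_init);
module_exit(dummy_cs_exit);
MODULE_LICENSE("GPL");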

After all, lobotomized monkeys may need some love too.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Dexuan Cui <decui@microsoft.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Link: http://lkml.kernel.org/r/1453483913-25672-1-git-send-email-vkuznets@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent a6e707dd
kernel/time/clocksource.c

@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		/* cs is a watchdog. */
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+	}
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+	struct clocksource *cs, *old_wd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	/* save current watchdog */
+	old_wd = watchdog;
+	if (fallback)
+		watchdog = NULL;
+
+	list_for_each_entry(cs, &clocksource_list, list) {
+		/* cs is a clocksource to be watched. */
+		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+			continue;
+
+		/* Skip current if we were requested for a fallback. */
+		if (fallback && cs == old_wd)
+			continue;
+
 		/* Pick the best watchdog. */
-		if (!watchdog || cs->rating > watchdog->rating) {
+		if (!watchdog || cs->rating > watchdog->rating)
 			watchdog = cs;
-			/* Reset watchdog cycles */
-			clocksource_reset_watchdog();
-		}
 	}
+	/* If we failed to find a fallback restore the old one. */
+	if (!watchdog)
+		watchdog = old_wd;
+
+	/* If we changed the watchdog we need to reset cycles. */
+	if (watchdog != old_wd)
+		clocksource_reset_watchdog();
+
 	/* Check if the watchdog timer needs to be started. */
 	clocksource_start_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 	mutex_lock(&clocksource_mutex);
 	__clocksource_change_rating(cs, rating);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
-	/*
-	 * I really can't convince myself to support this on hardware
-	 * designed by lobotomized monkeys.
-	 */
-	if (clocksource_is_watchdog(cs))
-		return -EBUSY;
+	if (clocksource_is_watchdog(cs)) {
+		/* Select and try to install a replacement watchdog. */
+		clocksource_select_watchdog(true);
+		if (clocksource_is_watchdog(cs))
+			return -EBUSY;
+	}
 
 	if (cs == curr_clocksource) {
 		/* Select and try to install a replacement clock source */