Commit 01f0a027 authored by Thomas Gleixner, committed by Ingo Molnar

watchdog/core: Remove the park_in_progress obfuscation

Commit:

  b94f5118 ("kernel/watchdog: prevent false hardlockup on overloaded system")

tries to fix the following issue:

proc_write()
   set_sample_period()    <--- New sample period becomes visible
			  <----- Broken starts
   proc_watchdog_update()
     watchdog_enable_all_cpus()		watchdog_hrtimer_fn()
     update_watchdog_all_cpus()		   restart_timer(sample_period)
        watchdog_park_threads()

					thread->park()
					  disable_nmi()
			  <----- Broken ends

The reason why this is broken is that the update of the watchdog threshold
becomes immediately effective and visible to the hrtimer function, which
uses that value to rearm the timer. But the NMI/perf side still uses the
old value up to the point where it is disabled. If the rate has been
lowered then the NMI can run fast enough to 'detect' a hard lockup because
the timer has not fired due to the longer period.
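
For reference, the hardlockup decision on the NMI side boils down to
comparing the per-CPU hrtimer interrupt count against the value seen at the
previous NMI. The sketch below is an illustrative reduction, not part of
this patch; the names follow the detector's is_hardlockup() helper:

/*
 * Illustrative sketch only: if the hrtimer has not incremented the
 * per-CPU interrupt count since the last NMI, the CPU is assumed to be
 * stuck with interrupts disabled and a hard lockup is reported.
 */
static bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	/* Timer has not fired since the previous NMI -> report a lockup */
	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

This is why a mismatch between the period used by the hrtimer and the period
still programmed into the perf event can produce a false positive.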

The patch 'fixed' this by adding a variable:

proc_write()
   set_sample_period()
					<----- Broken starts
   proc_watchdog_update()
     watchdog_enable_all_cpus()		watchdog_hrtimer_fn()
     update_watchdog_all_cpus()		   restart_timer(sample_period)
         watchdog_park_threads()
	  park_in_progress = 1
					<----- Broken ends
				        nmi_watchdog()
					  if (park_in_progress)
					     return;

The only effect of this variable was to make the window in which the breakage
can hit small enough that it was no longer observable in testing. From a
correctness point of view it is a pointless band-aid which merely papers
over the root cause: the unsynchronized update of the variable.

Looking deeper into the related code paths unearthed similar problems in
the watchdog_start()/stop() functions.

 watchdog_start()
	perf_nmi_event_start()
	hrtimer_start()

 watchdog_stop()
	hrtimer_cancel()
	perf_nmi_event_stop()

In both cases the call order is wrong because if the task gets preempted
or the VM gets scheduled out long enough after the first call, then there is
a chance that the next NMI will see a stale hrtimer interrupt count and
trigger a false positive hard lockup splat.
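
Condensed, the corrected ordering (implemented by the hunks below) keeps the
hrtimer armed and the touch timestamp fresh whenever the perf NMI is active.
This is an illustrative summary with details such as priority handling
omitted; the full diff follows:

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/* Arm the timer and refresh the timestamp first ... */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL_PINNED);
	__touch_watchdog();
	/* ... so the NMI side never observes a stale interrupt count. */
	watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	/* Quiesce the NMI side before the timer stops incrementing the count. */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(this_cpu_ptr(&watchdog_hrtimer));
}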

Get rid of park_in_progress so the code can be gradually deobfuscated and
pruned from several layers of duct tape papering over the root cause,
which has been either ignored or not understood at all.

Once this is removed the underlying problem will be fixed by rewriting the
proc interface to do a proper synchronized update.

Address the start/stop() ordering problem as well by reversing the call
order, so this part is at least correct now.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1709052038270.2393@nanos
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 941154bd
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,7 +27,6 @@ extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern unsigned int softlockup_panic;
 extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
 #else
 static inline void touch_softlockup_watchdog_sched(void)
 {
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -136,8 +136,6 @@ void __weak watchdog_nmi_reconfigure(void)
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
-
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -322,8 +320,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
-	if (!watchdog_enabled ||
-	    atomic_read(&watchdog_park_in_progress) != 0)
+	if (!watchdog_enabled)
 		return HRTIMER_NORESTART;
 
 	/* kick the hardlockup detector */
@@ -437,32 +434,37 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
 
 static void watchdog_enable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
-	/* kick off the timer for the hardlockup detector */
+	/*
+	 * Start the timer first to prevent the NMI watchdog triggering
+	 * before the timer has a chance to fire.
+	 */
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
+	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+		      HRTIMER_MODE_REL_PINNED);
 
+	/* Initialize timestamp */
+	__touch_watchdog();
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
-	/* done here because hrtimer_start can only pin to smp_processor_id() */
-	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
-		      HRTIMER_MODE_REL_PINNED);
-
-	/* initialize timestamp */
 	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
-	__touch_watchdog();
 }
 
 static void watchdog_disable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
 
 	watchdog_set_prio(SCHED_NORMAL, 0);
-	hrtimer_cancel(hrtimer);
-	/* disable the perf event */
+	/*
+	 * Disable the perf event first. That prevents that a large delay
+	 * between disabling the timer and disabling the perf event causes
+	 * the perf NMI to detect a false positive.
+	 */
 	watchdog_nmi_disable(cpu);
+	hrtimer_cancel(hrtimer);
 }
 
 static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -518,16 +520,11 @@ static int watchdog_park_threads(void)
 {
 	int cpu, ret = 0;
 
-	atomic_set(&watchdog_park_in_progress, 1);
-
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
-
-	atomic_set(&watchdog_park_in_progress, 0);
-
 	return ret;
 }
 
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -106,15 +106,12 @@ static struct perf_event_attr wd_hw_attr = {
 
 /* Callback function for perf event subsystem */
 static void watchdog_overflow_callback(struct perf_event *event,
-		 struct perf_sample_data *data,
-		 struct pt_regs *regs)
+				       struct perf_sample_data *data,
+				       struct pt_regs *regs)
 {
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (atomic_read(&watchdog_park_in_progress) != 0)
-		return;
-
 	if (__this_cpu_read(watchdog_nmi_touch) == true) {
 		__this_cpu_write(watchdog_nmi_touch, false);
 		return;