/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

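/* Serializes lockup detector reconfiguration and the sysctl/proc interface. */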
static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

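/*
 * The run state of the lockup detectors is controlled by the bits in
 * 'watchdog_enabled' (NMI_WATCHDOG_ENABLED, SOFT_WATCHDOG_ENABLED). The
 * '*_user_enabled' variables mirror the user-visible knobs in
 * /proc/sys/kernel and are folded into 'watchdog_enabled' by
 * lockup_detector_update_enable().
 */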
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

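/*
 * watchdog_cpumask is the user-configured mask of CPUs the watchdog may run
 * on; watchdog_allowed_mask is the subset it currently runs on and is kept
 * empty while the detector is being reconfigured.
 */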
struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}
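
/*
 * A minimal sketch of an arch override (not part of this file): an
 * architecture with its own NMI watchdog would provide strong versions
 * of the weak hooks above, where arch_nmi_watchdog_start/stop() stand
 * in for hypothetical arch-specific hooks:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		return arch_nmi_watchdog_start(cpu);
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		arch_nmi_watchdog_stop(cpu);
 *	}
 */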

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_threads_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * thresholds are therefore coupled by a factor: the soft threshold is twice
 * the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 * With the default watchdog_thresh of 10s this yields a soft
	 * threshold of 20s and a sample period of 4s.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

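/*
 * Touch the watchdog and make the next watchdog timer tick resync
 * sched_clock first (watchdog_timer_fn() then calls sched_clock_tick()).
 */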
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

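/*
 * Returns the stall duration in seconds when the soft watchdog is enabled
 * and the touch timestamp is older than the softlockup threshold, 0
 * otherwise.
 */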
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

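/* Called from the watchdog hrtimer; is_hardlockup() samples this count. */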
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: this is done by making sure a high
	 * priority task is being scheduled.  The task touches the
	 * watchdog to indicate it is getting CPU time.  If it hasn't,
	 * that is a good indication some task is hogging the CPU.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one CPU is
			 * already engaged in dumping CPU back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current,
			 * given that one was already generated above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents that a large delay
	 * between disabling the timer and disabling the perf event causes
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

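/*
 * The per-CPU watchdog thread only needs to run when the hrtimer interrupt
 * count has advanced since the thread last ran.
 */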
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

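/*
 * smpboot descriptor for the per-CPU watchdog threads; the smpboot
 * infrastructure creates, parks and unparks them on behalf of the detector.
 */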
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

static void softlockup_update_smpboot_threads(void)
{
	lockdep_assert_held(&watchdog_mutex);

	if (!softlockup_threads_initialized)
		return;

	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
}

/* Temporarily park all watchdog threads */
static void softlockup_park_all_threads(void)
{
	cpumask_clear(&watchdog_allowed_mask);
	softlockup_update_smpboot_threads();
}

/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	softlockup_update_smpboot_threads();
}

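/*
 * Stop the watchdogs, update the configuration from the sysctl state and
 * restart them. Called with watchdog_mutex held.
 */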
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	softlockup_park_all_threads();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty.  When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	int ret;

	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
						     &watchdog_allowed_mask);
	if (ret) {
		pr_err("Failed to initialize soft lockup detector threads\n");
		return;
	}

	mutex_lock(&watchdog_mutex);
	softlockup_threads_initialized = true;
	lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
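
/*
 * Example usage of the knobs above (shell, not part of this file):
 *
 *	# echo 0 > /proc/sys/kernel/nmi_watchdog	(disable NMI watchdog)
 *	# echo 1 > /proc/sys/kernel/soft_watchdog	(enable soft watchdog)
 */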

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

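/*
 * Boot-time entry point: set up the default watchdog cpumask (excluding
 * nohz_full CPUs when enabled), probe for an NMI watchdog and configure
 * the detector.
 */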
void __init lockup_detector_init(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}