Commit 8dcde9de authored by Prarit Bhargava, committed by Linus Torvalds

kernel/watchdog.c: do not hardcode CPU 0 as the initial thread

When CONFIG_BOOTPARAM_HOTPLUG_CPU0 is enabled, the socket containing the
boot cpu can be replaced.  During the hot add event, the message

NMI watchdog: enabled on all CPUs, permanently consumes one hw-PMU counter.

is output implying that the NMI watchdog was disabled at some point.  This
is not the case and the message has caused confusion for users of systems
that support the removal of the boot cpu socket.

The watchdog code is coded to assume that cpu 0 is always the first cpu to
initialize the watchdog, and the last to stop its watchdog thread.  That
is not the case for initializing if cpu 0 has been removed and added.  The
removal case has never been correct because the smpboot code will remove
the watchdog threads starting with the lowest cpu number.

This patch adds watchdog_cpus to track the number of cpus with active NMI
watchdog threads so that the first and last thread can be used to set and
clear the value of firstcpu_err.  firstcpu_err is set when the first
watchdog thread is enabled, and cleared when the last watchdog thread is
disabled.

Link: http://lkml.kernel.org/r/1480425321-32296-1-git-send-email-prarit@redhat.com
Signed-off-by: Prarit Bhargava <prarit@redhat.com>
Acked-by: Don Zickus <dzickus@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Joshua Hunt <johunt@akamai.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Babu Moger <babu.moger@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b5c66bab
...@@ -137,12 +137,14 @@ static void watchdog_overflow_callback(struct perf_event *event, ...@@ -137,12 +137,14 @@ static void watchdog_overflow_callback(struct perf_event *event,
* Reduce the watchdog noise by only printing messages * Reduce the watchdog noise by only printing messages
* that are different from what cpu0 displayed. * that are different from what cpu0 displayed.
*/ */
static unsigned long cpu0_err; static unsigned long firstcpu_err;
static atomic_t watchdog_cpus;
int watchdog_nmi_enable(unsigned int cpu) int watchdog_nmi_enable(unsigned int cpu)
{ {
struct perf_event_attr *wd_attr; struct perf_event_attr *wd_attr;
struct perf_event *event = per_cpu(watchdog_ev, cpu); struct perf_event *event = per_cpu(watchdog_ev, cpu);
int firstcpu = 0;
/* nothing to do if the hard lockup detector is disabled */ /* nothing to do if the hard lockup detector is disabled */
if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
...@@ -156,19 +158,22 @@ int watchdog_nmi_enable(unsigned int cpu) ...@@ -156,19 +158,22 @@ int watchdog_nmi_enable(unsigned int cpu)
if (event != NULL) if (event != NULL)
goto out_enable; goto out_enable;
if (atomic_inc_return(&watchdog_cpus) == 1)
firstcpu = 1;
wd_attr = &wd_hw_attr; wd_attr = &wd_hw_attr;
wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
/* Try to register using hardware perf events */ /* Try to register using hardware perf events */
event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
/* save cpu0 error for future comparision */ /* save the first cpu's error for future comparision */
if (cpu == 0 && IS_ERR(event)) if (firstcpu && IS_ERR(event))
cpu0_err = PTR_ERR(event); firstcpu_err = PTR_ERR(event);
if (!IS_ERR(event)) { if (!IS_ERR(event)) {
/* only print for cpu0 or different than cpu0 */ /* only print for the first cpu initialized */
if (cpu == 0 || cpu0_err) if (firstcpu || firstcpu_err)
pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
goto out_save; goto out_save;
} }
...@@ -186,7 +191,7 @@ int watchdog_nmi_enable(unsigned int cpu) ...@@ -186,7 +191,7 @@ int watchdog_nmi_enable(unsigned int cpu)
smp_mb__after_atomic(); smp_mb__after_atomic();
/* skip displaying the same error again */ /* skip displaying the same error again */
if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
return PTR_ERR(event); return PTR_ERR(event);
/* vary the KERN level based on the returned errno */ /* vary the KERN level based on the returned errno */
...@@ -222,9 +227,9 @@ void watchdog_nmi_disable(unsigned int cpu) ...@@ -222,9 +227,9 @@ void watchdog_nmi_disable(unsigned int cpu)
/* should be in cleanup, but blocks oprofile */ /* should be in cleanup, but blocks oprofile */
perf_event_release_kernel(event); perf_event_release_kernel(event);
}
if (cpu == 0) {
/* watchdog_nmi_enable() expects this to be zero initially. */ /* watchdog_nmi_enable() expects this to be zero initially. */
cpu0_err = 0; if (atomic_dec_and_test(&watchdog_cpus))
firstcpu_err = 0;
} }
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment