Commit b772b8e3 authored by Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core fixes from Ingo Molnar:

 - workaround for gcc asm handling

 - futex race fixes

 - objtool build warning fix

 - two watchdog fixes: a crash fix (revert) and a bug fix for
   /proc/sys/kernel/watchdog_thresh handling.

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Prevent GCC from merging annotate_unreachable(), take 2
  objtool: Resync objtool's instruction decoder source code copy with the kernel's latest version
  watchdog/hardlockup/perf: Use atomics to track in-use cpu counter
  watchdog/hardlockup/perf: Revert a33d4484 ("watchdog/hardlockup/perf: Simplify deferred event destroy")
  futex: Fix more put_pi_state() vs. exit_pi_state_list() races
parents 3131dc46 ec1e1b61
@@ -191,13 +191,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	asm("%c0:\n\t" \
 	    ".pushsection .discard.reachable\n\t" \
 	    ".long %c0b - .\n\t" \
-	    ".popsection\n\t" : : "i" (__LINE__)); \
+	    ".popsection\n\t" : : "i" (__COUNTER__)); \
 })
 #define annotate_unreachable() ({ \
 	asm("%c0:\n\t" \
 	    ".pushsection .discard.unreachable\n\t" \
 	    ".long %c0b - .\n\t" \
-	    ".popsection\n\t" : : "i" (__LINE__)); \
+	    ".popsection\n\t" : : "i" (__COUNTER__)); \
 })
 #define ASM_UNREACHABLE \
 	"999:\n\t" \
...
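
The compiler.h change above matters because GCC may merge asm statements whose template and operands are identical, which would collapse two annotations into one and lose a .discard entry that objtool depends on. __LINE__ is not unique enough as an operand: two macro expansions can land on the same source line, while __COUNTER__ produces a fresh integer for every expansion. A minimal user-space sketch of that distinction, using a hypothetical macro name rather than kernel code:

#include <stdio.h>

/*
 * Hypothetical demo macro, not from the kernel: each expansion of
 * UNIQUE_ID() yields a distinct integer via __COUNTER__ (a GCC/Clang
 * extension), even when two uses share one source line, whereas
 * __LINE__ would produce the same value for both.
 */
#define UNIQUE_ID() __COUNTER__

int main(void)
{
	/* Both expansions sit on one line: __LINE__ is identical for
	 * them, but __COUNTER__ still increments per use. */
	int a = UNIQUE_ID(), b = UNIQUE_ID();

	printf("a=%d b=%d (line %d for both)\n", a, b, __LINE__);
	return 0;
}

Built with GCC or Clang this prints a=0 b=1 (assuming no earlier __COUNTER__ uses in the file), even though both expansions share a single source line and therefore a single __LINE__ value.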
@@ -903,11 +903,27 @@ void exit_pi_state_list(struct task_struct *curr)
 	 */
 	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
 
+		/*
+		 * We can race against put_pi_state() removing itself from the
+		 * list (a waiter going away). put_pi_state() will first
+		 * decrement the reference count and then modify the list, so
+		 * it's possible to see the list entry but fail this reference
+		 * acquire.
+		 *
+		 * In that case, drop the locks to let put_pi_state() make
+		 * progress and retry the loop.
+		 */
+		if (!atomic_inc_not_zero(&pi_state->refcount)) {
+			raw_spin_unlock_irq(&curr->pi_lock);
+			cpu_relax();
+			raw_spin_lock_irq(&curr->pi_lock);
+			continue;
+		}
 		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
@@ -918,8 +934,10 @@ void exit_pi_state_list(struct task_struct *curr)
 		 * task still owns the PI-state:
 		 */
 		if (head->next != next) {
+			/* retain curr->pi_lock for the loop invariant */
 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
 			spin_unlock(&hb->lock);
+			put_pi_state(pi_state);
 			continue;
 		}
@@ -927,9 +945,8 @@ void exit_pi_state_list(struct task_struct *curr)
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
-		raw_spin_unlock(&curr->pi_lock);
 
-		get_pi_state(pi_state);
+		raw_spin_unlock(&curr->pi_lock);
 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 		spin_unlock(&hb->lock);
...
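
The race the new comment describes is why a plain get_pi_state() is no longer safe at this point: put_pi_state() drops the last reference before unlinking the entry, so exit_pi_state_list() can observe a pi_state that is already past the point of no return. Taking the reference with atomic_inc_not_zero() closes that window. A minimal sketch of the inc-not-zero idiom in user-space C11 atomics, with a hypothetical helper name (not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Sketch of the atomic_inc_not_zero() idiom in C11 atomics: take a
 * reference only if the refcount has not already hit zero, i.e. the
 * object is not in the middle of being freed. Hypothetical stand-in,
 * not the kernel's implementation.
 */
static bool ref_get_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/*
		 * CAS: succeeds only if the count is still 'old'; on
		 * failure 'old' is refreshed and the zero check reruns.
		 */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference acquired */
	}
	return false;	/* already draining; caller must back off and retry */
}

A failed acquire means the final decrement has already happened but the list_del_init() on the other side is still pending, which is why briefly dropping curr->pi_lock, spinning with cpu_relax(), and retrying is enough for the stale entry to disappear.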
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) "NMI watchdog: " fmt
 
 #include <linux/nmi.h>
+#include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/sched/debug.h>
@@ -22,10 +23,11 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_PER_CPU(struct perf_event *, dead_event);
 static struct cpumask dead_events_mask;
 
 static unsigned long hardlockup_allcpu_dumped;
-static unsigned int watchdog_cpus;
+static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
 void arch_touch_nmi_watchdog(void)
 {
@@ -189,7 +191,8 @@ void hardlockup_detector_perf_enable(void)
 	if (hardlockup_detector_event_create())
 		return;
 
-	if (!watchdog_cpus++)
+	/* use original value for check */
+	if (!atomic_fetch_inc(&watchdog_cpus))
 		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
 	perf_event_enable(this_cpu_read(watchdog_ev));
@@ -204,8 +207,10 @@ void hardlockup_detector_perf_disable(void)
 	if (event) {
 		perf_event_disable(event);
+		this_cpu_write(watchdog_ev, NULL);
+		this_cpu_write(dead_event, event);
 		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
-		watchdog_cpus--;
+		atomic_dec(&watchdog_cpus);
 	}
 }
@@ -219,7 +224,7 @@ void hardlockup_detector_perf_cleanup(void)
 	int cpu;
 
 	for_each_cpu(cpu, &dead_events_mask) {
-		struct perf_event *event = per_cpu(watchdog_ev, cpu);
+		struct perf_event *event = per_cpu(dead_event, cpu);
 
 		/*
 		 * Required because for_each_cpu() reports unconditionally
@@ -227,7 +232,7 @@ void hardlockup_detector_perf_cleanup(void)
 		 */
 		if (event)
 			perf_event_release_kernel(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
+		per_cpu(dead_event, cpu) = NULL;
 	}
 	cpumask_clear(&dead_events_mask);
 }
...
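
Two things make the watchdog_hld.c changes above work: atomic_fetch_inc() returns the counter's value from before the increment, so the "first CPU prints the enable message" test keeps its old `!watchdog_cpus++` semantics without the data race of a plain unsigned int, and handing the disabled event over to the separate dead_event slot lets hardlockup_detector_perf_cleanup() release only events that are genuinely dead, even if a re-enable has since repopulated watchdog_ev on that CPU. A small user-space C11 sketch of the fetch-and-increment check (hypothetical names, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical demo counter mirroring watchdog_cpus; not kernel code. */
static atomic_int demo_watchdog_cpus;

static void demo_perf_enable(void)
{
	/*
	 * atomic_fetch_add() is the C11 analogue of the kernel's
	 * atomic_fetch_inc(): it returns the value *before* the
	 * increment, so only the first caller observes zero.
	 */
	if (atomic_fetch_add(&demo_watchdog_cpus, 1) == 0)
		printf("Enabled. Permanently consumes one hw-PMU counter.\n");
}

int main(void)
{
	demo_perf_enable();	/* prints the one-time message */
	demo_perf_enable();	/* silent: counter was already nonzero */
	return 0;
}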
 #!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
 # gen-insn-attr-x86.awk: Instruction attribute table generator
 # Written by Masami Hiramatsu <mhiramat@redhat.com>
 #
...