Commit 6fe9e7c2 authored by Jiri Kosina, committed by Borislav Petkov

GHES: Make NMI handler have a single reader

Since GHES sources are global, we theoretically need only a single CPU
reading them per NMI instead of a thundering herd of CPUs waiting on a
spinlock in NMI context for no reason at all.

Do that.
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
parent 2383844d
@@ -729,10 +729,10 @@ static struct llist_head ghes_estatus_llist;
 static struct irq_work ghes_proc_irq_work;
 
 /*
- * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
- * mutual exclusion.
+ * NMI may be triggered on any CPU, so ghes_in_nmi is used for
+ * having only one concurrent reader.
  */
-static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
+static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
 
 static LIST_HEAD(ghes_nmi);
@@ -840,7 +840,9 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 	struct ghes *ghes;
 	int sev, ret = NMI_DONE;
 
-	raw_spin_lock(&ghes_nmi_lock);
+	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
+		return ret;
+
 	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
 		if (ghes_read_estatus(ghes, 1)) {
 			ghes_clear_estatus(ghes);
@@ -863,7 +865,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
 	irq_work_queue(&ghes_proc_irq_work);
 #endif
-	raw_spin_unlock(&ghes_nmi_lock);
+	atomic_dec(&ghes_in_nmi);
 
 	return ret;
 }
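
For illustration only, below is a minimal userspace sketch of the single-reader gate this patch introduces; it is not the kernel code. C11 atomics and a compare-exchange stand in for the kernel's atomic_add_unless()/atomic_dec(), and the GHES processing itself is reduced to a placeholder.

/*
 * Sketch of the gate: only the caller that flips the counter from 0 to 1
 * walks the (global) error sources; everyone else bails out immediately
 * instead of spinning, mirroring atomic_add_unless(&ghes_in_nmi, 1, 1).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ghes_in_nmi;

static bool ghes_enter(void)
{
	int expected = 0;

	/* Increment only if no reader holds the gate yet. */
	return atomic_compare_exchange_strong(&ghes_in_nmi, &expected, 1);
}

static void ghes_exit(void)
{
	atomic_fetch_sub(&ghes_in_nmi, 1);
}

int main(void)
{
	if (!ghes_enter())
		return 0;	/* another "CPU" is already reading */

	puts("single reader processes the global GHES sources here");

	ghes_exit();
	return 0;
}

The point of the change is visible in the diff above: a CPU that loses the race returns NMI_DONE right away instead of serializing on a raw spinlock, which, for sources that are global anyway, bought nothing.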