Commit 28f6bf9e authored by Frederic Weisbecker's avatar Frederic Weisbecker Committed by Thomas Gleixner

arm64: Prepare arch_nmi_enter() for recursion

When using nmi_enter() recursively, arch_nmi_enter() must also be recursion
safe. In particular, it must be ensured that HCR_TGE is always set while in
NMI context when in HYP mode, and be restored to its former state when
done.

The current code fails this when NMIs are interleaved in the wrong order:
notably, it overwrites the saved original HCR state when NMIs nest.

Introduce a nesting counter to make sure to store the original value.
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lkml.kernel.org/r/20200505134100.771491291@linutronix.de
parent b0f51883
@@ -32,30 +32,70 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
/*
 * Per-CPU saved HCR_EL2 state for NMI entry/exit, plus a nesting counter
 * so that only the outermost NMI saves and restores the register.
 */
struct nmi_ctx {
	u64 hcr;		/* HCR_EL2 value captured by the outermost NMI */
	unsigned int cnt;	/* NMI nesting depth; 0 means not in NMI context */
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
/*
 * Enter NMI context (recursion-safe). In VHE/HYP mode, ensure HCR_EL2.TGE is
 * set for the duration of the NMI so IRQs are routed to EL2; only the
 * outermost NMI saves the original HCR_EL2 value, nested entries merely bump
 * the counter.
 */
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		/* Nested NMI: state already secured by the outermost one. */ \
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
/*
 * Leave NMI context (recursion-safe counterpart of arch_nmi_enter()). Only
 * when the nesting counter drops to zero, and only if the outermost entry
 * actually set TGE, is the original HCR_EL2 value written back.
 */
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.		\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
static inline void ack_bad_irq(unsigned int irq) static inline void ack_bad_irq(unsigned int irq)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment