Commit f051f697 authored by Thomas Gleixner

x86/nmi: Protect NMI entry against instrumentation

Mark all functions in the fragile code parts noinstr or force inlining so
they can't be instrumented.

Also make the hardware latency tracer invocation explicit outside of the
non-instrumentable section.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20200505135314.716186134@linutronix.de


parent 6271fef0
...@@ -214,7 +214,7 @@ static inline void native_load_gdt(const struct desc_ptr *dtr) ...@@ -214,7 +214,7 @@ static inline void native_load_gdt(const struct desc_ptr *dtr)
asm volatile("lgdt %0"::"m" (*dtr)); asm volatile("lgdt %0"::"m" (*dtr));
} }
static inline void native_load_idt(const struct desc_ptr *dtr) static __always_inline void native_load_idt(const struct desc_ptr *dtr)
{ {
asm volatile("lidt %0"::"m" (*dtr)); asm volatile("lidt %0"::"m" (*dtr));
} }
...@@ -392,7 +392,7 @@ extern unsigned long system_vectors[]; ...@@ -392,7 +392,7 @@ extern unsigned long system_vectors[];
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr); DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void) static __always_inline bool is_debug_idt_enabled(void)
{ {
if (this_cpu_read(debug_idt_ctr)) if (this_cpu_read(debug_idt_ctr))
return true; return true;
...@@ -400,7 +400,7 @@ static inline bool is_debug_idt_enabled(void) ...@@ -400,7 +400,7 @@ static inline bool is_debug_idt_enabled(void)
return false; return false;
} }
static inline void load_debug_idt(void) static __always_inline void load_debug_idt(void)
{ {
load_idt((const struct desc_ptr *)&debug_idt_descr); load_idt((const struct desc_ptr *)&debug_idt_descr);
} }
...@@ -422,7 +422,7 @@ static inline void load_debug_idt(void) ...@@ -422,7 +422,7 @@ static inline void load_debug_idt(void)
* that doesn't need to disable interrupts, as nothing should be * that doesn't need to disable interrupts, as nothing should be
* bothering the CPU then. * bothering the CPU then.
*/ */
static inline void load_current_idt(void) static __always_inline void load_current_idt(void)
{ {
if (is_debug_idt_enabled()) if (is_debug_idt_enabled())
load_debug_idt(); load_debug_idt();
......
...@@ -1709,21 +1709,19 @@ void syscall_init(void) ...@@ -1709,21 +1709,19 @@ void syscall_init(void)
DEFINE_PER_CPU(int, debug_stack_usage); DEFINE_PER_CPU(int, debug_stack_usage);
DEFINE_PER_CPU(u32, debug_idt_ctr); DEFINE_PER_CPU(u32, debug_idt_ctr);
void debug_stack_set_zero(void) noinstr void debug_stack_set_zero(void)
{ {
this_cpu_inc(debug_idt_ctr); this_cpu_inc(debug_idt_ctr);
load_current_idt(); load_current_idt();
} }
NOKPROBE_SYMBOL(debug_stack_set_zero);
void debug_stack_reset(void) noinstr void debug_stack_reset(void)
{ {
if (WARN_ON(!this_cpu_read(debug_idt_ctr))) if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
return; return;
if (this_cpu_dec_return(debug_idt_ctr) == 0) if (this_cpu_dec_return(debug_idt_ctr) == 0)
load_current_idt(); load_current_idt();
} }
NOKPROBE_SYMBOL(debug_stack_reset);
#else /* CONFIG_X86_64 */ #else /* CONFIG_X86_64 */
......
...@@ -303,7 +303,7 @@ NOKPROBE_SYMBOL(unknown_nmi_error); ...@@ -303,7 +303,7 @@ NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi); static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip); static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static void default_do_nmi(struct pt_regs *regs) static noinstr void default_do_nmi(struct pt_regs *regs)
{ {
unsigned char reason = 0; unsigned char reason = 0;
int handled; int handled;
...@@ -329,6 +329,8 @@ static void default_do_nmi(struct pt_regs *regs) ...@@ -329,6 +329,8 @@ static void default_do_nmi(struct pt_regs *regs)
__this_cpu_write(last_nmi_rip, regs->ip); __this_cpu_write(last_nmi_rip, regs->ip);
instrumentation_begin();
handled = nmi_handle(NMI_LOCAL, regs); handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled); __this_cpu_add(nmi_stats.normal, handled);
if (handled) { if (handled) {
...@@ -342,7 +344,7 @@ static void default_do_nmi(struct pt_regs *regs) ...@@ -342,7 +344,7 @@ static void default_do_nmi(struct pt_regs *regs)
*/ */
if (handled > 1) if (handled > 1)
__this_cpu_write(swallow_nmi, true); __this_cpu_write(swallow_nmi, true);
return; goto out;
} }
/* /*
...@@ -374,7 +376,7 @@ static void default_do_nmi(struct pt_regs *regs) ...@@ -374,7 +376,7 @@ static void default_do_nmi(struct pt_regs *regs)
#endif #endif
__this_cpu_add(nmi_stats.external, 1); __this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock); raw_spin_unlock(&nmi_reason_lock);
return; goto out;
} }
raw_spin_unlock(&nmi_reason_lock); raw_spin_unlock(&nmi_reason_lock);
...@@ -412,8 +414,10 @@ static void default_do_nmi(struct pt_regs *regs) ...@@ -412,8 +414,10 @@ static void default_do_nmi(struct pt_regs *regs)
__this_cpu_add(nmi_stats.swallow, 1); __this_cpu_add(nmi_stats.swallow, 1);
else else
unknown_nmi_error(reason, regs); unknown_nmi_error(reason, regs);
out:
instrumentation_end();
} }
NOKPROBE_SYMBOL(default_do_nmi);
/* /*
* NMIs can page fault or hit breakpoints which will cause it to lose * NMIs can page fault or hit breakpoints which will cause it to lose
...@@ -485,7 +489,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_cr2); ...@@ -485,7 +489,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
*/ */
static DEFINE_PER_CPU(int, update_debug_stack); static DEFINE_PER_CPU(int, update_debug_stack);
static bool notrace is_debug_stack(unsigned long addr) static noinstr bool is_debug_stack(unsigned long addr)
{ {
struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks); struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
unsigned long top = CEA_ESTACK_TOP(cs, DB); unsigned long top = CEA_ESTACK_TOP(cs, DB);
...@@ -500,7 +504,6 @@ static bool notrace is_debug_stack(unsigned long addr) ...@@ -500,7 +504,6 @@ static bool notrace is_debug_stack(unsigned long addr)
*/ */
return addr >= bot && addr < top; return addr >= bot && addr < top;
} }
NOKPROBE_SYMBOL(is_debug_stack);
#endif #endif
DEFINE_IDTENTRY_NMI(exc_nmi) DEFINE_IDTENTRY_NMI(exc_nmi)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment