Commit e3a9e681 authored by Peter Zijlstra

x86/entry: Fixup bad_iret vs noinstr

vmlinux.o: warning: objtool: fixup_bad_iret()+0x8e: call to memcpy() leaves .noinstr.text section

Worse, with KASAN enabled there is no telling what memcpy() actually is. Force
the use of __memcpy(), which is our assembly implementation.
Reported-by: Marco Elver <elver@google.com>
Suggested-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Marco Elver <elver@google.com>
Link: https://lkml.kernel.org/r/20200618144801.760070502@infradead.org
parent 734d099b
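
For context, a minimal C sketch of the pattern this commit enforces (the function name below is illustrative, not from the commit): code placed in the .noinstr.text section must not call anything the compiler may instrument. Under CONFIG_KASAN, a plain memcpy() call can resolve to an instrumented wrapper, so the raw assembly __memcpy() has to be called explicitly.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/compiler.h>	/* noinstr */
#include <linux/string.h>	/* __memcpy() on x86 */

/*
 * noinstr places the function in .noinstr.text; objtool then
 * verifies it makes no calls into instrumentable code. With KASAN,
 * memcpy() may be an instrumented wrapper, so __memcpy() -- the
 * plain assembly implementation -- is used instead.
 */
noinstr void copy_frame(void *dst, const void *src, size_t len)
{
	__memcpy(dst, src, len);
}
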
arch/x86/kernel/traps.c
@@ -690,13 +690,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the temporary storage. */
-	memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
 
 	/* Copy the remainder of the stack from the current stack. */
-	memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
 
 	/* Update the entry stack */
-	memcpy(new_stack, &tmp, sizeof(tmp));
+	__memcpy(new_stack, &tmp, sizeof(tmp));
 
 	BUG_ON(!user_mode(&new_stack->regs));
 	return new_stack;
...
arch/x86/lib/memcpy_64.S
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
 	retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 MCSAFE_TEST_CTL
...