Commit 2641f08b authored by David Woodhouse, committed by Thomas Gleixner

x86/retpoline/entry: Convert entry assembler indirect jumps

Convert indirect jumps in core 32/64bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.
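
The "non-speculative sequence" in question is the retpoline construct
from <asm/nospec-branch.h>. As a minimal sketch of the idea (labels
are illustrative, not the exact __x86_indirect_thunk_* implementation),
an indirect jump to *%rax becomes:

	call	1f		/* pushes address of 2: and jumps to 1:  */
2:	pause			/* speculative execution of the 'ret'    */
	lfence			/* lands here and is trapped in the loop */
	jmp	2b
1:	mov	%rax, (%rsp)	/* overwrite return address with target  */
	ret			/* architecturally "returns" to *%rax    */

The return stack buffer predicts the 'ret' will go back to 2:, so the
CPU never speculates through the attacker-trainable indirect branch
predictor.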

Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath_call label for stub_ptregs_64 to
work, and the use of alternatives will mess that up unless we play
horrid games to prepend NOPs and make the variants the same length.
It's not worth it; in the case where we ALTERNATIVE out the retpoline,
the first instruction at __x86_indirect_thunk_rax is going to be a bare
jmp *%rax anyway.
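
Concretely (byte counts per the x86 encoding, shown for illustration):

	call	*%rax				/* ff d0    : 2 bytes */
	call	__x86_indirect_thunk_rax	/* e8 rel32 : 5 bytes */

The alternatives mechanism pads the shorter variant with NOPs *after*
the instruction, so the return address pushed by the 'call' would sit
at a different offset in the two variants, breaking the address check
in stub_ptregs_64.
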
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: thomas.lendacky@amd.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kees Cook <keescook@google.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
parent 9697fa39
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 .section .entry.text, "ax"
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	CALL_NOSPEC %ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
 END(common_exception)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 #include "calling.h"
@@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
 	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	JMP_NOSPEC %rdi
 END(entry_SYSCALL_64_trampoline)
 
 .popsection
@@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
 	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(, %rax, 8)
+#endif
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax			/* Called from C */
+	JMP_NOSPEC %rax			/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
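
(For context: the CALL_NOSPEC and JMP_NOSPEC macros used above come
from the new <asm/nospec-branch.h>. Simplified, the pattern is roughly
the following sketch, not the exact header, which also carries objtool
annotations and an lfence-based AMD variant:

	.macro JMP_NOSPEC reg:req
	#ifdef CONFIG_RETPOLINE
		/* plain indirect jmp by default; alternatives patch in
		 * the retpoline sequence when X86_FEATURE_RETPOLINE is set */
		ALTERNATIVE "jmp *\reg", \
			    "RETPOLINE_JMP \reg", X86_FEATURE_RETPOLINE
	#else
		jmp	*\reg
	#endif
	.endm

with RETPOLINE_JMP standing for the call/pause/lfence/mov/ret sequence
sketched in the commit message, and CALL_NOSPEC built the same way
around 'call'.)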