Commit 47f33de4 authored by Lai Jiangshan, committed by Borislav Petkov

x86/sev: Mark the code returning to user space as syscall gap

When returning to user space, %rsp is user-controlled value.

If it is a SNP-guest and the hypervisor decides to mess with the
code-page for this path while a CPU is executing it, a potential #VC
could hit in the syscall return path and mislead the #VC handler.

So make ip_within_syscall_gap() return true in this case.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Joerg Roedel <jroedel@suse.de>
Link: https://lore.kernel.org/r/20220412124909.10467-1-jiangshanlai@gmail.com
parent c42b1451
@@ -215,8 +215,10 @@ syscall_return_via_sysret:
 	popq	%rdi
 	popq	%rsp
+SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
 	swapgs
 	sysretq
+SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
 SYM_CODE_END(entry_SYSCALL_64)

 /*
...
@@ -297,6 +297,7 @@ sysret32_from_system_call:
 	 * code. We zero R8-R10 to avoid info leaks.
 	 */
 	movq	RSP-ORIG_RAX(%rsp), %rsp
+SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)

 	/*
 	 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
@@ -314,6 +315,7 @@ sysret32_from_system_call:
 	xorl	%r10d, %r10d
 	swapgs
 	sysretl
+SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
 SYM_CODE_END(entry_SYSCALL_compat)

 /*
...
@@ -13,6 +13,8 @@ void syscall_init(void);
 #ifdef CONFIG_X86_64
 void entry_SYSCALL_64(void);
 void entry_SYSCALL_64_safe_stack(void);
+void entry_SYSRETQ_unsafe_stack(void);
+void entry_SYSRETQ_end(void);
 long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
 #endif
@@ -28,6 +30,8 @@ void entry_SYSENTER_compat(void);
 void __end_entry_SYSENTER_compat(void);
 void entry_SYSCALL_compat(void);
 void entry_SYSCALL_compat_safe_stack(void);
+void entry_SYSRETL_compat_unsafe_stack(void);
+void entry_SYSRETL_compat_end(void);
 void entry_INT80_compat(void);
 #ifdef CONFIG_XEN_PV
 void xen_entry_INT80_compat(void);
...
@@ -186,9 +186,13 @@ static __always_inline bool ip_within_syscall_gap(struct pt_regs *regs)
 	bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
 		    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack);
+	ret = ret || (regs->ip >= (unsigned long)entry_SYSRETQ_unsafe_stack &&
+		      regs->ip <  (unsigned long)entry_SYSRETQ_end);

 #ifdef CONFIG_IA32_EMULATION
 	ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
 		      regs->ip <  (unsigned long)entry_SYSCALL_compat_safe_stack);
+	ret = ret || (regs->ip >= (unsigned long)entry_SYSRETL_compat_unsafe_stack &&
+		      regs->ip <  (unsigned long)entry_SYSRETL_compat_end);
 #endif

 	return ret;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment