Commit bf98bae3 authored by Linus Torvalds

Merge tag 'x86_urgent_for_v6.5_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "Extraordinary embargoed times call for extraordinary measures. That's
  why this week's x86/urgent branch is larger than usual, containing all
  the known fallout fixes after the SRSO mitigation got merged.

  I know it is a bit late in the game, but everyone who has reported a
  bug stemming from the SRSO pile has tested that branch and has
  confirmed that it fixes their bug.

  Also, I've run it on every machine I have and it is looking good. It
  has been running on this very machine while I'm typing, for two days
  now, without an issue. Famous last words...

   - Use LEA ...%rsp instead of ADD %rsp in the Zen1/2 SRSO return
     sequence, as the latter clobbers flags, which interferes with fastop
     emulation in KVM and leads to guests freezing during boot (see the
     short illustration right after this message)

   - A fix for the DIV(0) quotient data leak on Zen1 to clear the
     divider buffers at the right time

   - Disable the SRSO mitigation on unaffected configurations as it got
     enabled there unnecessarily

   - Change .text section name to fix CONFIG_LTO_CLANG builds

   - Improve the optprobe indirect jmp check so that certain
     configurations can still use optprobes

   - A serious and good scrubbing of the untraining routines by PeterZ:
      - Add proper speculation stopping traps so that objtool is happy
      - Adjust objtool to handle the new thunks
      - Make the thunk pointer assignable to the different untraining
        sequences at runtime, thus avoiding the alternative at the
        return thunk. It simplifies the code a bit too.
      - Add an entry_untrain_ret() main entry point which selects the
        respective untraining sequence
      - Rename things so that they're more clear
      - Fix stack validation with FRAME_POINTER=y builds

   - Fix static call patching to handle when a JMP to the return thunk
     is the last insn on the very last module memory page

   - Add more documentation about what each untraining routine does and
     why"

* tag 'x86_urgent_for_v6.5_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/srso: Correct the mitigation status when SMT is disabled
  x86/static_call: Fix __static_call_fixup()
  objtool/x86: Fixup frame-pointer vs rethunk
  x86/srso: Explain the untraining sequences a bit more
  x86/cpu/kvm: Provide UNTRAIN_RET_VM
  x86/cpu: Cleanup the untrain mess
  x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
  x86/cpu: Rename original retbleed methods
  x86/cpu: Clean up SRSO return thunk mess
  x86/alternative: Make custom return thunk unconditional
  objtool/x86: Fix SRSO mess
  x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
  x86/cpu: Fix __x86_return_thunk symbol type
  x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT
  x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
  x86/srso: Disable the mitigation on unaffected configurations
  x86/CPU/AMD: Fix the DIV(0) initial fix attempt
  x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
parents 4e7ffde6 6405b72e
@@ -141,8 +141,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.
 
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
......
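
  The BTB-alias trick described in the hunk above is easiest to see with
  concrete numbers. A minimal sketch with made-up example addresses (the
  real placement is whatever the vmlinux.lds.S changes further down
  enforce; only the XOR pattern between the two symbols matters):

        /* hypothetical example addresses, for illustration only */
        .set    example_alias_untrain_ret, 0xffffffff82000000  /* bits 2, 8, 14, 20 clear */
        .set    example_alias_safe_ret,    0xffffffff82104104  /* bits 2, 8, 14, 20 set   */
        /* XOR of the two == (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20) == 0x104104,
           which is exactly the relationship the vmlinux.lds.S ASSERT below checks */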
@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 static __always_inline void arch_exit_to_user_mode(void)
 {
         mds_user_clear_cpu_buffers();
+        amd_clear_divider();
 }
 
 #define arch_exit_to_user_mode arch_exit_to_user_mode
......
@@ -272,9 +272,9 @@
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
+#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET	""
+#define CALL_UNTRAIN_RET	""
 #endif
 
 /*
@@ -282,7 +282,7 @@
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
  * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -293,14 +293,20 @@
         defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
         VALIDATE_UNRET_END
         ALTERNATIVE_3 "",						\
-                      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
                       "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
                       __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
 #endif
+.endm
 
-#ifdef CONFIG_CPU_SRSO
-        ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-                      "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+.macro UNTRAIN_RET_VM
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+        VALIDATE_UNRET_END
+        ALTERNATIVE_3 "",						\
+                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+                      "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT,	\
+                      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
 #endif
 .endm
@@ -309,15 +315,10 @@
         defined(CONFIG_CALL_DEPTH_TRACKING)
         VALIDATE_UNRET_END
         ALTERNATIVE_3 "",						\
-                      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
                       "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
                       __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-        ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-                      "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
@@ -341,17 +342,24 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
 
+#ifdef CONFIG_RETHUNK
 extern void __x86_return_thunk(void);
-extern void zen_untrain_ret(void);
+#else
+static inline void __x86_return_thunk(void) {}
+#endif
+
+extern void retbleed_return_thunk(void);
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+
+extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
+
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
-#ifdef CONFIG_CALL_THUNKS
 extern void (*x86_return_thunk)(void);
-#else
-#define x86_return_thunk	(&__x86_return_thunk)
-#endif
 
 #ifdef CONFIG_CALL_DEPTH_TRACKING
 extern void __x86_return_skl(void);
@@ -478,9 +486,6 @@ enum ssb_mitigation {
         SPEC_STORE_BYPASS_SECCOMP,
 };
 
-extern char __indirect_thunk_start[];
-extern char __indirect_thunk_end[];
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
......
@@ -687,10 +687,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 #ifdef CONFIG_RETHUNK
 
-#ifdef CONFIG_CALL_THUNKS
-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
-#endif
-
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
......
@@ -1329,3 +1329,4 @@ void noinstr amd_clear_divider(void)
         asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
                      :: "a" (0), "d" (0), "r" (1));
 }
+EXPORT_SYMBOL_GPL(amd_clear_divider);
@@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
@@ -165,6 +167,11 @@ void __init cpu_select_mitigations(void)
         md_clear_select_mitigation();
         srbds_select_mitigation();
         l1d_flush_select_mitigation();
+
+        /*
+         * srso_select_mitigation() depends and must run after
+         * retbleed_select_mitigation().
+         */
         srso_select_mitigation();
         gds_select_mitigation();
 }
@@ -1035,6 +1042,9 @@ static void __init retbleed_select_mitigation(void)
                 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
                 setup_force_cpu_cap(X86_FEATURE_UNRET);
 
+                if (IS_ENABLED(CONFIG_RETHUNK))
+                        x86_return_thunk = retbleed_return_thunk;
+
                 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
                     boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                         pr_err(RETBLEED_UNTRAIN_MSG);
@@ -1044,6 +1054,7 @@
 
         case RETBLEED_MITIGATION_IBPB:
                 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+                setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
                 mitigate_smt = true;
                 break;
@@ -2417,9 +2428,10 @@
                  * Zen1/2 with SMT off aren't vulnerable after the right
                  * IBPB microcode has been applied.
                  */
-                if ((boot_cpu_data.x86 < 0x19) &&
-                    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
+                if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
                         setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+                        return;
+                }
         }
 
         if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
@@ -2448,11 +2460,15 @@
                          * like ftrace, static_call, etc.
                          */
                         setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+                        setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-                        if (boot_cpu_data.x86 == 0x19)
+                        if (boot_cpu_data.x86 == 0x19) {
                                 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-                        else
+                                x86_return_thunk = srso_alias_return_thunk;
+                        } else {
                                 setup_force_cpu_cap(X86_FEATURE_SRSO);
+                                x86_return_thunk = srso_return_thunk;
+                        }
                         srso_mitigation = SRSO_MITIGATION_SAFE_RET;
                 } else {
                         pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
@@ -2696,6 +2712,9 @@ static ssize_t retbleed_show_state(char *buf)
 
 static ssize_t srso_show_state(char *buf)
 {
+        if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+                return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+
         return sysfs_emit(buf, "%s%s\n",
                           srso_strings[srso_mitigation],
                           (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
......
@@ -226,7 +226,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 }
 
 /* Check whether insn is indirect jump */
-static int __insn_is_indirect_jump(struct insn *insn)
+static int insn_is_indirect_jump(struct insn *insn)
 {
         return ((insn->opcode.bytes[0] == 0xff &&
                 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -260,26 +260,6 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
         return (start <= target && target <= start + len);
 }
 
-static int insn_is_indirect_jump(struct insn *insn)
-{
-        int ret = __insn_is_indirect_jump(insn);
-
-#ifdef CONFIG_RETPOLINE
-        /*
-         * Jump to x86_indirect_thunk_* is treated as an indirect jump.
-         * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
-         * older gcc may use indirect jump. So we add this check instead of
-         * replace indirect-jump check.
-         */
-        if (!ret)
-                ret = insn_jump_into_range(insn,
-                                (unsigned long)__indirect_thunk_start,
-                                (unsigned long)__indirect_thunk_end -
-                                (unsigned long)__indirect_thunk_start);
-#endif
-        return ret;
-}
-
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
@@ -334,9 +314,21 @@ static int can_optimize(unsigned long paddr)
                 /* Recover address */
                 insn.kaddr = (void *)addr;
                 insn.next_byte = (void *)(addr + insn.length);
-                /* Check any instructions don't jump into target */
-                if (insn_is_indirect_jump(&insn) ||
-                    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
+                /*
+                 * Check any instructions don't jump into target, indirectly or
+                 * directly.
+                 *
+                 * The indirect case is present to handle a code with jump
+                 * tables. When the kernel uses retpolines, the check should in
+                 * theory additionally look for jumps to indirect thunks.
+                 * However, the kernel built with retpolines or IBT has jump
+                 * tables disabled so the check can be skipped altogether.
+                 */
+                if (!IS_ENABLED(CONFIG_RETPOLINE) &&
+                    !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
+                    insn_is_indirect_jump(&insn))
+                        return 0;
+                if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
                                          DISP32_SIZE))
                         return 0;
                 addr += insn.length;
......
@@ -186,6 +186,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
  */
 bool __static_call_fixup(void *tramp, u8 op, void *dest)
 {
+        unsigned long addr = (unsigned long)tramp;
+        /*
+         * Not all .return_sites are a static_call trampoline (most are not).
+         * Check if the 3 bytes after the return are still kernel text, if not,
+         * then this definitely is not a trampoline and we need not worry
+         * further.
+         *
+         * This avoids the memcmp() below tripping over pagefaults etc..
+         */
+        if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
+            !kernel_text_address(addr + 7))
+                return false;
+
         if (memcmp(tramp+5, tramp_ud, 3)) {
                 /* Not a trampoline site, not our problem. */
                 return false;
......
@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error)
 {
         do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
                       FPE_INTDIV, error_get_trap_addr(regs));
-
-        amd_clear_divider();
 }
 
 DEFINE_IDTENTRY(exc_overflow)
......
@@ -133,27 +133,25 @@
                 KPROBES_TEXT
                 SOFTIRQENTRY_TEXT
 #ifdef CONFIG_RETPOLINE
-                __indirect_thunk_start = .;
-                *(.text.__x86.indirect_thunk)
-                *(.text.__x86.return_thunk)
-                __indirect_thunk_end = .;
+                *(.text..__x86.indirect_thunk)
+                *(.text..__x86.return_thunk)
 #endif
                 STATIC_CALL_TEXT
 
                 ALIGN_ENTRY_TEXT_BEGIN
 #ifdef CONFIG_CPU_SRSO
-                *(.text.__x86.rethunk_untrain)
+                *(.text..__x86.rethunk_untrain)
 #endif
 
                 ENTRY_TEXT
 
 #ifdef CONFIG_CPU_SRSO
                 /*
-                 * See the comment above srso_untrain_ret_alias()'s
+                 * See the comment above srso_alias_untrain_ret()'s
                  * definition.
                  */
-                . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
-                *(.text.__x86.rethunk_safe)
+                . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+                *(.text..__x86.rethunk_safe)
 #endif
                 ALIGN_ENTRY_TEXT_END
                 *(.gnu.warning)
@@ -523,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store);
 #endif
 
 #ifdef CONFIG_RETHUNK
-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
@@ -538,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
-        (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+        (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
                 "SRSO function pair won't alias");
 #endif
......
@@ -4006,6 +4006,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
 
         guest_state_enter_irqoff();
 
+        amd_clear_divider();
+
         if (sev_es_guest(vcpu->kvm))
                 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
         else
......
@@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run)
          * because interrupt handlers won't sanitize 'ret' if the return is
          * from the kernel.
          */
-        UNTRAIN_RET
-
-        /* SRSO */
-        ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT
+        UNTRAIN_RET_VM
 
         /*
          * Clear all general purpose registers except RSP and RAX to prevent
@@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
          * because interrupt handlers won't sanitize RET if the return is
          * from the kernel.
          */
-        UNTRAIN_RET
+        UNTRAIN_RET_VM
 
         /* "Pop" @spec_ctrl_intercepted. */
         pop %_ASM_BX
......
@@ -13,7 +13,7 @@
 #include <asm/frame.h>
 #include <asm/nops.h>
 
-	.section .text.__x86.indirect_thunk
+	.section .text..__x86.indirect_thunk
 
 .macro POLINE reg
@@ -133,75 +133,106 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #ifdef CONFIG_RETHUNK
 
 /*
- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
  *
- * - srso_untrain_ret_alias() is 2M aligned
- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
  * and 20 in its virtual address are set (while those bits in the
- * srso_untrain_ret_alias() function are cleared).
+ * srso_alias_untrain_ret() function are cleared).
  *
  * This guarantees that those two addresses will alias in the branch
  * target buffer of Zen3/4 generations, leading to any potential
  * poisoned entries at that BTB slot to get evicted.
  *
- * As a result, srso_safe_ret_alias() becomes a safe return.
+ * As a result, srso_alias_safe_ret() becomes a safe return.
  */
 #ifdef CONFIG_CPU_SRSO
-	.section .text.__x86.rethunk_untrain
+	.section .text..__x86.rethunk_untrain
 
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
-	jmp __x86_return_thunk
-SYM_FUNC_END(srso_untrain_ret_alias)
-__EXPORT_THUNK(srso_untrain_ret_alias)
+	jmp srso_alias_return_thunk
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 
-	.section .text.__x86.rethunk_safe
+	.section .text..__x86.rethunk_safe
+#else
+/* dummy definition for alternatives */
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_FUNC_END(srso_alias_untrain_ret)
 #endif
 
-/* Needs a definition for the __x86_return_thunk alternative below. */
-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
-#ifdef CONFIG_CPU_SRSO
-	add $8, %_ASM_SP
+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	lea 8(%_ASM_SP), %_ASM_SP
 	UNWIND_HINT_FUNC
-#endif
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_safe_ret_alias)
+SYM_FUNC_END(srso_alias_safe_ret)
 
-	.section .text.__x86.return_thunk
+	.section .text..__x86.return_thunk
+
+SYM_CODE_START(srso_alias_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_alias_safe_ret
+	ud2
+SYM_CODE_END(srso_alias_return_thunk)
+
+/*
+ * Some generic notes on the untraining sequences:
+ *
+ * They are interchangeable when it comes to flushing potentially wrong
+ * RET predictions from the BTB.
+ *
+ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
+ * Retbleed sequence because the return sequence done there
+ * (srso_safe_ret()) is longer and the return sequence must fully nest
+ * (end before) the untraining sequence. Therefore, the untraining
+ * sequence must fully overlap the return sequence.
+ *
+ * Regarding alignment - the instructions which need to be untrained,
+ * must all start at a cacheline boundary for Zen1/2 generations. That
+ * is, instruction sequences starting at srso_safe_ret() and
+ * the respective instruction sequences at retbleed_return_thunk()
+ * must start at a cacheline boundary.
+ */
 
 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
  *    alignment within the BTB.
- * 2) The instruction at zen_untrain_ret must contain, and not
+ * 2) The instruction at retbleed_untrain_ret must contain, and not
  *    end with, the 0xc3 byte of the RET.
  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
  *    from re-poisioning the BTB prediction.
  */
 	.align 64
-	.skip 64 - (__ret - zen_untrain_ret), 0xcc
-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
+SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	/*
-	 * As executed from zen_untrain_ret, this is:
+	 * As executed from retbleed_untrain_ret, this is:
 	 *
 	 *   TEST $0xcc, %bl
 	 *   LFENCE
-	 *   JMP __x86_return_thunk
+	 *   JMP retbleed_return_thunk
 	 *
 	 * Executing the TEST instruction has a side effect of evicting any BTB
 	 * prediction (potentially attacker controlled) attached to the RET, as
-	 * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
+	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
 	 */
 	.byte 0xf6
 
 	/*
-	 * As executed from __x86_return_thunk, this is a plain RET.
+	 * As executed from retbleed_return_thunk, this is a plain RET.
 	 *
 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
 	 *
@@ -213,13 +244,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
-	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
+	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
	 */
-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
+SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
	ret
	int3
-SYM_CODE_END(__ret)
+SYM_CODE_END(retbleed_return_thunk)
 
 /*
  * Ensure the TEST decoding / BTB invalidation is complete.
@@ -230,16 +261,16 @@ SYM_CODE_END(__ret)
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
-	jmp __ret
+	jmp retbleed_return_thunk
	int3
-SYM_FUNC_END(zen_untrain_ret)
-__EXPORT_THUNK(zen_untrain_ret)
+SYM_FUNC_END(retbleed_untrain_ret)
+__EXPORT_THUNK(retbleed_untrain_ret)
 
 /*
- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
  * above. On kernel entry, srso_untrain_ret() is executed which is a
  *
- * movabs $0xccccccc308c48348,%rax
+ * movabs $0xccccc30824648d48,%rax
  *
  * and when the return thunk executes the inner label srso_safe_ret()
  * later, it is a stack manipulation and a RET which is mispredicted and
@@ -251,22 +282,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8
 
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
 SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
-	add $8, %_ASM_SP
+	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
-	int3
+	/* end of movabs */
	lfence
	call srso_safe_ret
-	int3
+	ud2
 SYM_CODE_END(srso_safe_ret)
 SYM_FUNC_END(srso_untrain_ret)
 __EXPORT_THUNK(srso_untrain_ret)
 
-SYM_FUNC_START(__x86_return_thunk)
-	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
-			"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+SYM_CODE_START(srso_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_safe_ret
+	ud2
+SYM_CODE_END(srso_return_thunk)
+
+SYM_FUNC_START(entry_untrain_ret)
+	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
+SYM_CODE_START(__x86_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
	int3
 SYM_CODE_END(__x86_return_thunk)
 EXPORT_SYMBOL(__x86_return_thunk)
......
@@ -824,8 +824,11 @@ bool arch_is_retpoline(struct symbol *sym)
 
 bool arch_is_rethunk(struct symbol *sym)
 {
-        return !strcmp(sym->name, "__x86_return_thunk") ||
-               !strcmp(sym->name, "srso_untrain_ret") ||
-               !strcmp(sym->name, "srso_safe_ret") ||
-               !strcmp(sym->name, "__ret");
+        return !strcmp(sym->name, "__x86_return_thunk");
+}
+
+bool arch_is_embedded_insn(struct symbol *sym)
+{
+        return !strcmp(sym->name, "retbleed_return_thunk") ||
+               !strcmp(sym->name, "srso_safe_ret");
 }
@@ -389,7 +389,7 @@ static int decode_instructions(struct objtool_file *file)
                 if (!strcmp(sec->name, ".noinstr.text") ||
                     !strcmp(sec->name, ".entry.text") ||
                     !strcmp(sec->name, ".cpuidle.text") ||
-                    !strncmp(sec->name, ".text.__x86.", 12))
+                    !strncmp(sec->name, ".text..__x86.", 13))
                         sec->noinstr = true;
 
                 /*
@@ -455,7 +455,7 @@ static int decode_instructions(struct objtool_file *file)
                         return -1;
                 }
 
-                if (func->return_thunk || func->alias != func)
+                if (func->embedded_insn || func->alias != func)
                         continue;
 
                 if (!find_insn(file, sec, func->offset)) {
@@ -1288,16 +1288,33 @@ static int add_ignore_alternatives(struct objtool_file *file)
         return 0;
 }
 
+/*
+ * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
+ * will be added to the .retpoline_sites section.
+ */
 __weak bool arch_is_retpoline(struct symbol *sym)
 {
         return false;
 }
 
+/*
+ * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
+ * will be added to the .return_sites section.
+ */
 __weak bool arch_is_rethunk(struct symbol *sym)
 {
         return false;
 }
 
+/*
+ * Symbols that are embedded inside other instructions, because sometimes crazy
+ * code exists. These are mostly ignored for validation purposes.
+ */
+__weak bool arch_is_embedded_insn(struct symbol *sym)
+{
+        return false;
+}
+
 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
 {
         struct reloc *reloc;
@@ -1576,14 +1593,14 @@ static int add_jump_destinations(struct objtool_file *file)
                         struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
 
                         /*
-                         * This is a special case for zen_untrain_ret().
+                         * This is a special case for retbleed_untrain_ret().
                          * It jumps to __x86_return_thunk(), but objtool
                         * can't find the thunk's starting RET
                         * instruction, because the RET is also in the
                         * middle of another instruction. Objtool only
                         * knows about the outer instruction.
                         */
-                        if (sym && sym->return_thunk) {
+                        if (sym && sym->embedded_insn) {
                                 add_return_call(file, insn, false);
                                 continue;
                         }
@@ -2502,6 +2519,9 @@ static int classify_symbols(struct objtool_file *file)
                 if (arch_is_rethunk(func))
                         func->return_thunk = true;
 
+                if (arch_is_embedded_insn(func))
+                        func->embedded_insn = true;
+
                 if (arch_ftrace_match(func->name))
                         func->fentry = true;
@@ -2630,12 +2650,17 @@ static int decode_sections(struct objtool_file *file)
         return 0;
 }
 
-static bool is_fentry_call(struct instruction *insn)
+static bool is_special_call(struct instruction *insn)
 {
-        if (insn->type == INSN_CALL &&
-            insn_call_dest(insn) &&
-            insn_call_dest(insn)->fentry)
-                return true;
+        if (insn->type == INSN_CALL) {
+                struct symbol *dest = insn_call_dest(insn);
+
+                if (!dest)
+                        return false;
+
+                if (dest->fentry || dest->embedded_insn)
+                        return true;
+        }
 
         return false;
 }
@@ -3636,7 +3661,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
                         if (ret)
                                 return ret;
 
-                if (opts.stackval && func && !is_fentry_call(insn) &&
+                if (opts.stackval && func && !is_special_call(insn) &&
                     !has_valid_stack_frame(&state)) {
                         WARN_INSN(insn, "call without frame pointer save/setup");
                         return 1;
......
@@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base);
 
 bool arch_is_retpoline(struct symbol *sym);
 bool arch_is_rethunk(struct symbol *sym);
+bool arch_is_embedded_insn(struct symbol *sym);
 
 int arch_rewrite_retpolines(struct objtool_file *file);
......
@@ -66,6 +66,7 @@ struct symbol {
         u8 fentry : 1;
         u8 profiling_func : 1;
         u8 warned : 1;
+        u8 embedded_insn : 1;
         struct list_head pv_target;
         struct reloc *relocs;
 };
......
@@ -1038,9 +1038,7 @@ static int thread_stack__trace_end(struct thread_stack *ts,
 
 static bool is_x86_retpoline(const char *name)
 {
-        const char *p = strstr(name, "__x86_indirect_thunk_");
-
-        return p == name || !strcmp(name, "__indirect_thunk_start");
+        return strstr(name, "__x86_indirect_thunk_") == name;
 }
 
 /*
......