Commit 6304672b authored by Linus Torvalds

Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/pti updates from Thomas Gleixner:
 "Another set of melted spectrum related changes:

   - Code simplifications and cleanups for RSB and retpolines.

   - Make the indirect calls in KVM speculation safe.

   - Whitelist CPUs which are known not to speculate from Meltdown and
     prepare for the new CPUID flag which tells the kernel that a CPU is
     not affected.

   - A less rigorous variant of the module retpoline check which merely
     warns when a non-retpoline protected module is loaded and reflects
     that fact in the sysfs file.

   - Prepare for Indirect Branch Prediction Barrier support.

   - Prepare for exposure of the Speculation Control MSRs to guests, so
     guest OSes which depend on those "features" can use them. Includes
     a blacklist of the broken microcodes. The actual exposure of the
     MSRs through KVM is still being worked on"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Simplify indirect_branch_prediction_barrier()
  x86/retpoline: Simplify vmexit_fill_RSB()
  x86/cpufeatures: Clean up Spectre v2 related CPUID flags
  x86/cpu/bugs: Make retpoline module warning conditional
  x86/bugs: Drop one "mitigation" from dmesg
  x86/nospec: Fix header guards names
  x86/alternative: Print unadorned pointers
  x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support
  x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes
  x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown
  x86/msr: Add definitions for new speculation control MSRs
  x86/cpufeatures: Add AMD feature bits for Speculation Control
  x86/cpufeatures: Add Intel feature bits for Speculation Control
  x86/cpufeatures: Add CPUID_7_EDX CPUID leaf
  module/retpoline: Warn about missing retpoline in module
  KVM: VMX: Make indirect call speculation safe
  KVM: x86: Make indirect calls in emulator speculation safe
parents 94263352 64e16720
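
The new CPUID word added by this series (leaf 0x7, sub-leaf 0, EDX, word 18) is what carries the speculation-control enumeration. As a rough illustration that is not part of the commit, the same bits the kernel reads in get_cpu_cap() below can be probed from user space with the <cpuid.h> helper shipped by recent GCC/clang; the bit positions are taken from the cpufeatures.h hunk further down:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 7, sub-leaf 0: EDX becomes capability word 18 in this merge. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 7 not supported\n");
		return 1;
	}

	printf("SPEC_CTRL (IBRS/IBPB): %s\n", (edx >> 26) & 1 ? "yes" : "no");
	printf("STIBP                : %s\n", (edx >> 27) & 1 ? "yes" : "no");
	printf("ARCH_CAPABILITIES    : %s\n", (edx >> 29) & 1 ? "yes" : "no");
	return 0;
}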
@@ -252,7 +252,8 @@ ENTRY(__switch_to_asm)
 	 * exist, overwrite the RSB with entries which capture
 	 * speculative execution to prevent attack.
 	 */
-	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+	/* Clobbers %ebx */
+	FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif

 	/* restore callee-saved registers */
...
@@ -499,7 +499,8 @@ ENTRY(__switch_to_asm)
 	 * exist, overwrite the RSB with entries which capture
 	 * speculative execution to prevent attack.
 	 */
-	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+	/* Clobbers %rbx */
+	FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif

 	/* restore callee-saved registers */
...
@@ -38,4 +38,7 @@ INDIRECT_THUNK(dx)
 INDIRECT_THUNK(si)
 INDIRECT_THUNK(di)
 INDIRECT_THUNK(bp)
+asmlinkage void __fill_rsb(void);
+asmlinkage void __clear_rsb(void);
+
 #endif /* CONFIG_RETPOLINE */
@@ -29,6 +29,7 @@ enum cpuid_leafs
 	CPUID_8000_000A_EDX,
 	CPUID_7_ECX,
 	CPUID_8000_0007_EBX,
+	CPUID_7_EDX,
 };

 #ifdef CONFIG_X86_FEATURE_NAMES
@@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) ||	\
 	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) ||	\
 	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) ||	\
+	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
 	 REQUIRED_MASK_CHECK					  ||	\
-	 BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+	 BUILD_BUG_ON_ZERO(NCAPINTS != 19))

 #define DISABLED_MASK_BIT_SET(feature_bit)				\
 	( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
@@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) ||	\
 	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) ||	\
 	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) ||	\
+	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||	\
 	 DISABLED_MASK_CHECK					  ||	\
-	 BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+	 BUILD_BUG_ON_ZERO(NCAPINTS != 19))

 #define cpu_has(c, bit)							\
 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
...
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	18	/* N 32-bit words worth of info */
+#define NCAPINTS	19	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */

 /*
@@ -203,15 +203,15 @@
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE		( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE		( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
-#define X86_FEATURE_AVX512_4VNNIW	( 7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS	( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
-#define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* Fill RSB on context switches */
+#define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
+#define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */

 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -272,6 +272,9 @@
 #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_IBPB		(13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBRS		(13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_STIBP		(13*32+15) /* Single Thread Indirect Branch Predictors */

 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
@@ -320,6 +323,13 @@
 #define X86_FEATURE_SUCCOR		(17*32+ 1) /* Uncorrectable error containment and recovery */
 #define X86_FEATURE_SMCA		(17*32+ 3) /* Scalable MCA */

+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+
 /*
  * BUG word(s)
  */
...
@@ -77,6 +77,7 @@
 #define DISABLED_MASK15	0
 #define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
 #define DISABLED_MASK17	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

 #endif /* _ASM_X86_DISABLED_FEATURES_H */
@@ -39,6 +39,13 @@

 /* Intel MSRs. Some also available on other CPUs */

+#define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+
+#define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
+
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f

@@ -57,6 +64,11 @@
 #define SNB_C3_AUTO_UNDEMOTE		(1UL << 28)

 #define MSR_MTRRcap			0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
+#define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
...
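
As a rough sketch of how the write side of these definitions fits together (an illustrative helper only; the one actually introduced later in this diff is __ibp_barrier() in bugs.c), an IBPB is issued by writing PRED_CMD_IBPB into MSR_IA32_PRED_CMD on CPUs that advertise the capability:

#include <asm/cpufeature.h>
#include <asm/msr.h>

/* Illustrative sketch, not part of this commit: fire an Indirect Branch
 * Prediction Barrier only when the CPU (and microcode) advertise IBPB. */
static inline void ibpb_flush_if_supported(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB))
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}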
 /* SPDX-License-Identifier: GPL-2.0 */

-#ifndef __NOSPEC_BRANCH_H__
-#define __NOSPEC_BRANCH_H__
+#ifndef _ASM_X86_NOSPEC_BRANCH_H_
+#define _ASM_X86_NOSPEC_BRANCH_H_

 #include <asm/alternative.h>
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>

-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * We define a CPP macro such that it can be used from both .S files and
- * inline assembly. It's possible to do a .macro and then include that
- * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
- */
-
-#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
-#define RSB_FILL_LOOPS		16	/* To avoid underflow */
-
-/*
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version - two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
-	mov	$(nr/2), reg;			\
-771:						\
-	call	772f;				\
-773:	/* speculation trap */			\
-	pause;					\
-	lfence;					\
-	jmp	773b;				\
-772:						\
-	call	774f;				\
-775:	/* speculation trap */			\
-	pause;					\
-	lfence;					\
-	jmp	775b;				\
-774:						\
-	dec	reg;				\
-	jnz	771b;				\
-	add	$(BITS_PER_LONG/8) * nr, sp;
-
 #ifdef __ASSEMBLY__

 /*
@@ -121,17 +77,10 @@
 #endif
 .endm

-/*
- * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
- * monstrosity above, manually.
- */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+/* This clobbers the BX register */
+.macro FILL_RETURN_BUFFER nr:req ftr:req
 #ifdef CONFIG_RETPOLINE
-	ANNOTATE_NOSPEC_ALTERNATIVE
-	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
-		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
-		\ftr
-.Lskip_rsb_\@:
+	ALTERNATIVE "", "call __clear_rsb", \ftr
 #endif
 .endm

@@ -206,17 +155,20 @@ extern char __indirect_thunk_end[];
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-	unsigned long loops;
-
-	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-		      ALTERNATIVE("jmp 910f",
-				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-				  X86_FEATURE_RETPOLINE)
-		      "910:"
-		      : "=r" (loops), ASM_CALL_CONSTRAINT
-		      : : "memory" );
+	alternative_input("",
+			  "call __fill_rsb",
+			  X86_FEATURE_RETPOLINE,
+			  ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
 #endif
 }

+static inline void indirect_branch_prediction_barrier(void)
+{
+	alternative_input("",
+			  "call __ibp_barrier",
+			  X86_FEATURE_USE_IBPB,
+			  ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
+}
+
 #endif /* __ASSEMBLY__ */
-#endif /* __NOSPEC_BRANCH_H__ */
+#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
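
For context, a hypothetical call site for the two C helpers defined above; the actual KVM wiring is not part of this hunk, so treat both the wrapper and where it is called from as illustrative only:

#include <asm/nospec-branch.h>

/* Illustrative only: a hypervisor exit path could clear guest-planted RSB
 * entries and, before switching to a different guest, flush indirect
 * branch predictions.  Not code from this merge. */
static void example_after_guest_exit(bool switching_to_other_guest)
{
	/* Overwrite RSB entries the guest may have primed. */
	vmexit_fill_RSB();

	/* Discard indirect branch predictions learned by the guest. */
	if (switching_to_other_guest)
		indirect_branch_prediction_barrier();
}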
@@ -971,4 +971,7 @@ bool xen_set_default_idle(void);
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
+
+void __ibp_barrier(void);
+
 #endif /* _ASM_X86_PROCESSOR_H */
@@ -106,6 +106,7 @@
 #define REQUIRED_MASK15	0
 #define REQUIRED_MASK16	(NEED_LA57)
 #define REQUIRED_MASK17	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -298,7 +298,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
 	tgt_rip  = next_rip + o_dspl;
 	n_dspl = tgt_rip - orig_insn;

-	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
+	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

 	if (tgt_rip - orig_insn >= 0) {
 		if (n_dspl - 2 <= 127)
@@ -355,7 +355,7 @@ static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *ins
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
 	local_irq_restore(flags);

-	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
+	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
 		   instr, a->instrlen - a->padlen, a->padlen);
 }

@@ -376,7 +376,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 	u8 *instr, *replacement;
 	u8 insnbuf[MAX_PATCH_LEN];

-	DPRINTK("alt table %p -> %p", start, end);
+	DPRINTK("alt table %px, -> %px", start, end);
 	/*
 	 * The scan order should be from start to end. A later scanned
 	 * alternative code can overwrite previously scanned alternative code.
@@ -400,14 +400,14 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 			continue;
 		}

-		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
+		DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
 			a->cpuid >> 5,
 			a->cpuid & 0x1f,
 			instr, a->instrlen,
 			replacement, a->replacementlen, a->padlen);

-		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
-		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
+		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

 		memcpy(insnbuf, replacement, a->replacementlen);
 		insnbuf_sz = a->replacementlen;
@@ -433,7 +433,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 				 a->instrlen - a->replacementlen);
 			insnbuf_sz += a->instrlen - a->replacementlen;
 		}
-		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
+		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

 		text_poke_early(instr, insnbuf, insnbuf_sz);
 	}
...
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/utsname.h>
 #include <linux/cpu.h>
+#include <linux/module.h>

 #include <asm/nospec-branch.h>
 #include <asm/cmdline.h>
@@ -90,10 +91,31 @@ static const char *spectre_v2_strings[] = {
 };

 #undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt

 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

+#ifdef RETPOLINE
+static bool spectre_v2_bad_module;
+
+bool retpoline_module_ok(bool has_retpoline)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
+		return true;
+
+	pr_err("System may be vulnerable to spectre v2\n");
+	spectre_v2_bad_module = true;
+	return false;
+}
+
+static inline const char *spectre_v2_module_string(void)
+{
+	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
+}
+#else
+static inline const char *spectre_v2_module_string(void) { return ""; }
+#endif
+
 static void __init spec2_print_if_insecure(const char *reason)
 {
 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
@@ -249,6 +271,12 @@ static void __init spectre_v2_select_mitigation(void)
 		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 		pr_info("Filling RSB on context switch\n");
 	}
+
+	/* Initialize Indirect Branch Prediction Barrier if supported */
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+		pr_info("Enabling Indirect Branch Prediction Barrier\n");
+	}
 }

 #undef pr_fmt
@@ -278,6 +306,14 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
 		return sprintf(buf, "Not affected\n");

-	return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+	return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+		       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+		       spectre_v2_module_string());
 }
 #endif
+
+void __ibp_barrier(void)
+{
+	__wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+}
+EXPORT_SYMBOL_GPL(__ibp_barrier);
@@ -47,6 +47,8 @@
 #include <asm/pat.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>

 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -769,6 +771,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 		c->x86_capability[CPUID_7_0_EBX] = ebx;
 		c->x86_capability[CPUID_7_ECX] = ecx;
+		c->x86_capability[CPUID_7_EDX] = edx;
 	}

 	/* Extended state features: level 0x0000000d */
@@ -876,6 +879,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }

+static const __initdata struct x86_cpu_id cpu_no_speculation[] = {
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CEDARVIEW,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CLOVERVIEW,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_LINCROFT,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PENWELL,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PINEVIEW,	X86_FEATURE_ANY },
+	{ X86_VENDOR_CENTAUR,	5 },
+	{ X86_VENDOR_INTEL,	5 },
+	{ X86_VENDOR_NSC,	5 },
+	{ X86_VENDOR_ANY,	4 },
+	{}
+};
+
+static const __initdata struct x86_cpu_id cpu_no_meltdown[] = {
+	{ X86_VENDOR_AMD },
+	{}
+};
+
+static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+{
+	u64 ia32_cap = 0;
+
+	if (x86_match_cpu(cpu_no_meltdown))
+		return false;
+
+	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	/* Rogue Data Cache Load? No! */
+	if (ia32_cap & ARCH_CAP_RDCL_NO)
+		return false;
+
+	return true;
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -923,11 +961,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)

 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

-	if (c->x86_vendor != X86_VENDOR_AMD)
-		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+	if (!x86_match_cpu(cpu_no_speculation)) {
+		if (cpu_vulnerable_to_meltdown(c))
+			setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+		setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+	}

 	fpu__init_system(c);
...
@@ -102,6 +102,59 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
 }

+/*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from;
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+ */
+struct sku_microcode {
+	u8 model;
+	u8 stepping;
+	u32 microcode;
+};
+static const struct sku_microcode spectre_bad_microcodes[] = {
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x84 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x84 },
+	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x84 },
+	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x84 },
+	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x84 },
+	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
+	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
+	{ INTEL_FAM6_SKYLAKE_MOBILE,	0x03,	0xc2 },
+	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
+	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
+	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
+	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
+	{ INTEL_FAM6_BROADWELL_XEON_D,	0x03,	0x07000011 },
+	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
+	{ INTEL_FAM6_HASWELL_ULT,	0x01,	0x21 },
+	{ INTEL_FAM6_HASWELL_GT3E,	0x01,	0x18 },
+	{ INTEL_FAM6_HASWELL_CORE,	0x03,	0x23 },
+	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
+	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
+	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
+	/* Updated in the 20180108 release; blacklist until we know otherwise */
+	{ INTEL_FAM6_ATOM_GEMINI_LAKE,	0x01,	0x22 },
+	/* Observed in the wild */
+	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
+	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
+};
+
+static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+		if (c->x86_model == spectre_bad_microcodes[i].model &&
+		    c->x86_mask == spectre_bad_microcodes[i].stepping)
+			return (c->microcode <= spectre_bad_microcodes[i].microcode);
+	}
+	return false;
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
 	u64 misc_enable;
@@ -122,6 +175,30 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
 		c->microcode = intel_get_microcode_revision();

+	/*
+	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+	 * and they also have a different bit for STIBP support. Also,
+	 * a hypervisor might have set the individual AMD bits even on
+	 * Intel CPUs, for finer-grained selection of what's available.
+	 */
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+	}
+	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+		set_cpu_cap(c, X86_FEATURE_STIBP);
+
+	/* Now if any of them are set, check the blacklist and clear the lot */
+	if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
+	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
+		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
+		clear_cpu_cap(c, X86_FEATURE_IBRS);
+		clear_cpu_cap(c, X86_FEATURE_IBPB);
+		clear_cpu_cap(c, X86_FEATURE_STIBP);
+		clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
+		clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
+	}
+
 	/*
 	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
 	 *
...
@@ -21,8 +21,6 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_APERFMPERF,	CPUID_ECX,  0, 0x00000006, 0 },
 	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
-	{ X86_FEATURE_AVX512_4VNNIW,	CPUID_EDX,  2, 0x00000007, 0 },
-	{ X86_FEATURE_AVX512_4FMAPS,	CPUID_EDX,  3, 0x00000007, 0 },
 	{ X86_FEATURE_CAT_L3,		CPUID_EBX,  1, 0x00000010, 0 },
 	{ X86_FEATURE_CAT_L2,		CPUID_EBX,  2, 0x00000010, 0 },
 	{ X86_FEATURE_CDP_L3,		CPUID_ECX,  2, 0x00000010, 1 },
...
@@ -25,6 +25,7 @@
 #include <asm/kvm_emulate.h>
 #include <linux/stringify.h>
 #include <asm/debugreg.h>
+#include <asm/nospec-branch.h>

 #include "x86.h"
 #include "tss.h"
@@ -1021,8 +1022,8 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
 	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

 	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
-	asm("push %[flags]; popf; call *%[fastop]"
-	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
+	asm("push %[flags]; popf; " CALL_NOSPEC
+	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
 	return rc;
 }

@@ -5335,9 +5336,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 	if (!(ctxt->d & ByteOp))
 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

-	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
+	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-	      [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
+	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
 	    : "c"(ctxt->src2.val));

 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
...
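
The same conversion pattern appears in the KVM VMX hunk below: an indirect "call *%[x]" in inline asm becomes CALL_NOSPEC, with the target bound through the thunk_target operand. A minimal sketch of the pattern, with an illustrative function pointer that is not taken from this commit:

#include <asm/nospec-branch.h>

/* Sketch of the retpoline-safe indirect call pattern used above;
 * 'handler' and this wrapper are illustrative, not kernel code. */
static void call_pointer_nospec(void (*handler)(void))
{
	asm volatile(CALL_NOSPEC
		     : /* no outputs */
		     : THUNK_TARGET(handler)
		     : "memory");
}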
@@ -9129,14 +9129,14 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 #endif
 			"pushf\n\t"
 			__ASM_SIZE(push) " $%c[cs]\n\t"
-			"call *%[entry]\n\t"
+			CALL_NOSPEC
 			:
 #ifdef CONFIG_X86_64
 			[sp]"=&r"(tmp),
 #endif
 			ASM_CALL_CONSTRAINT
 			:
-			[entry]"r"(entry),
+			THUNK_TARGET(entry),
 			[ss]"i"(__KERNEL_DS),
 			[cs]"i"(__KERNEL_CS)
 			);
...
@@ -27,6 +27,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_RETPOLINE) += retpoline.o
+OBJECT_FILES_NON_STANDARD_retpoline.o :=y

 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
...
@@ -7,6 +7,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/bitsperlong.h>

 .macro THUNK reg
 	.section .text.__x86.indirect_thunk
@@ -46,3 +47,58 @@ GENERATE_THUNK(r13)
 GENERATE_THUNK(r14)
 GENERATE_THUNK(r15)
 #endif
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+.macro STUFF_RSB nr:req sp:req
+	mov	$(\nr / 2), %_ASM_BX
+	.align 16
+771:
+	call	772f
+773:	/* speculation trap */
+	pause
+	lfence
+	jmp	773b
+	.align 16
+772:
+	call	774f
+775:	/* speculation trap */
+	pause
+	lfence
+	jmp	775b
+	.align 16
+774:
+	dec	%_ASM_BX
+	jnz	771b
+	add	$((BITS_PER_LONG/8) * \nr), \sp
+.endm
+
+#define RSB_FILL_LOOPS		16	/* To avoid underflow */
+
+ENTRY(__fill_rsb)
+	STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
+	ret
+END(__fill_rsb)
+EXPORT_SYMBOL_GPL(__fill_rsb)
+
+#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
+
+ENTRY(__clear_rsb)
+	STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
+	ret
+END(__clear_rsb)
+EXPORT_SYMBOL_GPL(__clear_rsb)
@@ -801,6 +801,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif	/* CONFIG_GENERIC_BUG */

+#ifdef RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+	return true;
+}
+#endif
+
 #ifdef CONFIG_MODULE_SIG
 static inline bool module_sig_ok(struct module *module)
 {
...
@@ -2863,6 +2863,15 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
 }
 #endif /* CONFIG_LIVEPATCH */

+static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
+{
+	if (retpoline_module_ok(get_modinfo(info, "retpoline")))
+		return;
+
+	pr_warn("%s: loading module not compiled with retpoline compiler.\n",
+		mod->name);
+}
+
 /* Sets info->hdr and info->len. */
 static int copy_module_from_user(const void __user *umod, unsigned long len,
 				  struct load_info *info)
@@ -3029,6 +3038,8 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
 	}

+	check_modinfo_retpoline(mod, info);
+
 	if (get_modinfo(info, "staging")) {
 		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
 		pr_warn("%s: module is from the staging directory, the quality "
...
@@ -2165,6 +2165,14 @@ static void add_intree_flag(struct buffer *b, int is_intree)
 		buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
 }

+/* Cannot check for assembler */
+static void add_retpoline(struct buffer *b)
+{
+	buf_printf(b, "\n#ifdef RETPOLINE\n");
+	buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
+	buf_printf(b, "#endif\n");
+}
+
 static void add_staging_flag(struct buffer *b, const char *name)
 {
 	static const char *staging_dir = "drivers/staging";
@@ -2506,6 +2514,7 @@ int main(int argc, char **argv)
 		err |= check_modname_len(mod);
 		add_header(&buf, mod);
 		add_intree_flag(&buf, !external_module);
+		add_retpoline(&buf);
 		add_staging_flag(&buf, mod->name);
 		err |= add_versions(&buf, mod);
 		add_depends(&buf, mod, modules);
...
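
For a module built with a retpoline-capable compiler, the add_retpoline() hook above makes the generated <name>.mod.c carry a fragment along these lines (module name illustrative); the in-kernel loader then picks the flag up via get_modinfo(info, "retpoline") as shown in the kernel/module.c hunk:

/* Fragment emitted into a module's generated .mod.c by add_retpoline();
 * it only declares the flag when the compiler defined RETPOLINE. */
#ifdef RETPOLINE
MODULE_INFO(retpoline, "Y");
#endif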