Commit 6e5f0927 authored by Will Deacon

arm64: Remove Spectre-related CONFIG_* options

The Spectre mitigations are too configurable for their own good, leading
to confusing logic when trying to figure out whether we should mitigate and
when we shouldn't. Although the plethora of command-line options needs to
stick around for backwards compatibility, the default-on CONFIG options
that depend on EXPERT can be dropped, as the mitigations only do anything
if the system is vulnerable, a mitigation is available, and the command
line hasn't disabled it.

Remove CONFIG_HARDEN_BRANCH_PREDICTOR and CONFIG_ARM64_SSBD in favour of
enabling this code unconditionally.
Signed-off-by: Will Deacon <will@kernel.org>
parent 39533e12
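
To make the resulting behaviour concrete, here is a minimal stand-alone sketch
of the runtime decision that remains once the build-time gates are gone:
mitigate only when the CPU is vulnerable, a mitigation mechanism is actually
available, and the user has not disabled it on the command line. This is an
illustration, not the kernel's code; the function and parameter names below
are invented for this example.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the three conditions named in the commit
 * message; it is not a real kernel interface.
 */
static bool should_mitigate(bool cpu_vulnerable, bool mitigation_available,
                            bool disabled_on_cmdline)
{
        if (!cpu_vulnerable)            /* unaffected CPUs need no action */
                return false;
        if (disabled_on_cmdline)        /* e.g. "nospectre_v2" or "ssbd=force-off" */
                return false;
        return mitigation_available;    /* only mitigate if firmware/CPU support exists */
}

int main(void)
{
        /* Vulnerable CPU, mitigation available, no override: mitigate (prints 1). */
        printf("%d\n", should_mitigate(true, true, false));
        /* Same CPU, but mitigation disabled on the command line: skip it (prints 0). */
        printf("%d\n", should_mitigate(true, true, true));
        return 0;
}

With the decision made entirely at runtime, the CONFIG_HARDEN_BRANCH_PREDICTOR
and CONFIG_ARM64_SSBD build-time switches removed in the hunks below become
redundant.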
@@ -1165,32 +1165,6 @@ config UNMAP_KERNEL_AT_EL0
 	  If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-	bool "Harden the branch predictor against aliasing attacks" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors rely on
-	  being able to manipulate the branch predictor for a victim context by
-	  executing aliasing branches in the attacker context. Such attacks
-	  can be partially mitigated against by clearing internal branch
-	  predictor state and limiting the prediction logic in some situations.
-
-	  This config option will take CPU-specific actions to harden the
-	  branch predictor against aliasing attacks and may rely on specific
-	  instruction sequences or control bits being set by the system
-	  firmware.
-
-	  If unsure, say Y.
-
-config ARM64_SSBD
-	bool "Speculative Store Bypass Disable" if EXPERT
-	default y
-	help
-	  This enables mitigation of the bypassing of previous stores
-	  by speculative loads.
-
-	  If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
......
@@ -712,12 +712,8 @@ int get_spectre_v2_workaround_state(void);
 
 static inline int arm64_get_ssbd_state(void)
 {
-#ifdef CONFIG_ARM64_SSBD
 	extern int ssbd_state;
 	return ssbd_state;
-#else
-	return ARM64_SSBD_UNKNOWN;
-#endif
 }
 
 void arm64_set_ssbd_mitigation(bool state);
......
@@ -527,7 +527,6 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
-#ifdef CONFIG_ARM64_SSBD
 DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 static inline int hyp_map_aux_data(void)
@@ -544,12 +543,6 @@ static inline int hyp_map_aux_data(void)
 	}
 
 	return 0;
 }
-#else
-static inline int hyp_map_aux_data(void)
-{
-	return 0;
-}
-#endif
 
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
......
@@ -45,7 +45,6 @@ struct bp_hardening_data {
 	bp_hardening_cb_t	fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -64,14 +63,6 @@ static inline void arm64_apply_bp_hardening(void)
 	if (d->fn)
 		d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)	{ }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
......
@@ -19,7 +19,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o	\
 			   return_address.o cpuinfo.o cpu_errata.o	\
 			   cpufeature.o alternative.o cacheinfo.o	\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o
+			   syscall.o ssbd.o
 
 targets += efi-entry.o
 
@@ -59,7 +59,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
......
@@ -254,9 +254,7 @@ static int detect_harden_bp_fw(void)
 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
 		cb = qcom_link_stack_sanitization;
 
-	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-		install_bp_hardening_cb(cb, smccc_start, smccc_end);
+	install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
 	return 1;
 }
@@ -335,11 +333,6 @@ void arm64_set_ssbd_mitigation(bool state)
 {
 	int conduit;
 
-	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-		pr_info_once("SSBD disabled by kernel configuration\n");
-		return;
-	}
-
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		if (state)
 			asm volatile(SET_PSTATE_SSBS(0));
@@ -584,12 +577,6 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 
 	__spectrev2_safe = false;
 
-	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
 	/* forced off */
 	if (__nospectre_v2 || cpu_mitigations_off()) {
 		pr_info_once("spectrev2 mitigation disabled by command line option\n");
@@ -1004,9 +991,7 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev,
 	switch (ssbd_state) {
 	case ARM64_SSBD_KERNEL:
 	case ARM64_SSBD_FORCE_ENABLE:
-		if (IS_ENABLED(CONFIG_ARM64_SSBD))
-			return sprintf(buf,
-			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
+		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
 	}
 
 	return sprintf(buf, "Vulnerable\n");
......
@@ -1583,7 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 	WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
 {
 	if (user_mode(regs))
@@ -1623,7 +1622,6 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
 		arm64_set_ssbd_mitigation(true);
 	}
 }
-#endif /* CONFIG_ARM64_SSBD */
 
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
@@ -1976,7 +1974,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
 		.min_field_value = 1,
 	},
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypassing Safe (SSBS)",
 		.capability = ARM64_SSBS,
@@ -1988,7 +1985,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
 		.cpu_enable = cpu_enable_ssbs,
 	},
-#endif
 #ifdef CONFIG_ARM64_CNP
 	{
 		.desc = "Common not Private translations",
......
@@ -132,7 +132,6 @@ alternative_else_nop_endif
 	 * them if required.
 	 */
 	.macro	apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb	arm64_enable_wa2_handling
 	b	.L__asm_ssbd_skip\@
 alternative_cb_end
@@ -146,7 +145,6 @@ alternative_cb	arm64_update_smccc_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
 	.endm
 
 	.macro	kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	tbz	x22, #55, 1f
 	bl	do_el0_irq_bp_hardening
 1:
-#endif
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
......
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
 	  virtual machines.
 
 config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
+	def_bool RANDOMIZE_BASE
 
 endif # KVM
......
@@ -116,7 +116,6 @@ el1_hvc_guest:
 			  ARM_SMCCC_ARCH_WORKAROUND_2)
 	cbnz	w1, el1_trap
 
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb	arm64_enable_wa2_handling
 	b	wa2_end
 alternative_cb_end
@@ -143,7 +142,6 @@ alternative_cb_end
 wa2_end:
 	mov	x2, xzr
 	mov	x1, xzr
-#endif
 
 wa_epilogue:
 	mov	x0, xzr
......
@@ -489,7 +489,6 @@ static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
 
 static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_ARM64_SSBD
 	/*
 	 * The host runs with the workaround always present. If the
 	 * guest wants it disabled, so be it...
@@ -497,19 +496,16 @@ static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 	if (__needs_ssbd_off(vcpu) &&
 	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
 }
 
 static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_ARM64_SSBD
 	/*
 	 * If the guest has disabled the workaround, bring it back on.
 	 */
 	if (__needs_ssbd_off(vcpu) &&
 	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
 }
 
 static inline void __kvm_unexpected_el2_exception(void)
......