Commit c2876207 authored by Will Deacon

arm64: Rewrite Spectre-v4 mitigation code

Rewrite the Spectre-v4 mitigation handling code to follow the same
approach as that taken by Spectre-v2.

For now, report to KVM that the system is vulnerable (by forcing
'ssbd_state' to ARM64_SSBD_UNKNOWN), as this will be cleared up in
subsequent steps.
Signed-off-by: Will Deacon <will@kernel.org>
parent 9e78b659
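
As with the Spectre-v2 rewrite, mitigation status is tracked as a simple tri-state rather than through 'ssbd_state'. A minimal sketch of that model, assuming the enum is shared with the Spectre-v2 code and using a hypothetical 'spectre_v4_state' variable (only arm64_get_spectre_v4_state() is confirmed by the header changes below):

/* Tri-state mitigation status, assumed shared with the Spectre-v2 code. */
enum mitigation_state {
        SPECTRE_UNAFFECTED,
        SPECTRE_MITIGATED,
        SPECTRE_VULNERABLE,
};

/* Hypothetical backing variable; the real one lives in the collapsed file. */
static enum mitigation_state spectre_v4_state;

enum mitigation_state arm64_get_spectre_v4_state(void)
{
        return spectre_v4_state;
}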
arch/arm64/include/asm/processor.h
@@ -198,25 +198,12 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
                 regs->pmr_save = GIC_PRIO_IRQON;
 }
 
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
-        regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
-        regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                 unsigned long sp)
 {
         start_thread_common(regs, pc);
         regs->pstate = PSR_MODE_EL0t;
-
-        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-                set_ssbs_bit(regs);
-
+        spectre_v4_enable_task_mitigation(current);
         regs->sp = sp;
 }
@@ -233,9 +220,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
         regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
-        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-                set_compat_ssbs_bit(regs);
-
+        spectre_v4_enable_task_mitigation(current);
         regs->compat_sp = sp;
 }
 #endif
arch/arm64/include/asm/spectre.h
@@ -24,4 +24,9 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
 bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
+enum mitigation_state arm64_get_spectre_v4_state(void);
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
+
 #endif /* __ASM_SPECTRE_H */
arch/arm64/kernel/cpu_errata.c
@@ -106,62 +106,7 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
         sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
-        const char      *str;
-        int             state;
-} ssbd_options[] = {
-        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
-        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
-        { "kernel",     ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
-        int i;
-
-        if (!buf || !buf[0])
-                return -EINVAL;
-
-        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
-                int len = strlen(ssbd_options[i].str);
-
-                if (strncmp(buf, ssbd_options[i].str, len))
-                        continue;
-
-                ssbd_state = ssbd_options[i].state;
-                return 0;
-        }
-
-        return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-                                       __le32 *origptr, __le32 *updptr,
-                                       int nr_inst)
-{
-        u32 insn;
-
-        BUG_ON(nr_inst != 1);
-
-        switch (arm_smccc_1_1_get_conduit()) {
-        case SMCCC_CONDUIT_HVC:
-                insn = aarch64_insn_get_hvc_value();
-                break;
-        case SMCCC_CONDUIT_SMC:
-                insn = aarch64_insn_get_smc_value();
-                break;
-        default:
-                return;
-        }
-
-        *updptr = cpu_to_le32(insn);
-}
+int ssbd_state __read_mostly = ARM64_SSBD_UNKNOWN;
 
 void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
@@ -177,144 +122,6 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
         *updptr = cpu_to_le32(aarch64_insn_gen_nop());
 }
 
-void arm64_set_ssbd_mitigation(bool state)
-{
-        int conduit;
-
-        if (this_cpu_has_cap(ARM64_SSBS)) {
-                if (state)
-                        asm volatile(SET_PSTATE_SSBS(0));
-                else
-                        asm volatile(SET_PSTATE_SSBS(1));
-                return;
-        }
-
-        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
-                                       NULL);
-
-        WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
-                                int scope)
-{
-        struct arm_smccc_res res;
-        bool required = true;
-        s32 val;
-        bool this_cpu_safe = false;
-        int conduit;
-
-        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-        if (cpu_mitigations_off())
-                ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
-        /* delay setting __ssb_safe until we get a firmware response */
-        if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
-                this_cpu_safe = true;
-
-        if (this_cpu_has_cap(ARM64_SSBS)) {
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                required = false;
-                goto out_printmsg;
-        }
-
-        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                       ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
-        if (conduit == SMCCC_CONDUIT_NONE) {
-                ssbd_state = ARM64_SSBD_UNKNOWN;
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                return false;
-        }
-
-        val = (s32)res.a0;
-
-        switch (val) {
-        case SMCCC_RET_NOT_SUPPORTED:
-                ssbd_state = ARM64_SSBD_UNKNOWN;
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                return false;
-
-        /* machines with mixed mitigation requirements must not return this */
-        case SMCCC_RET_NOT_REQUIRED:
-                pr_info_once("%s mitigation not required\n", entry->desc);
-                ssbd_state = ARM64_SSBD_MITIGATED;
-                return false;
-
-        case SMCCC_RET_SUCCESS:
-                __ssb_safe = false;
-                required = true;
-                break;
-
-        case 1: /* Mitigation not required on this CPU */
-                required = false;
-                break;
-
-        default:
-                WARN_ON(1);
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                return false;
-        }
-
-        switch (ssbd_state) {
-        case ARM64_SSBD_FORCE_DISABLE:
-                arm64_set_ssbd_mitigation(false);
-                required = false;
-                break;
-
-        case ARM64_SSBD_KERNEL:
-                if (required) {
-                        __this_cpu_write(arm64_ssbd_callback_required, 1);
-                        arm64_set_ssbd_mitigation(true);
-                }
-                break;
-
-        case ARM64_SSBD_FORCE_ENABLE:
-                arm64_set_ssbd_mitigation(true);
-                required = true;
-                break;
-
-        default:
-                WARN_ON(1);
-                break;
-        }
-
-out_printmsg:
-        switch (ssbd_state) {
-        case ARM64_SSBD_FORCE_DISABLE:
-                pr_info_once("%s disabled from command-line\n", entry->desc);
-                break;
-
-        case ARM64_SSBD_FORCE_ENABLE:
-                pr_info_once("%s forced from command-line\n", entry->desc);
-                break;
-        }
-
-        return required;
-}
-
-static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
-{
-        if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
-                cap->matches(cap, SCOPE_LOCAL_CPU);
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-        {},
-};
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -674,12 +481,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
         },
 #endif
         {
-                .desc = "Speculative Store Bypass Disable",
+                .desc = "Spectre-v4",
                 .capability = ARM64_SPECTRE_V4,
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-                .matches = has_ssbd_mitigation,
-                .cpu_enable = cpu_enable_ssbd_mitigation,
-                .midr_range_list = arm64_ssb_cpus,
+                .matches = has_spectre_v4,
+                .cpu_enable = spectre_v4_enable_mitigation,
         },
 #ifdef CONFIG_ARM64_ERRATUM_1418040
         {
@@ -732,18 +538,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
         {
         }
 };
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
-                struct device_attribute *attr, char *buf)
-{
-        if (__ssb_safe)
-                return sprintf(buf, "Not affected\n");
-
-        switch (ssbd_state) {
-        case ARM64_SSBD_KERNEL:
-        case ARM64_SSBD_FORCE_ENABLE:
-                return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
-        }
-
-        return sprintf(buf, "Vulnerable\n");
-}
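
The cpu_show_spec_store_bypass() implementation removed here moves out of cpu_errata.c along with the rest of the SSBD logic. A plausible sketch of its replacement, reporting from the tri-state instead of '__ssb_safe'/'ssbd_state' (the strings are kept from the removed version; the switch shape is an assumption modelled on the Spectre-v2 reporting code):

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        /* Assumed shape: derive the sysfs string from the tri-state. */
        switch (arm64_get_spectre_v4_state()) {
        case SPECTRE_UNAFFECTED:
                return sprintf(buf, "Not affected\n");
        case SPECTRE_MITIGATED:
                return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
        case SPECTRE_VULNERABLE:
        default:
                return sprintf(buf, "Vulnerable\n");
        }
}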
arch/arm64/kernel/cpufeature.c
@@ -1583,46 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
         WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
-        if (user_mode(regs))
-                return 1;
-
-        if (instr & BIT(PSTATE_Imm_shift))
-                regs->pstate |= PSR_SSBS_BIT;
-        else
-                regs->pstate &= ~PSR_SSBS_BIT;
-
-        arm64_skip_faulting_instruction(regs, 4);
-        return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
-        .instr_mask     = ~(1U << PSTATE_Imm_shift),
-        .instr_val      = 0xd500401f | PSTATE_SSBS,
-        .fn             = ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
-        static bool undef_hook_registered = false;
-        static DEFINE_RAW_SPINLOCK(hook_lock);
-
-        raw_spin_lock(&hook_lock);
-        if (!undef_hook_registered) {
-                register_undef_hook(&ssbs_emulation_hook);
-                undef_hook_registered = true;
-        }
-        raw_spin_unlock(&hook_lock);
-
-        if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-                sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
-                arm64_set_ssbd_mitigation(false);
-        } else {
-                arm64_set_ssbd_mitigation(true);
-        }
-}
-
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1983,7 +1943,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .field_pos = ID_AA64PFR1_SSBS_SHIFT,
                 .sign = FTR_UNSIGNED,
                 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
-                .cpu_enable = cpu_enable_ssbs,
         },
 #ifdef CONFIG_ARM64_CNP
         {
arch/arm64/kernel/entry.S
@@ -132,8 +132,8 @@ alternative_else_nop_endif
          * them if required.
          */
         .macro  apply_ssbd, state, tmp1, tmp2
-alternative_cb  arm64_enable_wa2_handling
-        b       .L__asm_ssbd_skip\@
+alternative_cb  spectre_v4_patch_fw_mitigation_enable
+        b       .L__asm_ssbd_skip\@             // Patched to NOP
 alternative_cb_end
         ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
         cbz     \tmp2,  .L__asm_ssbd_skip\@
@@ -141,7 +141,7 @@ alternative_cb_end
         tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
         mov     w1, #\state
-alternative_cb  arm64_update_smccc_conduit
+alternative_cb  spectre_v4_patch_fw_mitigation_conduit
         nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
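
Both alternative callbacks referenced by apply_ssbd are renamed here. The conduit patching behaviour can be read off the arm64_update_smccc_conduit() code removed from cpu_errata.c above; a sketch, assuming the renamed callback is functionally equivalent:

/* Sketch: rewrite the 'nop' above into SMC/HVC #0 depending on the SMCCC
 * conduit. Modelled on the removed arm64_update_smccc_conduit(); equivalence
 * of the renamed callback is an assumption. */
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
                                                   __le32 *origptr,
                                                   __le32 *updptr, int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);   /* Only the single 'nop' is patched */

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case SMCCC_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;         /* No firmware conduit: leave the NOP alone */
        }

        *updptr = cpu_to_le32(insn);
}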
arch/arm64/kernel/hibernate.c
@@ -332,11 +332,7 @@ int swsusp_arch_suspend(void)
                  * mitigation off behind our back, let's set the state
                  * to what we expect it to be.
                  */
-                switch (arm64_get_ssbd_state()) {
-                case ARM64_SSBD_FORCE_ENABLE:
-                case ARM64_SSBD_KERNEL:
-                        arm64_set_ssbd_mitigation(true);
-                }
+                spectre_v4_enable_mitigation(NULL);
         }
 
         local_daif_restore(flags);
arch/arm64/kernel/process.c
@@ -421,8 +421,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                     cpus_have_const_cap(ARM64_HAS_UAO))
                         childregs->pstate |= PSR_UAO_BIT;
 
-                if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-                        set_ssbs_bit(childregs);
+                spectre_v4_enable_task_mitigation(p);
 
                 if (system_uses_irq_prio_masking())
                         childregs->pmr_save = GIC_PRIO_IRQON;
@@ -472,8 +471,6 @@ void uao_thread_switch(struct task_struct *next)
  */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-        struct pt_regs *regs = task_pt_regs(next);
-
         /*
          * Nothing to do for kernel threads, but 'regs' may be junk
          * (e.g. idle task) so check the flags and bail early.
@@ -485,18 +482,10 @@ static void ssbs_thread_switch(struct task_struct *next)
          * If all CPUs implement the SSBS extension, then we just need to
          * context-switch the PSTATE field.
          */
-        if (cpu_have_feature(cpu_feature(SSBS)))
-                return;
-
-        /* If the mitigation is enabled, then we leave SSBS clear. */
-        if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
-            test_tsk_thread_flag(next, TIF_SSBD))
+        if (cpus_have_const_cap(ARM64_SSBS))
                 return;
 
-        if (compat_user_mode(regs))
-                set_compat_ssbs_bit(regs);
-        else if (user_mode(regs))
-                set_ssbs_bit(regs);
+        spectre_v4_enable_task_mitigation(next);
 }
 
 /*
This diff is collapsed.
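
The new spectre_v4_*() implementations land in the collapsed diff above. Judging by the set_ssbs_bit()/set_compat_ssbs_bit() helpers removed from processor.h, the per-task hook amounts to flipping SSBS in the task's saved PSTATE; a rough sketch with a hypothetical helper (the real code may differ):

/* Sketch: update SSBS in a task's saved PSTATE. Based on the helpers
 * removed from processor.h; 'ssbs_update' is a hypothetical name. */
static void ssbs_update(struct pt_regs *regs, bool enable_spec)
{
        u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

        if (enable_spec)
                regs->pstate |= bit;    /* SSBS set: stores may speculate */
        else
                regs->pstate &= ~bit;   /* SSBS clear: mitigation enabled */
}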
arch/arm64/kernel/suspend.c
@@ -72,8 +72,7 @@ void notrace __cpu_suspend_exit(void)
          * have turned the mitigation on. If the user has forcefully
          * disabled it, make sure their wishes are obeyed.
          */
-        if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-                arm64_set_ssbd_mitigation(false);
+        spectre_v4_enable_mitigation(NULL);
 }
 
 /*
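
Note that both resume paths call spectre_v4_enable_mitigation(NULL): the capability argument is unused, so the same function serves as the cpu_enable callback and as a re-enable helper after suspend or hibernation. A sketch of the assumed structure (the spectre_v4_enable_hw_mitigation(), spectre_v4_enable_fw_mitigation() and update_mitigation_state() helpers are hypothetical names):

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
        enum mitigation_state state;

        WARN_ON(preemptible());

        /* Assumed: prefer the SSBS-based mitigation, else fall back to
         * the ARCH_WORKAROUND_2 firmware call. */
        state = spectre_v4_enable_hw_mitigation();
        if (state == SPECTRE_VULNERABLE)
                state = spectre_v4_enable_fw_mitigation();

        update_mitigation_state(&spectre_v4_state, state);
}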