Commit 228a26b9 authored by James Morse's avatar James Morse

arm64: Use the clearbhb instruction in mitigations

Future CPUs may implement a clearbhb instruction that is sufficient
to mitigate Spectre-BHB. CPUs that implement this instruction, but
not CSV2.3, must be affected by Spectre-BHB.

Add support to use this instruction as the BHB mitigation on CPUs
that support it. The instruction is in the hint space, so it will
be treated as a NOP by older CPUs.
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
parent a5905d6a
...@@ -108,6 +108,13 @@ ...@@ -108,6 +108,13 @@
hint #20 hint #20
.endm .endm
/*
 * Clear Branch History instruction (FEAT_CLRBHB).
 * Encoded in the HINT space (hint #22), so CPUs that do not implement
 * it execute it as a NOP.
 */
.macro clearbhb
hint #22
.endm
/* /*
* Speculation barrier * Speculation barrier
*/ */
...@@ -884,6 +891,16 @@ alternative_cb smccc_patch_fw_mitigation_conduit ...@@ -884,6 +891,16 @@ alternative_cb smccc_patch_fw_mitigation_conduit
alternative_cb_end alternative_cb_end
ldp x2, x3, [sp], #16 ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16 ldp x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
.endm
/*
 * Mitigate Spectre-BHB by executing the ClearBHB instruction followed
 * by an isb. The spectre_bhb_patch_clearbhb alternative callback
 * rewrites both instructions to NOPs when this mitigation is not in
 * use on the system.
 */
.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb spectre_bhb_patch_clearbhb
/* Patched to NOP when not supported */
clearbhb
isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
.endm .endm
#endif /* __ASM_ASSEMBLER_H */ #endif /* __ASM_ASSEMBLER_H */
...@@ -653,6 +653,19 @@ static inline bool supports_csv2p3(int scope) ...@@ -653,6 +653,19 @@ static inline bool supports_csv2p3(int scope)
return csv2_val == 3; return csv2_val == 3;
} }
/*
 * Does this CPU (SCOPE_LOCAL_CPU) or the whole system (any other scope)
 * advertise the ClearBHB instruction via ID_AA64ISAR2_EL1.CLEARBHB?
 * Returns true when the (unsigned) field is non-zero.
 */
static inline bool supports_clearbhb(int scope)
{
	u64 isar2 = (scope == SCOPE_LOCAL_CPU) ?
		    read_sysreg_s(SYS_ID_AA64ISAR2_EL1) :
		    read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_CLEARBHB_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void); const struct cpumask *system_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
......
...@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op { ...@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op {
AARCH64_INSN_HINT_PSB = 0x11 << 5, AARCH64_INSN_HINT_PSB = 0x11 << 5,
AARCH64_INSN_HINT_TSB = 0x12 << 5, AARCH64_INSN_HINT_TSB = 0x12 << 5,
AARCH64_INSN_HINT_CSDB = 0x14 << 5, AARCH64_INSN_HINT_CSDB = 0x14 << 5,
AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,
AARCH64_INSN_HINT_BTI = 0x20 << 5, AARCH64_INSN_HINT_BTI = 0x20 << 5,
AARCH64_INSN_HINT_BTIC = 0x22 << 5, AARCH64_INSN_HINT_BTIC = 0x22 << 5,
......
...@@ -773,6 +773,7 @@ ...@@ -773,6 +773,7 @@
#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 #define ID_AA64ISAR1_GPI_IMP_DEF 0x1
/* id_aa64isar2 */ /* id_aa64isar2 */
#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
#define ID_AA64ISAR2_RPRES_SHIFT 4 #define ID_AA64ISAR2_RPRES_SHIFT 4
#define ID_AA64ISAR2_WFXT_SHIFT 0 #define ID_AA64ISAR2_WFXT_SHIFT 0
......
...@@ -32,6 +32,12 @@ enum arm64_bp_harden_el1_vectors { ...@@ -32,6 +32,12 @@ enum arm64_bp_harden_el1_vectors {
* canonical vectors. * canonical vectors.
*/ */
EL1_VECTOR_BHB_FW, EL1_VECTOR_BHB_FW,
/*
* Use the ClearBHB instruction, before branching to the canonical
* vectors.
*/
EL1_VECTOR_BHB_CLEAR_INSN,
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
/* /*
...@@ -43,6 +49,7 @@ enum arm64_bp_harden_el1_vectors { ...@@ -43,6 +49,7 @@ enum arm64_bp_harden_el1_vectors {
#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY #ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
#define EL1_VECTOR_BHB_LOOP -1 #define EL1_VECTOR_BHB_LOOP -1
#define EL1_VECTOR_BHB_FW -1 #define EL1_VECTOR_BHB_FW -1
#define EL1_VECTOR_BHB_CLEAR_INSN -1
#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ #endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
/* The vectors to use on return from EL0. e.g. to remap the kernel */ /* The vectors to use on return from EL0. e.g. to remap the kernel */
......
...@@ -231,6 +231,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { ...@@ -231,6 +231,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
}; };
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
ARM64_FTR_END, ARM64_FTR_END,
}; };
......
...@@ -657,6 +657,7 @@ alternative_else_nop_endif ...@@ -657,6 +657,7 @@ alternative_else_nop_endif
#define BHB_MITIGATION_NONE 0 #define BHB_MITIGATION_NONE 0
#define BHB_MITIGATION_LOOP 1 #define BHB_MITIGATION_LOOP 1
#define BHB_MITIGATION_FW 2 #define BHB_MITIGATION_FW 2
#define BHB_MITIGATION_INSN 3
.macro tramp_ventry, vector_start, regsize, kpti, bhb .macro tramp_ventry, vector_start, regsize, kpti, bhb
.align 7 .align 7
...@@ -673,6 +674,11 @@ alternative_else_nop_endif ...@@ -673,6 +674,11 @@ alternative_else_nop_endif
__mitigate_spectre_bhb_loop x30 __mitigate_spectre_bhb_loop x30
.endif // \bhb == BHB_MITIGATION_LOOP .endif // \bhb == BHB_MITIGATION_LOOP
.if \bhb == BHB_MITIGATION_INSN
clearbhb
isb
.endif // \bhb == BHB_MITIGATION_INSN
.if \kpti == 1 .if \kpti == 1
/* /*
* Defend against branch aliasing attacks by pushing a dummy * Defend against branch aliasing attacks by pushing a dummy
...@@ -749,6 +755,7 @@ SYM_CODE_START_NOALIGN(tramp_vectors) ...@@ -749,6 +755,7 @@ SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors) SYM_CODE_END(tramp_vectors)
...@@ -811,6 +818,7 @@ SYM_CODE_START(__bp_harden_el1_vectors) ...@@ -811,6 +818,7 @@ SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_el1_vector bhb=BHB_MITIGATION_LOOP generate_el1_vector bhb=BHB_MITIGATION_LOOP
generate_el1_vector bhb=BHB_MITIGATION_FW generate_el1_vector bhb=BHB_MITIGATION_FW
generate_el1_vector bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors) SYM_CODE_END(__bp_harden_el1_vectors)
.popsection .popsection
......
...@@ -69,6 +69,7 @@ KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0); ...@@ -69,6 +69,7 @@ KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter); KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable); KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3); KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
/* Global kernel state accessed by nVHE hyp code. */ /* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state); KVM_NVHE_ALIAS(kvm_vgic_global_state);
......
...@@ -805,6 +805,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) ...@@ -805,6 +805,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
* - Mitigated by a branchy loop a CPU specific number of times, and listed * - Mitigated by a branchy loop a CPU specific number of times, and listed
* in our "loop mitigated list". * in our "loop mitigated list".
* - Mitigated in software by the firmware Spectre v2 call. * - Mitigated in software by the firmware Spectre v2 call.
* - Has the ClearBHB instruction to perform the mitigation.
* - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
* software mitigation in the vectors is needed. * software mitigation in the vectors is needed.
* - Has CSV2.3, so is unaffected. * - Has CSV2.3, so is unaffected.
...@@ -820,6 +821,7 @@ enum bhb_mitigation_bits { ...@@ -820,6 +821,7 @@ enum bhb_mitigation_bits {
BHB_LOOP, BHB_LOOP,
BHB_FW, BHB_FW,
BHB_HW, BHB_HW,
BHB_INSN,
}; };
static unsigned long system_bhb_mitigations; static unsigned long system_bhb_mitigations;
...@@ -937,6 +939,9 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, ...@@ -937,6 +939,9 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
if (supports_csv2p3(scope)) if (supports_csv2p3(scope))
return false; return false;
if (supports_clearbhb(scope))
return true;
if (spectre_bhb_loop_affected(scope)) if (spectre_bhb_loop_affected(scope))
return true; return true;
...@@ -984,6 +989,17 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) ...@@ -984,6 +989,17 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED; state = SPECTRE_MITIGATED;
set_bit(BHB_HW, &system_bhb_mitigations); set_bit(BHB_HW, &system_bhb_mitigations);
} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
/*
* Ensure KVM uses the indirect vector which will have ClearBHB
* added.
*/
if (!data->slot)
data->slot = HYP_VECTOR_INDIRECT;
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
/* /*
* Ensure KVM uses the indirect vector which will have the * Ensure KVM uses the indirect vector which will have the
...@@ -1096,3 +1112,16 @@ void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt, ...@@ -1096,3 +1112,16 @@ void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
*updptr++ = cpu_to_le32(insn); *updptr++ = cpu_to_le32(insn);
} }
/*
 * Alternative callback for the clearbhb+isb sequence in
 * mitigate_spectre_bhb_clear_insn: when the ClearBHB mitigation is not
 * in use on this system, overwrite both instructions with NOPs.
 */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 2);

	/* Keep the clearbhb; isb sequence as-is when the mitigation is active */
	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	for (i = 0; i < nr_inst; i++)
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}
...@@ -213,6 +213,7 @@ SYM_CODE_END(__kvm_hyp_vector) ...@@ -213,6 +213,7 @@ SYM_CODE_END(__kvm_hyp_vector)
.else .else
stp x0, x1, [sp, #-16]! stp x0, x1, [sp, #-16]!
mitigate_spectre_bhb_loop x0 mitigate_spectre_bhb_loop x0
mitigate_spectre_bhb_clear_insn
.endif .endif
.if \indirect != 0 .if \indirect != 0
alternative_cb kvm_patch_vector_branch alternative_cb kvm_patch_vector_branch
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment