Commit 688f1e4b authored by Will Deacon

arm64: Rename ARM64_HARDEN_BRANCH_PREDICTOR to ARM64_SPECTRE_V2

For better or worse, the world knows about "Spectre" and not about
"Branch predictor hardening". Rename ARM64_HARDEN_BRANCH_PREDICTOR to
ARM64_SPECTRE_V2 as part of moving all of the Spectre mitigations into
their own little corner.
Signed-off-by: Will Deacon <will@kernel.org>
parent b181048f
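
Call sites are otherwise unchanged by the rename: code keeps testing the capability through the usual cpus_have_const_cap() helper from <asm/cpufeature.h>, just under the new name. A minimal sketch of the pattern (the function name below is hypothetical, not part of this commit):

	/* Hypothetical caller: bail out early on CPUs not affected by Spectre-v2. */
	static void maybe_harden_branch_predictor(void)
	{
		if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
			return;

		/* ... apply the CPU-specific hardening callback ... */
	}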
@@ -31,7 +31,7 @@
 #define ARM64_HAS_DCPOP			21
 #define ARM64_SVE			22
 #define ARM64_UNMAP_KERNEL_AT_EL0	23
-#define ARM64_HARDEN_BRANCH_PREDICTOR	24
+#define ARM64_SPECTRE_V2		24
 #define ARM64_HAS_RAS_EXTN		25
 #define ARM64_WORKAROUND_843419		26
 #define ARM64_HAS_CACHE_IDC		27

@@ -435,14 +435,13 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
  *
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- *   hardening sequence is placed in one of the vector slots, which is
- *   executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ *   placed in one of the vector slots, which is executed before jumping
+ *   to the real vectors.
  *
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- *   hardening sequence is mapped next to the idmap page, and executed
- *   before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ *   containing the hardening sequence is mapped next to the idmap page,
+ *   and executed before jumping to the real vectors.
  *
  * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
  *   empty slot is selected, mapped next to the idmap page, and
@@ -464,7 +463,7 @@ static inline void *kvm_get_hyp_vector(void)
 	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
 
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
 		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}
@@ -485,15 +484,15 @@ static inline void *kvm_get_hyp_vector(void)
 static inline int kvm_map_vectors(void)
 {
 	/*
-	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
+	 * SV2  = ARM64_SPECTRE_V2
 	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
 	 *
-	 * !HBP + !HEL2 -> use direct vectors
-	 *  HBP + !HEL2 -> use hardened vectors in place
-	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  HBP +  HEL2 -> use hardened vertors and use exec mapping
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
	 */
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
 		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
 	}
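
The four-line table in the comment above is a decision matrix over the two capabilities. As a standalone illustration, it can be written as plain control flow; everything below (the enum and function names) is hypothetical, and only the SV2/HEL2 abbreviations come from the comment:

	#include <stdbool.h>

	/* Hypothetical encoding of the four vector-mapping strategies. */
	enum hyp_vector_strategy {
		DIRECT_VECTORS,		/* !SV2 + !HEL2 */
		HARDENED_IN_PLACE,	/*  SV2 + !HEL2 */
		EMPTY_SLOT_EXEC_MAP,	/* !SV2 +  HEL2 */
		HARDENED_EXEC_MAP,	/*  SV2 +  HEL2 */
	};

	static enum hyp_vector_strategy pick_strategy(bool sv2, bool hel2)
	{
		if (!hel2)
			return sv2 ? HARDENED_IN_PLACE : DIRECT_VECTORS;
		return sv2 ? HARDENED_EXEC_MAP : EMPTY_SLOT_EXEC_MAP;
	}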
@@ -56,7 +56,7 @@ static inline void arm64_apply_bp_hardening(void)
 {
 	struct bp_hardening_data *d;
 
-	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
 		return;
 
 	d = arm64_get_bp_hardening_data();
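
The hunk's context window cuts the helper off right after the data lookup. Judging by the names involved, the remainder dispatches through the per-CPU hardening callback; a sketch of that pattern follows, with the tail inferred rather than taken from this diff:

	static inline void arm64_apply_bp_hardening(void)
	{
		struct bp_hardening_data *d;

		if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
			return;

		d = arm64_get_bp_hardening_data();
		if (d->fn)	/* inferred: invoke the installed hardening callback */
			d->fn();
	}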
@@ -877,7 +877,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 	{
 		.desc = "Branch predictor hardening",
-		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		.capability = ARM64_SPECTRE_V2,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = check_branch_predictor,
 		.cpu_enable = cpu_enable_branch_predictor_hardening,