Commit c118bbb5 authored by Andre Przywara, committed by Marc Zyngier

arm64: KVM: Propagate full Spectre v2 workaround state to KVM guests

Recent commits added the explicit notion of "workaround not required" to
the state of the Spectre v2 (aka. BP_HARDENING) workaround, where we
previously had only "needed" and "unknown".

Export this knowledge to the rest of the kernel and enhance the existing
kvm_arm_harden_branch_predictor() to report this new state as well.
Expose the new state to guests when they use KVM's firmware interface
emulation.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 80f393a2
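
For illustration only (this sketch is not part of the commit): a Linux guest probing a host with this change applied could discover the branch-predictor hardening state via the SMCCC 1.1 feature query, roughly as below. The helper name guest_probe_bp_hardening() is hypothetical; the SMCCC return values mirror those used in the virt/kvm/arm/psci.c hunk at the end of this commit.

/* Hypothetical guest-side sketch, not part of this commit. */
#include <linux/arm-smccc.h>

static int guest_probe_bp_hardening(void)
{
	struct arm_smccc_res res;

	/* SMCCC v1.1 feature query for WORKAROUND_1, via the HVC conduit. */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	switch ((int)res.a0) {
	case SMCCC_RET_NOT_REQUIRED:
		return 0;	/* host reports CPUs are not affected */
	case SMCCC_RET_SUCCESS:
		return 1;	/* workaround needed and available */
	default:
		return -1;	/* SMCCC_RET_NOT_SUPPORTED: state unknown */
	}
}
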
arch/arm/include/asm/kvm_host.h
@@ -362,7 +362,11 @@ static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_vhe_guest_enter(void) {}
 static inline void kvm_arm_vhe_guest_exit(void) {}
 
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN		-1
+#define KVM_BP_HARDEN_WA_NEEDED		0
+#define KVM_BP_HARDEN_NOT_REQUIRED	1
+
+static inline int kvm_arm_harden_branch_predictor(void)
 {
 	switch(read_cpuid_part()) {
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@@ -370,10 +374,12 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 	case ARM_CPU_PART_CORTEX_A12:
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_CORTEX_A17:
-		return true;
+		return KVM_BP_HARDEN_WA_NEEDED;
 #endif
+	case ARM_CPU_PART_CORTEX_A7:
+		return KVM_BP_HARDEN_NOT_REQUIRED;
 	default:
-		return false;
+		return KVM_BP_HARDEN_UNKNOWN;
 	}
 }
arch/arm64/include/asm/cpufeature.h
@@ -614,6 +614,12 @@ static inline bool system_uses_irq_prio_masking(void)
 	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
 }
 
+#define ARM64_BP_HARDEN_UNKNOWN		-1
+#define ARM64_BP_HARDEN_WA_NEEDED	0
+#define ARM64_BP_HARDEN_NOT_REQUIRED	1
+
+int get_spectre_v2_workaround_state(void);
+
 #define ARM64_SSBD_UNKNOWN		-1
 #define ARM64_SSBD_FORCE_DISABLE	0
 #define ARM64_SSBD_KERNEL		1
arch/arm64/include/asm/kvm_host.h
@@ -620,9 +620,21 @@ static inline void kvm_arm_vhe_guest_exit(void)
 	isb();
 }
 
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN		-1
+#define KVM_BP_HARDEN_WA_NEEDED		0
+#define KVM_BP_HARDEN_NOT_REQUIRED	1
+
+static inline int kvm_arm_harden_branch_predictor(void)
 {
-	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+	switch (get_spectre_v2_workaround_state()) {
+	case ARM64_BP_HARDEN_WA_NEEDED:
+		return KVM_BP_HARDEN_WA_NEEDED;
+	case ARM64_BP_HARDEN_NOT_REQUIRED:
+		return KVM_BP_HARDEN_NOT_REQUIRED;
+	case ARM64_BP_HARDEN_UNKNOWN:
+	default:
+		return KVM_BP_HARDEN_UNKNOWN;
+	}
 }
 
 #define KVM_SSBD_UNKNOWN		-1
arch/arm64/kernel/cpu_errata.c
@@ -554,6 +554,17 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 static bool __hardenbp_enab = true;
 static bool __spectrev2_safe = true;
 
+int get_spectre_v2_workaround_state(void)
+{
+	if (__spectrev2_safe)
+		return ARM64_BP_HARDEN_NOT_REQUIRED;
+
+	if (!__hardenbp_enab)
+		return ARM64_BP_HARDEN_UNKNOWN;
+
+	return ARM64_BP_HARDEN_WA_NEEDED;
+}
+
 /*
  * List of CPUs that do not need any Spectre-v2 mitigation at all.
  */
@@ -854,13 +865,15 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	if (__spectrev2_safe)
+	switch (get_spectre_v2_workaround_state()) {
+	case ARM64_BP_HARDEN_NOT_REQUIRED:
 		return sprintf(buf, "Not affected\n");
-
-	if (__hardenbp_enab)
+	case ARM64_BP_HARDEN_WA_NEEDED:
 		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-
-	return sprintf(buf, "Vulnerable\n");
+	case ARM64_BP_HARDEN_UNKNOWN:
+	default:
+		return sprintf(buf, "Vulnerable\n");
+	}
 }
 
 ssize_t cpu_show_spec_store_bypass(struct device *dev,
virt/kvm/arm/psci.c
@@ -401,9 +401,17 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		feature = smccc_get_arg1(vcpu);
 		switch(feature) {
 		case ARM_SMCCC_ARCH_WORKAROUND_1:
-			if (kvm_arm_harden_branch_predictor())
+			switch (kvm_arm_harden_branch_predictor()) {
+			case KVM_BP_HARDEN_UNKNOWN:
+				break;
+			case KVM_BP_HARDEN_WA_NEEDED:
 				val = SMCCC_RET_SUCCESS;
+				break;
+			case KVM_BP_HARDEN_NOT_REQUIRED:
+				val = SMCCC_RET_NOT_REQUIRED;
+				break;
+			}
 			break;
 		case ARM_SMCCC_ARCH_WORKAROUND_2:
 			switch (kvm_arm_have_ssbd()) {
 			case KVM_SSBD_FORCE_DISABLE:
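
For context: val is initialised to SMCCC_RET_NOT_SUPPORTED at the top of kvm_hvc_call_handler(), so the KVM_BP_HARDEN_UNKNOWN arm deliberately leaves it untouched. A guest on a CPU whose state cannot be determined therefore keeps seeing "not supported", exactly as before this change.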