Commit 29e8910a authored by Marc Zyngier, committed by Will Deacon

KVM: arm64: Simplify handling of ARCH_WORKAROUND_2

Since the host kernel is always mitigated, we can drastically simplify
the WA2 handling by keeping the mitigation state ON when entering the
guest. From the guest's point of view this means it is either unaffected
(NOT_REQUIRED) or not mitigated (NOT_AVAIL); the host keeps the
mitigation enabled underneath either way.

This results in a nice simplification of the mitigation space,
and the removal of a lot of code that was never really used anyway.
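
As a rough illustration (not part of the patch), the narrowing can be
sketched as a small standalone C program. The WA2_* constants merely
stand in for the KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_* levels defined in
the uapi header touched below, and the helper name is made up:

	/* Illustrative sketch: collapse any level written by userspace into
	 * the two states the host now reports. */
	#include <stdio.h>

	enum wa2_level {
		WA2_NOT_AVAIL,		/* stand-in for ..._2_NOT_AVAIL */
		WA2_UNKNOWN,		/* stand-in for ..._2_UNKNOWN */
		WA2_AVAIL,		/* stand-in for ..._2_AVAIL */
		WA2_NOT_REQUIRED,	/* stand-in for ..._2_NOT_REQUIRED */
	};

	/* Hypothetical helper mirroring the new switch in kvm_arm_set_fw_reg(). */
	static int wa2_narrow(int level)
	{
		switch (level) {
		case WA2_NOT_AVAIL:
		case WA2_UNKNOWN:
			return WA2_NOT_AVAIL;
		case WA2_AVAIL:
		case WA2_NOT_REQUIRED:
			return WA2_NOT_REQUIRED;
		default:
			return -1;	/* the kernel returns -EINVAL here */
		}
	}

	int main(void)
	{
		for (int l = WA2_NOT_AVAIL; l <= WA2_NOT_REQUIRED; l++)
			printf("level %d narrows to %d\n", l, wa2_narrow(l));
		return 0;
	}

Whatever level userspace writes, the guest only ever observes
NOT_REQUIRED or NOT_AVAIL; the kvm_arm_set_fw_reg() hunk below performs
exactly this mapping.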
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
parent c2876207
@@ -9,9 +9,6 @@
 #include <asm/virt.h>
 
-#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
-#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-						       bool flag)
-{
-	if (flag)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-	else
-		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
@@ -526,23 +526,6 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
-	int cpu, err;
-
-	for_each_possible_cpu(cpu) {
-		u64 *ptr;
-
-		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
-		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 /*
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL		1
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED	2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2	KVM_REG_ARM_FW_REG(2)
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL		0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN		1
@@ -108,20 +108,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 
 int ssbd_state __read_mostly = ARM64_SSBD_UNKNOWN;
 
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-				      __le32 *origptr, __le32 *updptr,
-				      int nr_inst)
-{
-	BUG_ON(nr_inst != 1);
-	/*
-	 * Only allow mitigation on EL1 entry/exit and guest
-	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
-	 * be flipped.
-	 */
-	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
-		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -64,12 +64,10 @@ __efistub__ctype = _ctype;
 #define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
 
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 
 /* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -1549,10 +1549,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	err = hyp_map_aux_data();
-	if (err)
-		kvm_err("Cannot map host auxiliary data: %d\n", err);
-
 	return 0;
 
 out_err:
@@ -116,33 +116,6 @@ el1_hvc_guest:
			  ARM_SMCCC_ARCH_WORKAROUND_2)
 	cbnz	w1, el1_trap
 
-alternative_cb	arm64_enable_wa2_handling
-	b	wa2_end
-alternative_cb_end
-	get_vcpu_ptr	x2, x0
-	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	// Sanitize the argument and update the guest flags
-	ldr	x1, [sp, #8]			// Guest's x1
-	clz	w1, w1				// Murphy's device:
-	lsr	w1, w1, #5			// w1 = !!w1 without using
-	eor	w1, w1, #1			// the flags...
-	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	/* Check that we actually need to perform the call */
-	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-	cbz	x0, wa2_end
-
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-	smc	#0
-
-	/* Don't leak data from the SMC call */
-	mov	x3, xzr
-wa2_end:
-	mov	x2, xzr
-	mov	x1, xzr
-
 wa_epilogue:
 	mov	x0, xzr
 	add	sp, sp, #16
@@ -479,35 +479,6 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
-{
-	if (!cpus_have_final_cap(ARM64_SPECTRE_V4))
-		return false;
-
-	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
-
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-	/*
-	 * The host runs with the workaround always present. If the
-	 * guest wants it disabled, so be it...
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-}
-
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-	/*
-	 * If the guest has disabled the workaround, bring it back on.
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-}
-
 static inline void __kvm_unexpected_el2_exception(void)
 {
 	unsigned long addr, fixup;
@@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__debug_switch_to_guest(vcpu);
 
-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu, host_ctxt);
@@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	__set_host_arch_workaround_state(vcpu);
-
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_disable_traps(vcpu);
@@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_restore_guest_state_vhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);
 
-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu, host_ctxt);
@@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	__set_host_arch_workaround_state(vcpu);
-
 	sysreg_save_guest_state_vhe(guest_ctxt);
 
 	__deactivate_traps(vcpu);
@@ -36,15 +36,13 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		}
 		break;
 	case ARM_SMCCC_ARCH_WORKAROUND_2:
-		switch (kvm_arm_have_ssbd()) {
-		case KVM_SSBD_FORCE_DISABLE:
-		case KVM_SSBD_UNKNOWN:
+		switch (arm64_get_ssbd_state()) {
+		case ARM64_SSBD_FORCE_DISABLE:
+		case ARM64_SSBD_UNKNOWN:
 			break;
-		case KVM_SSBD_KERNEL:
-			val = SMCCC_RET_SUCCESS;
-			break;
-		case KVM_SSBD_FORCE_ENABLE:
-		case KVM_SSBD_MITIGATED:
+		case ARM64_SSBD_KERNEL:
+		case ARM64_SSBD_FORCE_ENABLE:
+		case ARM64_SSBD_MITIGATED:
 			val = SMCCC_RET_NOT_REQUIRED;
 			break;
 		}
@@ -435,17 +435,15 @@ static int get_kernel_wa_level(u64 regid)
 		}
 		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-		switch (kvm_arm_have_ssbd()) {
-		case KVM_SSBD_FORCE_DISABLE:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
-		case KVM_SSBD_KERNEL:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
-		case KVM_SSBD_FORCE_ENABLE:
-		case KVM_SSBD_MITIGATED:
+		switch (arm64_get_ssbd_state()) {
+		case ARM64_SSBD_FORCE_ENABLE:
+		case ARM64_SSBD_MITIGATED:
+		case ARM64_SSBD_KERNEL:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
-		case KVM_SSBD_UNKNOWN:
+		case ARM64_SSBD_UNKNOWN:
+		case ARM64_SSBD_FORCE_DISABLE:
 		default:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 		}
 	}
@@ -462,14 +460,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		val = kvm_psci_version(vcpu, vcpu->kvm);
 		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
 		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
-			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
 		break;
 	default:
 		return -ENOENT;
@@ -527,34 +519,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
 			return -EINVAL;
 
-		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (get_kernel_wa_level(reg->id) < wa_level)
-			return -EINVAL;
-
 		/* The enabled bit must not be set unless the level is AVAIL. */
-		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    wa_level != val)
+		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
 			return -EINVAL;
 
-		/* Are we finished or do we need to check the enable bit ? */
-		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
-			return 0;
-
 		/*
-		 * If this kernel supports the workaround to be switched on
-		 * or off, make sure it matches the requested setting.
+		 * Map all the possible incoming states to the only two we
+		 * really want to deal with.
 		 */
-		switch (wa_level) {
-		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
-			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 			break;
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
 		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
 			break;
+		default:
+			return -EINVAL;
 		}
 
+		/*
+		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+		 * other way around.
+		 */
+		if (get_kernel_wa_level(reg->id) < wa_level)
+			return -EINVAL;
+
 		return 0;
 	default:
 		return -ENOENT;
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.reset_state.reset = false;
 	}
 
-	/* Default workaround setup is enabled (if supported) */
-	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
 	/* Reset timer */
 	ret = kvm_timer_vcpu_reset(vcpu);
 out:
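
For completeness, a hypothetical userspace sketch (not part of the
patch) of reading the WA2 firmware pseudo-register after this change.
vcpu_fd is assumed to be a vCPU file descriptor obtained via
KVM_CREATE_VCPU; the register id macro comes from the uapi header above:

	/* Hypothetical sketch: query the guest-visible WA2 level. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int read_wa2_level(int vcpu_fd, uint64_t *level)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
			.addr = (uint64_t)(uintptr_t)level,
		};

		/* After this patch the returned level is either NOT_AVAIL or
		 * NOT_REQUIRED, and the ENABLED bit is never set. */
		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}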