Commit c350717e authored by Will Deacon

Merge branch 'for-next/kvm/errata' into for-next/core

KVM CPU errata rework
(Andrew Scull and Marc Zyngier)
* for-next/kvm/errata:
  KVM: arm64: Move __load_guest_stage2 to kvm_mmu.h
  arm64: Unify WORKAROUND_SPECULATIVE_AT_{NVHE,VHE}
parents d2786527 fe677be9
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -528,13 +528,13 @@ config ARM64_ERRATUM_1418040
 	  If unsure, say Y.
 
-config ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+config ARM64_WORKAROUND_SPECULATIVE_AT
 	bool
 
 config ARM64_ERRATUM_1165522
-	bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	bool "Cortex-A76: 1165522: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
 	default y
-	select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+	select ARM64_WORKAROUND_SPECULATIVE_AT
 	help
 	  This option adds a workaround for ARM Cortex-A76 erratum 1165522.
@@ -544,10 +544,23 @@ config ARM64_ERRATUM_1165522
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_1319367
+	bool "Cortex-A57/A72: 1319537: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	select ARM64_WORKAROUND_SPECULATIVE_AT
+	help
+	  This option adds workarounds for ARM Cortex-A57 erratum 1319537
+	  and Cortex-A72 erratum 1319367.
+
+	  Cortex-A57 and A72 cores could end up with corrupted TLBs by
+	  speculating an AT instruction during a guest context switch.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_1530923
-	bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	bool "Cortex-A55: 1530923: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
 	default y
-	select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+	select ARM64_WORKAROUND_SPECULATIVE_AT
 	help
 	  This option adds a workaround for ARM Cortex-A55 erratum 1530923.
@@ -576,22 +589,6 @@ config ARM64_ERRATUM_1286807
 	  invalidated has been observed by other observers. The
 	  workaround repeats the TLBI+DSB operation.
 
-config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
-	bool
-
-config ARM64_ERRATUM_1319367
-	bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
-	default y
-	select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
-	help
-	  This option adds workarounds for ARM Cortex-A57 erratum 1319537
-	  and Cortex-A72 erratum 1319367.
-
-	  Cortex-A57 and A72 cores could end up with corrupted TLBs by
-	  speculating an AT instruction during a guest context switch.
-
-	  If unsure, say Y.
-
 config ARM64_ERRATUM_1463225
 	bool "Cortex-A76: Software Step might prevent interrupt recognition"
 	default y
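All three user-visible erratum options now select the same hidden symbol, so consumers need a single guard. A minimal sketch of the consuming side, assuming a hypothetical apply_workaround() stand-in (illustrative only, not code from this commit):

    /*
     * Any of ARM64_ERRATUM_1165522/1319367/1530923 selects the hidden
     * CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT, so one compile-time guard
     * plus one runtime capability check covers the VHE and nVHE flavours.
     */
    static void maybe_apply_speculative_at_workaround(void)
    {
    #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
    	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT))
    		apply_workaround();	/* hypothetical stand-in */
    #endif
    }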
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -44,7 +44,7 @@
 #define ARM64_SSBS				34
 #define ARM64_WORKAROUND_1418040		35
 #define ARM64_HAS_SB				36
-#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE	37
+#define ARM64_WORKAROUND_SPECULATIVE_AT		37
 #define ARM64_HAS_ADDRESS_AUTH_ARCH		38
 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF		39
 #define ARM64_HAS_GENERIC_AUTH_ARCH		40
@@ -55,15 +55,14 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
 #define ARM64_WORKAROUND_1542419		47
-#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE	48
-#define ARM64_HAS_E0PD				49
-#define ARM64_HAS_RNG				50
-#define ARM64_HAS_AMU_EXTN			51
-#define ARM64_HAS_ADDRESS_AUTH			52
-#define ARM64_HAS_GENERIC_AUTH			53
-#define ARM64_HAS_32BIT_EL1			54
-#define ARM64_BTI				55
+#define ARM64_HAS_E0PD				48
+#define ARM64_HAS_RNG				49
+#define ARM64_HAS_AMU_EXTN			50
+#define ARM64_HAS_ADDRESS_AUTH			51
+#define ARM64_HAS_GENERIC_AUTH			52
+#define ARM64_HAS_32BIT_EL1			53
+#define ARM64_BTI				54
 
-#define ARM64_NCAPS				56
+#define ARM64_NCAPS				55
 
 #endif /* __ASM_CPUCAPS_H */
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -573,10 +573,6 @@ static inline bool kvm_arch_requires_vhe(void)
 	if (system_supports_sve())
 		return true;
 
-	/* Some implementations have defects that confine them to VHE */
-	if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
-		return true;
-
 	return false;
 }
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -10,7 +10,6 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/alternative.h>
-#include <asm/kvm_mmu.h>
 #include <asm/sysreg.h>
 
 #define __hyp_text __section(.hyp.text) notrace
@@ -88,22 +87,5 @@ void deactivate_traps_vhe_put(void);
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
 
 void __noreturn __hyp_do_panic(unsigned long, ...);
 
-/*
- * Must be called from hyp code running at EL2 with an updated VTTBR
- * and interrupts disabled.
- */
-static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
-{
-	write_sysreg(kvm->arch.vtcr, vtcr_el2);
-	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
-
-	/*
-	 * ARM errata 1165522 and 1530923 require the actual execution of the
-	 * above before we can switch to the EL1/EL0 translation regime used by
-	 * the guest.
-	 */
-	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
-}
-
 #endif /* __ARM64_KVM_HYP_H__ */
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -604,5 +604,22 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
+/*
+ * Must be called from hyp code running at EL2 with an updated VTTBR
+ * and interrupts disabled.
+ */
+static __always_inline void __load_guest_stage2(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vtcr, vtcr_el2);
+	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
+
+	/*
+	 * ARM errata 1165522 and 1530923 require the actual execution of the
+	 * above before we can switch to the EL1/EL0 translation regime used by
+	 * the guest.
+	 */
+	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
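A hedged sketch of the calling contract the block comment states: the helper must run from hyp code at EL2 with interrupts masked, and on unaffected CPUs the caller still owes one ISB of its own. The wrapper below is a simplified stand-in, not code from this commit; only __load_guest_stage2() and the ALTERNATIVE idiom come from the diff:

    static void load_guest_stage2_with_barrier(struct kvm *kvm)
    {
    	/*
    	 * Writes VTCR_EL2 and VTTBR_EL2; on CPUs with errata 1165522,
    	 * 1319367 or 1530923 the helper's ALTERNATIVE is patched to an
    	 * ISB so both writes take effect before any AT can speculate.
    	 */
    	__load_guest_stage2(kvm);

    	/*
    	 * Unaffected CPUs still need a barrier before relying on the new
    	 * stage-2 context; the inverse alternative keeps exactly one ISB
    	 * on every path.
    	 */
    	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
    }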
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -635,7 +635,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)
+#if defined(CONFIG_HARDEN_EL2_VECTORS)
 
 static const struct midr_range ca57_a72[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -757,12 +757,16 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = {
 };
 #endif
 
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
-static const struct midr_range erratum_speculative_at_vhe_list[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+static const struct midr_range erratum_speculative_at_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1165522
 	/* Cortex A76 r0p0 to r2p0 */
 	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1319367
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_1530923
 	/* Cortex A55 r0p0 to r2p0 */
 	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
@@ -897,11 +901,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
 	},
 #endif
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
 	{
-		.desc = "ARM errata 1165522 or 1530923",
-		.capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
-		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
+		.desc = "ARM errata 1165522, 1319367, or 1530923",
+		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
+		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
@@ -934,13 +938,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_neoverse_n1_erratum_1542419,
 		.cpu_enable = cpu_enable_trap_ctr_access,
 	},
-#endif
-#ifdef CONFIG_ARM64_ERRATUM_1319367
-	{
-		.desc = "ARM erratum 1319367",
-		.capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
-		ERRATA_MIDR_RANGE_LIST(ca57_a72),
-	},
 #endif
 	{
 	}
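The unified capability now matches one MIDR list covering all three affected parts. As a rough standalone illustration of the matching idea (a simplification under assumed field handling; the kernel's is_midr_in_range() is the real thing):

    #include <stdbool.h>
    #include <stdint.h>

    /* MIDR_EL1 layout: revision [3:0], part number [15:4], architecture
     * [19:16], variant [23:20], implementer [31:24]. */
    struct midr_range {
    	uint32_t model;		/* MIDR with variant/revision masked out */
    	uint8_t rv_min, rv_max;	/* packed (variant << 4) | revision */
    };

    static bool midr_in_range(uint32_t midr, const struct midr_range *r)
    {
    	uint32_t model = midr & ~UINT32_C(0x00f0000f);	/* drop var/rev */
    	uint8_t rv = ((midr >> 16) & 0xf0) | (midr & 0xf);

    	return model == r->model && rv >= r->rv_min && rv <= r->rv_max;
    }

    static bool midr_in_range_list(uint32_t midr,
    			       const struct midr_range *list, int n)
    {
    	for (int i = 0; i < n; i++)
    		if (midr_in_range(midr, &list[i]))
    			return true;
    	return false;
    }

A MIDR_ALL_VERSIONS() entry then simply spans rv_min = 0x00 to rv_max = 0xff, which is how one list can mix the pinned A76/A55 revision windows with the all-revisions A57/A72 entries.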
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -138,7 +138,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 	write_sysreg(val, cptr_el2);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
 		isb();
@@ -181,7 +181,7 @@ static void deactivate_traps_vhe(void)
 	 * above before we can switch to the EL2/EL0 translation regime used by
 	 * the host.
 	 */
-	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
+	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
 	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
 	write_sysreg(vectors, vbar_el1);
@@ -192,7 +192,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
 	u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
 		/*
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -118,7 +118,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
 
-	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (has_vhe() ||
+	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
 		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
 	} else if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +150,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+	if (!has_vhe() &&
+	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
 	    ctxt->__hyp_running_vcpu) {
 		/*
 		 * Must only be done for host registers, hence the context
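The added has_vhe() terms keep the now-shared capability from dragging VHE systems into the nVHE-only restore sequencing. A compact, hedged sketch of the resulting decision (plain C restating the condition, not kernel code):

    /*
     *   VHE,  workaround set or clear -> restore SCTLR/TCR directly
     *   nVHE, workaround clear        -> restore SCTLR/TCR directly
     *   nVHE, workaround set          -> sequence the restore around the
     *                                    speculative-AT workaround
     */
    static bool restore_el1_regs_directly(bool vhe, bool workaround)
    {
    	return vhe || !workaround;
    }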
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 	local_irq_save(cxt->flags);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/*
 		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
 		 * we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 						  struct tlb_inv_context *cxt)
 {
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
 
 		/*
@@ -79,8 +79,9 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 		isb();
 	}
 
+	/* __load_guest_stage2() includes an ISB for the workaround. */
 	__load_guest_stage2(kvm);
-	isb();
+	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
 static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
@@ -103,7 +104,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 	isb();
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/* Restore the registers to what they were */
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +118,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
 	write_sysreg(0, vttbr_el2);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/* Ensure write of the host VMID */
 		isb();
 		/* Restore the host's TCR_EL1 */
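Both flavours now hinge on a pair of runtime-patched barriers. A standalone toy of the ALTERNATIVE() idea, under the assumption that patching simply swaps a recorded instruction when the capability is detected at boot (the real implementation rewrites instruction bytes in place):

    #include <stdbool.h>
    #include <stdio.h>

    enum insn { NOP, ISB };

    struct alt_site {
    	enum insn *site;	/* where the default instruction was emitted */
    	enum insn alt;		/* what to patch in if the capability is set */
    };

    static void apply_alternatives(struct alt_site *sites, int n, bool cap)
    {
    	for (int i = 0; i < n; i++)
    		if (cap)
    			*sites[i].site = sites[i].alt;
    }

    int main(void)
    {
    	enum insn load_stage2_tail = NOP;	/* ALTERNATIVE("nop", "isb", cap) */
    	enum insn tlb_switch_tail = ISB;	/* ALTERNATIVE("isb", "nop", cap) */
    	struct alt_site sites[] = {
    		{ &load_stage2_tail, ISB },
    		{ &tlb_switch_tail, NOP },
    	};

    	/* Flip the flag to model an unaffected CPU: either way, exactly
    	 * one ISB orders the VTCR/VTTBR writes. */
    	apply_alternatives(sites, 2, true);

    	printf("load_guest_stage2 tail: %s, tlb-switch tail: %s\n",
    	       load_stage2_tail == ISB ? "isb" : "nop",
    	       tlb_switch_tail == ISB ? "isb" : "nop");
    	return 0;
    }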