Commit 85acda3b authored by Dave Martin, committed by Marc Zyngier

KVM: arm64: Save host SVE context as appropriate

This patch adds SVE context saving to the hyp FPSIMD context switch
path.  This means that it is no longer necessary to save the host
SVE state in advance of entering the guest when SVE is in use.

In order to avoid adding pointless complexity to the code, VHE is
assumed if SVE is in use.  VHE is an architectural prerequisite for
SVE, so there is no good reason to turn CONFIG_ARM64_VHE off in
kernels that support both SVE and KVM.

Historically, software models have existed that can expose the
architecturally invalid configuration of SVE without VHE, so if
this situation is detected at kvm_init() time then KVM will be
disabled.
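
The resulting invariant can be condensed into a single predicate. A
sketch of the check this patch introduces, logically equivalent to the
if/else form in the diff below:

static inline bool kvm_arch_check_sve_has_vhe(void)
{
	/* An SVE implementation without VHE is architecturally invalid. */
	return !system_supports_sve() || has_vhe();
}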

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 9a6e5948
@@ -280,6 +280,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
+static inline bool kvm_arch_check_sve_has_vhe(void) { return true; }
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
...
@@ -1130,6 +1130,7 @@ endmenu
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
+	depends on !KVM || ARM64_VHE
 	help
 	  The Scalable Vector Extension (SVE) is an extension to the AArch64
 	  execution state which complements and extends the SIMD functionality
@@ -1155,6 +1156,12 @@ config ARM64_SVE
 	  booting the kernel. If unsure and you are not observing these
 	  symptoms, you should assume that it is safe to say Y.
 
+	  CPUs that support SVE are architecturally required to support the
+	  Virtualization Host Extensions (VHE), so the kernel makes no
+	  provision for supporting SVE alongside KVM without VHE enabled.
+	  Thus, you will need to enable CONFIG_ARM64_VHE if you want to support
+	  KVM in the same kernel image.
+
 config ARM64_MODULE_PLTS
 	bool
 	select HAVE_MOD_ARCH_SPECIFIC
...
@@ -405,6 +405,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
 }
 
+static inline bool kvm_arch_check_sve_has_vhe(void)
+{
+	/*
+	 * The Arm architecture specifies that implementation of SVE
+	 * requires VHE also to be implemented.  The KVM code for arm64
+	 * relies on this when SVE is present:
+	 */
+	if (system_supports_sve())
+		return has_vhe();
+	else
+		return true;
+}
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
...
@@ -59,7 +59,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
-	BUG_ON(system_supports_sve());
 	BUG_ON(!current->mm);
 
 	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
...
@@ -21,6 +21,7 @@
 
 #include <kvm/arm_psci.h>
 
+#include <asm/cpufeature.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
@@ -28,6 +29,7 @@
 #include <asm/kvm_mmu.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
+#include <asm/processor.h>
 #include <asm/thread_info.h>
 
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
@@ -329,6 +331,8 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 void __hyp_text __hyp_switch_fpsimd(u64 esr __always_unused,
 				    struct kvm_vcpu *vcpu)
 {
+	struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
+
 	if (has_vhe())
 		write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
 			     cpacr_el1);
@@ -339,7 +343,21 @@ void __hyp_text __hyp_switch_fpsimd(u64 esr __always_unused,
 	isb();
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
-		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+		/*
+		 * In the SVE case, VHE is assumed: it is enforced by
+		 * Kconfig and kvm_arch_init().
+		 */
+		if (system_supports_sve() &&
+		    (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
+			struct thread_struct *thread = container_of(
+				host_fpsimd,
+				struct thread_struct, uw.fpsimd_state);
+
+			sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
+		} else {
+			__fpsimd_save_state(host_fpsimd);
+		}
+
 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 	}
...
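
The container_of() step in the hunk above recovers the host's
thread_struct from the saved user_fpsimd_state pointer, so that
sve_pffr() can locate the per-task SVE register storage. A minimal
standalone illustration of the pointer arithmetic behind container_of()
(toy struct names, not the kernel's definitions):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for thread_struct and its embedded FPSIMD state. */
struct fpsimd_state { unsigned long fpsr; };
struct thread { int dummy; struct fpsimd_state fp; };

/* container_of: subtract the member's offset to recover the outer struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct thread t;
	struct fpsimd_state *fp = &t.fp;

	/* Recovers &t from the pointer to its embedded member. */
	struct thread *back = container_of(fp, struct thread, fp);
	printf("%s\n", back == &t ? "recovered" : "broken");
	return 0;
}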
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
+#include <linux/bug.h>
 #include <linux/cpu_pm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -41,6 +42,7 @@
 #include <asm/mman.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/virt.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -1574,6 +1576,11 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
+	if (!kvm_arch_check_sve_has_vhe()) {
+		kvm_pr_unimpl("SVE system without VHE unsupported. Broken cpu?");
+		return -ENODEV;
+	}
+
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
 		if (ret < 0) {
...