Commit 0f4b8257 authored by Vincent Chen, committed by Palmer Dabbelt

riscv: KVM: Add vector lazy save/restore support

This patch adds vector context save/restore for guest VCPUs. To reduce the
impact on KVM performance, the implementation imitates the FP context
switch mechanism to lazily store and restore the vector context only when
the kernel enters/exits the in-kernel run loop and not during the KVM
world switch.
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Acked-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20230605110724.21391-20-andy.chiu@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent bf78f1ea
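
Before the diff itself, a minimal sketch of the lazy-switch pattern described above, mirroring the existing FP path. The snippets show only the vector calls this patch adds to kvm_arch_vcpu_load()/kvm_arch_vcpu_put(); everything else those functions do is elided, so treat this as orientation, not the full functions.

	/* Entering the in-kernel run loop: stash host V state, load the guest's. */
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);

	/* Leaving the run loop: save guest V state only if sstatus.VS is dirty,
	 * then bring the host state back. The world-switch path is untouched.
	 */
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);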
@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <asm/hwcap.h>
 #include <asm/kvm_aia.h>
+#include <asm/ptrace.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_insn.h>
 #include <asm/kvm_vcpu_sbi.h>
@@ -145,6 +146,7 @@ struct kvm_cpu_context {
 	unsigned long sstatus;
 	unsigned long hstatus;
 	union __riscv_fp_state fp;
+	struct __riscv_v_ext_state vector;
 };

 struct kvm_vcpu_csr {
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <vincent.chen@sifive.com>
 *     Greentime Hu <greentime.hu@sifive.com>
 */

#ifndef __KVM_VCPU_RISCV_VECTOR_H
#define __KVM_VCPU_RISCV_VECTOR_H

#include <linux/types.h>

#ifdef CONFIG_RISCV_ISA_V
#include <asm/vector.h>
#include <asm/kvm_host.h>

static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)
{
	__riscv_v_vstate_save(&context->vector, context->vector.datap);
}

static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)
{
	__riscv_v_vstate_restore(&context->vector, context->vector.datap);
}

void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
				      unsigned long *isa);
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
					 unsigned long *isa);
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
					struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
#else

struct kvm_cpu_context;

static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
						    unsigned long *isa)
{
}

static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
						       unsigned long *isa)
{
}

static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
}

static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
}

static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
						      struct kvm_cpu_context *cntx)
{
	return 0;
}

static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
}
#endif

int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg,
				  unsigned long rtype);
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg,
				  unsigned long rtype);
#endif
@@ -204,6 +204,13 @@ enum KVM_RISCV_SBI_EXT_ID {
 #define KVM_REG_RISCV_SBI_MULTI_REG_LAST	\
 		KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1)

+/* V extension registers are mapped as type 9 */
+#define KVM_REG_RISCV_VECTOR		(0x09 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_VECTOR_CSR_REG(name)	\
+		(offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_VECTOR_REG(n)	\
+		((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
+
 #endif

 #endif /* __LINUX_KVM_RISCV_H */
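
With these IDs in place, userspace on a RISC-V host can read individual vector CSRs and whole vector registers through the standard one-reg interface. A hedged sketch, not taken from the patch: vcpu_fd is an assumed, already-created vCPU descriptor, rv64 is assumed (so CSR slots are u64-sized), and the whole-register buffer must be vlenb bytes (16 when VLEN=128, the size assumed here).

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdint.h>
	#include <stddef.h>

	/* Read the guest's vstart CSR and the full contents of v2. */
	static int read_vector_state(int vcpu_fd, uint64_t *vstart, void *v2)
	{
		struct kvm_one_reg reg;

		/* CSR slot: an index within the type-9 (vector) space */
		reg.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			 KVM_REG_RISCV_VECTOR |
			 KVM_REG_RISCV_VECTOR_CSR_REG(vstart);
		reg.addr = (uint64_t)(unsigned long)vstart;
		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
			return -1;

		/* Whole register: indices start after the CSR slots */
		reg.id = KVM_REG_RISCV | KVM_REG_SIZE_U128 |
			 KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(2);
		reg.addr = (uint64_t)(unsigned long)v2;
		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}

Note that KVM_REG_SIZE_U128 matches vlenb only for VLEN=128; a different guest VLEN needs the corresponding KVM_REG_SIZE_* value, otherwise kvm_riscv_vcpu_vreg_addr() below rejects the access with -EINVAL.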
@@ -17,6 +17,7 @@ kvm-y += mmu.o
 kvm-y += vcpu.o
 kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
+kvm-y += vcpu_vector.o
 kvm-y += vcpu_insn.o
 kvm-y += vcpu_switch.o
 kvm-y += vcpu_sbi.o
@@ -22,6 +22,8 @@
 #include <asm/cacheflush.h>
 #include <asm/hwcap.h>
 #include <asm/sbi.h>
+#include <asm/vector.h>
+#include <asm/kvm_vcpu_vector.h>

 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	KVM_GENERIC_VCPU_STATS(),
@@ -139,6 +141,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_fp_reset(vcpu);

+	kvm_riscv_vcpu_vector_reset(vcpu);
+
 	kvm_riscv_vcpu_timer_reset(vcpu);

 	kvm_riscv_vcpu_aia_reset(vcpu);
@@ -199,6 +203,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	cntx->hstatus |= HSTATUS_SPVP;
 	cntx->hstatus |= HSTATUS_SPV;

+	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+		return -ENOMEM;
+
 	/* By default, make CY, TM, and IR counters accessible in VU mode */
 	reset_csr->scounteren = 0x7;
@@ -242,6 +249,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	/* Free unused pages pre-allocated for G-stage page table mappings */
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+
+	/* Free vector context space for host and guest kernel */
+	kvm_riscv_vcpu_free_vector_context(vcpu);
 }

 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -680,6 +690,9 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
 	case KVM_REG_RISCV_SBI_EXT:
 		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
+	case KVM_REG_RISCV_VECTOR:
+		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
+						     KVM_REG_RISCV_VECTOR);
 	default:
 		break;
 	}
@@ -709,6 +722,9 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
 	case KVM_REG_RISCV_SBI_EXT:
 		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
+	case KVM_REG_RISCV_VECTOR:
+		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
+						     KVM_REG_RISCV_VECTOR);
 	default:
 		break;
 	}
@@ -1003,6 +1019,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
 	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
 					vcpu->arch.isa);
+	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
+	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
+					    vcpu->arch.isa);

 	kvm_riscv_vcpu_aia_load(vcpu, cpu);
@@ -1022,6 +1041,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

 	kvm_riscv_vcpu_timer_save(vcpu);
+	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
+					 vcpu->arch.isa);
+	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

 	csr->vsstatus = csr_read(CSR_VSSTATUS);
 	csr->vsie = csr_read(CSR_VSIE);
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <vincent.chen@sifive.com>
 *     Greentime Hu <greentime.hu@sifive.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#ifdef CONFIG_RISCV_ISA_V
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
	unsigned long *isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_VS;
	if (riscv_isa_extension_available(isa, v)) {
		cntx->sstatus |= SR_VS_INITIAL;
		WARN_ON(!cntx->vector.datap);
		memset(cntx->vector.datap, 0, riscv_v_vsize);
	} else {
		cntx->sstatus |= SR_VS_OFF;
	}
}

static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_VS;
	cntx->sstatus |= SR_VS_CLEAN;
}

void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
				      unsigned long *isa)
{
	/* Lazy: save only if the guest actually dirtied the V state */
	if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_save(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
					 unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_restore(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_save(cntx);
}

void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_restore(cntx);
}

int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
					struct kvm_cpu_context *cntx)
{
	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
	if (!cntx->vector.datap)
		return -ENOMEM;

	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
	if (!vcpu->arch.host_context.vector.datap)
		return -ENOMEM;

	return 0;
}

void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.guest_reset_context.vector.datap);
	kfree(vcpu->arch.host_context.vector.datap);
}
#endif

static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
				      unsigned long reg_num,
				      size_t reg_size)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	void *reg_val;
	size_t vlenb = riscv_v_vsize / 32;	/* riscv_v_vsize covers all 32 regs */

	if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
		/* CSR slots (vstart, vl, vtype, vcsr) are unsigned-long sized */
		if (reg_size != sizeof(unsigned long))
			return NULL;
		switch (reg_num) {
		case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
			reg_val = &cntx->vector.vstart;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
			reg_val = &cntx->vector.vl;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
			reg_val = &cntx->vector.vtype;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
			reg_val = &cntx->vector.vcsr;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
		default:
			return NULL;
		}
	} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
		/* Whole vector registers must be accessed at their full width */
		if (reg_size != vlenb)
			return NULL;
		reg_val = cntx->vector.datap +
			  (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
	} else {
		return NULL;
	}

	return reg_val;
}

int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg,
				  unsigned long rtype)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val = NULL;
	size_t reg_size = KVM_REG_SIZE(reg->id);

	if (rtype == KVM_REG_RISCV_VECTOR &&
	    riscv_isa_extension_available(isa, v)) {
		reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
	}

	if (!reg_val)
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, reg_size))
		return -EFAULT;

	return 0;
}

int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg,
				  unsigned long rtype)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val = NULL;
	size_t reg_size = KVM_REG_SIZE(reg->id);

	if (rtype == KVM_REG_RISCV_VECTOR &&
	    riscv_isa_extension_available(isa, v)) {
		reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
	}

	if (!reg_val)
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, reg_size))
		return -EFAULT;

	return 0;
}
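
A closing note on the sstatus.VS states that make the save path above lazy. They use the same encoding as the FS field the FP path relies on, and the dirty tracking is the behavior mandated by the RISC-V privileged spec:

	/* SR_VS_OFF     - V not exposed to the guest; save/restore skipped
	 * SR_VS_INITIAL - set at vCPU reset, right after the context is zeroed
	 * SR_VS_CLEAN   - in-memory context matches the register file
	 * SR_VS_DIRTY   - set by hardware once the guest modifies V state;
	 *                 the only state in which
	 *                 kvm_riscv_vcpu_guest_vector_save() actually saves
	 */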