Commit e98b1085 authored by Anup Patel, committed by Anup Patel

RISC-V: KVM: Factor-out ONE_REG related code to its own source file

The VCPU ONE_REG interface has grown over time and it will continue
to grow with new ISA extensions and other features. Let us move all
ONE_REG related code to its own source file so that vcpu.c only
focuses on high-level VCPU functions.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
parent 52a93d39
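For reference, the registers handled by the code being moved are the ones userspace reaches through the KVM_GET_ONE_REG and KVM_SET_ONE_REG vCPU ioctls. As a rough illustration (not part of this commit), a userspace sketch that reads the RISC-V CONFIG "isa" register might look like the following; it assumes a riscv64 host with matching kernel headers and a vcpu_fd obtained earlier from KVM_CREATE_VCPU:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: fetch the base-ISA bitmap of a vCPU via ONE_REG. */
static int read_isa_config(int vcpu_fd)
{
	unsigned long isa = 0;
	struct kvm_one_reg reg = {
		/* CONFIG register "isa", accessed as an unsigned long (u64 on riscv64) */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
		.addr = (uintptr_t)&isa,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	printf("base ISA bitmap: 0x%lx\n", isa);
	return 0;
}

The same register id passed to KVM_SET_ONE_REG exercises the set path that this patch moves into vcpu_onereg.c.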
@@ -337,6 +337,12 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
...
@@ -19,6 +19,7 @@ kvm-y += vcpu_exit.o
kvm-y += vcpu_fp.o
kvm-y += vcpu_vector.o
kvm-y += vcpu_insn.o
kvm-y += vcpu_onereg.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
...
@@ -13,16 +13,12 @@
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/kvm_vcpu_vector.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
@@ -46,79 +42,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
#define KVM_ISA_EXT_ARR(ext) [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
unsigned long i;
for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
if (kvm_isa_ext_arr[i] == base_ext)
return i;
}
return KVM_RISCV_ISA_EXT_MAX;
}
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_H:
return false;
case KVM_RISCV_ISA_EXT_V:
return riscv_v_vstate_ctrl_user_allowed();
default:
break;
}
return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_A:
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZBB:
return false;
default:
break;
}
return true;
}
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
@@ -176,7 +99,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
int rc;
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
unsigned long host_isa, i;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
@@ -184,12 +106,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
host_isa = kvm_isa_ext_arr[i];
if (__riscv_isa_extension_available(NULL, host_isa) &&
kvm_riscv_vcpu_isa_enable_allowed(i))
set_bit(host_isa, vcpu->arch.isa);
}
kvm_riscv_vcpu_setup_isa(vcpu);
/* Setup vendor, arch, and implementation details */
vcpu->arch.mvendorid = sbi_get_mvendorid();
@@ -294,450 +211,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
return -EINVAL;
reg_val = riscv_cbom_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
return -EINVAL;
reg_val = riscv_cboz_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
reg_val = vcpu->arch.mvendorid;
break;
case KVM_REG_RISCV_CONFIG_REG(marchid):
reg_val = vcpu->arch.marchid;
break;
case KVM_REG_RISCV_CONFIG_REG(mimpid):
reg_val = vcpu->arch.mimpid;
break;
default:
return -EINVAL;
}
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long i, isa_ext, reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
/*
* This ONE REG interface is only defined for
* single letter extensions.
*/
if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
return -EINVAL;
if (!vcpu->arch.ran_atleast_once) {
/* Ignore the enable/disable request for certain extensions */
for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
reg_val &= ~BIT(i);
continue;
}
if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
if (reg_val & BIT(i))
reg_val &= ~BIT(i);
if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
if (!(reg_val & BIT(i)))
reg_val |= BIT(i);
}
reg_val &= riscv_isa_extension_base(NULL);
/* Do not modify anything beyond single letter extensions */
reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
(reg_val & KVM_RISCV_BASE_ISA_MASK);
vcpu->arch.isa[0] = reg_val;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
}
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
return -EOPNOTSUPP;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
return -EOPNOTSUPP;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mvendorid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(marchid):
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.marchid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(mimpid):
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mimpid = reg_val;
else
return -EBUSY;
break;
default:
return -EINVAL;
}
return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
reg_val = cntx->sepc;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
reg_val = ((unsigned long *)cntx)[reg_num];
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
reg_val = (cntx->sstatus & SR_SPP) ?
KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
else
return -EINVAL;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
cntx->sepc = reg_val;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
((unsigned long *)cntx)[reg_num] = reg_val;
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
if (reg_val == KVM_RISCV_MODE_S)
cntx->sstatus |= SR_SPP;
else
cntx->sstatus &= ~SR_SPP;
} else
return -EINVAL;
return 0;
}
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *out_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
kvm_riscv_vcpu_flush_interrupts(vcpu);
*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
} else
*out_val = ((unsigned long *)csr)[reg_num];
return 0;
}
static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
reg_val &= VSIP_VALID_MASK;
reg_val <<= VSIP_TO_HVIP_SHIFT;
}
((unsigned long *)csr)[reg_num] = reg_val;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
break;
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
break;
default:
rc = -EINVAL;
break;
}
if (rc)
return rc;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
break;
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
break;
default:
rc = -EINVAL;
break;
}
if (rc)
return rc;
return 0;
}
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val = 0;
unsigned long host_isa_ext;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
reg_val = 1; /* Mark the given extension as available */
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val;
unsigned long host_isa_ext;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -EOPNOTSUPP;
if (!vcpu->arch.ran_atleast_once) {
/*
* All multi-letter extension and a few single letter
* extension can be disabled
*/
if (reg_val == 1 &&
kvm_riscv_vcpu_isa_enable_allowed(reg_num))
set_bit(host_isa_ext, vcpu->arch.isa);
else if (!reg_val &&
kvm_riscv_vcpu_isa_disable_allowed(reg_num))
clear_bit(host_isa_ext, vcpu->arch.isa);
else
return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
}
return 0;
}
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
KVM_REG_RISCV_VECTOR);
default:
break;
}
return -EINVAL;
}
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
KVM_REG_RISCV_VECTOR);
default:
break;
}
return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
...
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
* Copyright (C) 2023 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <apatel@ventanamicro.com>
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>
#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
#define KVM_ISA_EXT_ARR(ext) \
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
unsigned long i;
for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
if (kvm_isa_ext_arr[i] == base_ext)
return i;
}
return KVM_RISCV_ISA_EXT_MAX;
}
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_H:
return false;
case KVM_RISCV_ISA_EXT_V:
return riscv_v_vstate_ctrl_user_allowed();
default:
break;
}
return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_A:
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZBB:
return false;
default:
break;
}
return true;
}
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
unsigned long host_isa, i;
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
host_isa = kvm_isa_ext_arr[i];
if (__riscv_isa_extension_available(NULL, host_isa) &&
kvm_riscv_vcpu_isa_enable_allowed(i))
set_bit(host_isa, vcpu->arch.isa);
}
}
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
return -EINVAL;
reg_val = riscv_cbom_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
return -EINVAL;
reg_val = riscv_cboz_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
reg_val = vcpu->arch.mvendorid;
break;
case KVM_REG_RISCV_CONFIG_REG(marchid):
reg_val = vcpu->arch.marchid;
break;
case KVM_REG_RISCV_CONFIG_REG(mimpid):
reg_val = vcpu->arch.mimpid;
break;
default:
return -EINVAL;
}
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long i, isa_ext, reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
/*
* This ONE REG interface is only defined for
* single letter extensions.
*/
if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
return -EINVAL;
if (!vcpu->arch.ran_atleast_once) {
/* Ignore the enable/disable request for certain extensions */
for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
reg_val &= ~BIT(i);
continue;
}
if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
if (reg_val & BIT(i))
reg_val &= ~BIT(i);
if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
if (!(reg_val & BIT(i)))
reg_val |= BIT(i);
}
reg_val &= riscv_isa_extension_base(NULL);
/* Do not modify anything beyond single letter extensions */
reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
(reg_val & KVM_RISCV_BASE_ISA_MASK);
vcpu->arch.isa[0] = reg_val;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
}
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
return -EOPNOTSUPP;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
return -EOPNOTSUPP;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mvendorid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(marchid):
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.marchid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(mimpid):
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mimpid = reg_val;
else
return -EBUSY;
break;
default:
return -EINVAL;
}
return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
reg_val = cntx->sepc;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
reg_val = ((unsigned long *)cntx)[reg_num];
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
reg_val = (cntx->sstatus & SR_SPP) ?
KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
else
return -EINVAL;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
cntx->sepc = reg_val;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
((unsigned long *)cntx)[reg_num] = reg_val;
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
if (reg_val == KVM_RISCV_MODE_S)
cntx->sstatus |= SR_SPP;
else
cntx->sstatus &= ~SR_SPP;
} else
return -EINVAL;
return 0;
}
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *out_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
kvm_riscv_vcpu_flush_interrupts(vcpu);
*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
} else
*out_val = ((unsigned long *)csr)[reg_num];
return 0;
}
static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
reg_val &= VSIP_VALID_MASK;
reg_val <<= VSIP_TO_HVIP_SHIFT;
}
((unsigned long *)csr)[reg_num] = reg_val;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
break;
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
break;
default:
rc = -EINVAL;
break;
}
if (rc)
return rc;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
break;
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
break;
default:
rc = -EINVAL;
break;
}
if (rc)
return rc;
return 0;
}
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val = 0;
unsigned long host_isa_ext;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
reg_val = 1; /* Mark the given extension as available */
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val;
unsigned long host_isa_ext;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -EOPNOTSUPP;
if (!vcpu->arch.ran_atleast_once) {
/*
* All multi-letter extension and a few single letter
* extension can be disabled
*/
if (reg_val == 1 &&
kvm_riscv_vcpu_isa_enable_allowed(reg_num))
set_bit(host_isa_ext, vcpu->arch.isa);
else if (!reg_val &&
kvm_riscv_vcpu_isa_disable_allowed(reg_num))
clear_bit(host_isa_ext, vcpu->arch.isa);
else
return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
}
return 0;
}
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
KVM_REG_RISCV_VECTOR);
default:
break;
}
return -EINVAL;
}
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
KVM_REG_RISCV_VECTOR);
default:
break;
}
return -EINVAL;
}