Commit 7a5189c5 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull RISC-V kvm updates from Paolo Bonzini:

 - Allow unloading KVM module

 - Allow KVM user-space to set mvendorid, marchid, and mimpid (see the
   userspace sketch after the shortlog below)

 - Several fixes and cleanups

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  RISC-V: KVM: Add ONE_REG interface for mvendorid, marchid, and mimpid
  RISC-V: KVM: Save mvendorid, marchid, and mimpid when creating VCPU
  RISC-V: Export sbi_get_mvendorid() and friends
  RISC-V: KVM: Move sbi related struct and functions to kvm_vcpu_sbi.h
  RISC-V: KVM: Use switch-case in kvm_riscv_vcpu_set/get_reg()
  RISC-V: KVM: Remove redundant includes of asm/csr.h
  RISC-V: KVM: Remove redundant includes of asm/kvm_vcpu_timer.h
  RISC-V: KVM: Fix reg_val check in kvm_riscv_vcpu_set_reg_config()
  RISC-V: KVM: Simplify kvm_arch_prepare_memory_region()
  RISC-V: KVM: Exit run-loop immediately if xfer_to_guest fails
  RISC-V: KVM: use vma_lookup() instead of find_vma_intersection()
  RISC-V: KVM: Add exit logic to main.c
parents 569c3a28 6ebbdecf
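
The new ONE_REG interface in this pull is driven from userspace like any other
KVM register. The sketch below is illustrative only, not code from this merge:
it assumes a 64-bit RISC-V host running a kernel with these patches, and the
vcpu_fd argument (an already-created KVM VCPU file descriptor) and the
abbreviated error handling are hypothetical.

/*
 * Hypothetical userspace sketch: program a VCPU's mvendorid through
 * KVM_SET_ONE_REG. As the kvm_riscv_vcpu_set_reg_config() hunk below
 * shows, the kernel accepts this only before the VCPU has run at least
 * once; afterwards it returns -EBUSY.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h> /* struct kvm_one_reg, KVM_SET_ONE_REG, KVM_REG_RISCV */
#include <asm/kvm.h>   /* KVM_REG_RISCV_CONFIG, KVM_REG_RISCV_CONFIG_REG() */

static int set_mvendorid(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		/* CONFIG-type register: index of mvendorid in struct kvm_riscv_config */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CONFIG |
		      KVM_REG_RISCV_CONFIG_REG(mvendorid),
		.addr = (uint64_t)(unsigned long)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

The same register id with KVM_GET_ONE_REG reads the value back, and marchid
and mimpid follow the same pattern; the guest then observes the chosen values
through the SBI base extension (kvm_sbi_ext_base_handler(), near the end of
this diff).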
@@ -13,10 +13,10 @@
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
 #include <linux/spinlock.h>
-#include <asm/csr.h>
 #include <asm/hwcap.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_insn.h>
+#include <asm/kvm_vcpu_sbi.h>
 #include <asm/kvm_vcpu_timer.h>

 #define KVM_MAX_VCPUS 1024
@@ -95,10 +95,6 @@ struct kvm_arch {
 	struct kvm_guest_timer timer;
 };

-struct kvm_sbi_context {
-	int return_handled;
-};
-
 struct kvm_cpu_trap {
 	unsigned long sepc;
 	unsigned long scause;
@@ -169,6 +165,11 @@ struct kvm_vcpu_arch {
 	/* ISA feature bits (similar to MISA) */
 	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

+	/* Vendor, Arch, and Implementation details */
+	unsigned long mvendorid;
+	unsigned long marchid;
+	unsigned long mimpid;
+
 	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
 	unsigned long host_sscratch;
 	unsigned long host_stvec;
@@ -217,7 +218,7 @@ struct kvm_vcpu_arch {
 	struct kvm_csr_decode csr_decode;

 	/* SBI context */
-	struct kvm_sbi_context sbi_context;
+	struct kvm_vcpu_sbi_context sbi_context;

 	/* Cache pages needed to program page tables with spinlock held */
 	struct kvm_mmu_memory_cache mmu_page_cache;
@@ -327,7 +328,4 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

-int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
 #endif /* __RISCV_KVM_HOST_H__ */
@@ -14,6 +14,10 @@
 #define KVM_SBI_VERSION_MAJOR 1
 #define KVM_SBI_VERSION_MINOR 0

+struct kvm_vcpu_sbi_context {
+	int return_handled;
+};
+
 struct kvm_vcpu_sbi_extension {
 	unsigned long extid_start;
 	unsigned long extid_end;
@@ -31,7 +35,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 flags);
+int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

 #ifdef CONFIG_RISCV_SBI_V01
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
...
@@ -49,6 +49,9 @@ struct kvm_sregs {
 struct kvm_riscv_config {
 	unsigned long isa;
 	unsigned long zicbom_block_size;
+	unsigned long mvendorid;
+	unsigned long marchid;
+	unsigned long mimpid;
 };

 /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
...
@@ -627,16 +627,19 @@ long sbi_get_mvendorid(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
 }
+EXPORT_SYMBOL_GPL(sbi_get_mvendorid);

 long sbi_get_marchid(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
 }
+EXPORT_SYMBOL_GPL(sbi_get_marchid);

 long sbi_get_mimpid(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
 }
+EXPORT_SYMBOL_GPL(sbi_get_mimpid);

 static void sbi_send_cpumask_ipi(const struct cpumask *target)
 {
...
@@ -127,3 +127,9 @@ static int __init riscv_kvm_init(void)
 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
 module_init(riscv_kvm_init);
+
+static void __exit riscv_kvm_exit(void)
+{
+	kvm_exit();
+}
+module_exit(riscv_kvm_exit);
@@ -537,10 +537,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (change == KVM_MR_FLAGS_ONLY)
 		goto out;

-	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		gstage_unmap_range(kvm, base_gpa, size, false);
-	spin_unlock(&kvm->mmu_lock);
+		kvm_riscv_gstage_iounmap(kvm, base_gpa, size);

 out:
 	mmap_read_unlock(current->mm);
@@ -632,7 +630,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	mmap_read_lock(current->mm);

-	vma = find_vma_intersection(current->mm, hva, hva + 1);
+	vma = vma_lookup(current->mm, hva);
 	if (unlikely(!vma)) {
 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
 		mmap_read_unlock(current->mm);
...
@@ -21,6 +21,7 @@
 #include <asm/csr.h>
 #include <asm/cacheflush.h>
 #include <asm/hwcap.h>
+#include <asm/sbi.h>

 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	KVM_GENERIC_VCPU_STATS(),
@@ -171,6 +172,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 			set_bit(host_isa, vcpu->arch.isa);
 	}

+	/* Setup vendor, arch, and implementation details */
+	vcpu->arch.mvendorid = sbi_get_mvendorid();
+	vcpu->arch.marchid = sbi_get_marchid();
+	vcpu->arch.mimpid = sbi_get_mimpid();
+
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
@@ -270,6 +276,15 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
 			return -EINVAL;
 		reg_val = riscv_cbom_block_size;
 		break;
+	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+		reg_val = vcpu->arch.mvendorid;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(marchid):
+		reg_val = vcpu->arch.marchid;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(mimpid):
+		reg_val = vcpu->arch.mimpid;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -296,12 +311,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;

-	/* This ONE REG interface is only defined for single letter extensions */
-	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
-		return -EINVAL;
-
 	switch (reg_num) {
 	case KVM_REG_RISCV_CONFIG_REG(isa):
+		/*
+		 * This ONE REG interface is only defined for
+		 * single letter extensions.
+		 */
+		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+			return -EINVAL;
+
 		if (!vcpu->arch.ran_atleast_once) {
 			/* Ignore the enable/disable request for certain extensions */
 			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
@@ -329,6 +347,24 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 		break;
 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
 		return -EOPNOTSUPP;
+	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.mvendorid = reg_val;
+		else
+			return -EBUSY;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(marchid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.marchid = reg_val;
+		else
+			return -EBUSY;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(mimpid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.mimpid = reg_val;
+		else
+			return -EBUSY;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -541,22 +577,26 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
 {
-	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+	case KVM_REG_RISCV_CONFIG:
 		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+	case KVM_REG_RISCV_CORE:
 		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+	case KVM_REG_RISCV_CSR:
 		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+	case KVM_REG_RISCV_TIMER:
 		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+	case KVM_REG_RISCV_FP_F:
 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+	case KVM_REG_RISCV_FP_D:
 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+	case KVM_REG_RISCV_ISA_EXT:
 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+	default:
+		break;
+	}

 	return -EINVAL;
 }
@@ -564,22 +604,26 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
 {
-	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+	case KVM_REG_RISCV_CONFIG:
 		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+	case KVM_REG_RISCV_CORE:
 		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+	case KVM_REG_RISCV_CSR:
 		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+	case KVM_REG_RISCV_TIMER:
 		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+	case KVM_REG_RISCV_FP_F:
 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+	case KVM_REG_RISCV_FP_D:
 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+	case KVM_REG_RISCV_ISA_EXT:
 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+	default:
+		break;
+	}

 	return -EINVAL;
 }
@@ -984,8 +1028,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	while (ret > 0) {
 		/* Check conditions before entering the guest */
 		ret = xfer_to_guest_mode_handle_work(vcpu);
-		if (!ret)
-			ret = 1;
+		if (ret)
+			continue;
+		ret = 1;

 		kvm_riscv_gstage_vmid_update(vcpu);
...
@@ -10,9 +10,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/version.h>
-#include <asm/csr.h>
 #include <asm/sbi.h>
-#include <asm/kvm_vcpu_timer.h>
 #include <asm/kvm_vcpu_sbi.h>

 static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -21,7 +19,6 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
 	int ret = 0;
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	struct sbiret ecall_ret;

 	switch (cp->a6) {
 	case SBI_EXT_BASE_GET_SPEC_VERSION:
@@ -50,13 +47,13 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		*out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0;
 		break;
 	case SBI_EXT_BASE_GET_MVENDORID:
+		*out_val = vcpu->arch.mvendorid;
+		break;
 	case SBI_EXT_BASE_GET_MARCHID:
+		*out_val = vcpu->arch.marchid;
+		break;
 	case SBI_EXT_BASE_GET_MIMPID:
-		ecall_ret = sbi_ecall(SBI_EXT_BASE, cp->a6, 0, 0, 0, 0, 0, 0);
-		if (!ecall_ret.error)
-			*out_val = ecall_ret.value;
-		/*TODO: We are unnecessarily converting the error twice */
-		ret = sbi_err_map_linux_errno(ecall_ret.error);
+		*out_val = vcpu->arch.mimpid;
 		break;
 	default:
 		ret = -EOPNOTSUPP;
...
@@ -9,7 +9,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_sbi.h>
...
@@ -9,7 +9,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_timer.h>
 #include <asm/kvm_vcpu_sbi.h>
...
@@ -9,7 +9,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_timer.h>
 #include <asm/kvm_vcpu_sbi.h>
...