Commit 9f701326 authored by Anup Patel, committed by Anup Patel

RISC-V: KVM: Handle MMIO exits for VCPU

We will get stage2 page faults whenever the Guest/VM accesses a SW
emulated MMIO device or unmapped Guest RAM.

This patch implements MMIO read/write emulation by extracting MMIO
details from the trapped load/store instruction and forwarding the
MMIO read/write to user-space. The actual MMIO emulation will happen
in user-space and the KVM kernel module will only take care of
register updates before resuming the trapped VCPU.
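
For reference (not part of this patch): a minimal sketch of the
user-space side of this flow, assuming a VMM that already has a VCPU
fd and knows the kvm_run mmap size from KVM_GET_VCPU_MMAP_SIZE;
device_read()/device_write() are hypothetical device-model helpers:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		if (run->exit_reason == KVM_EXIT_MMIO) {
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			else
				device_read(run->mmio.phys_addr,
					    run->mmio.data, run->mmio.len);
			/*
			 * On the next KVM_RUN the kernel copies read data
			 * into the guest's rd register and resumes the VCPU.
			 */
		}
	}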

The handling of stage2 page faults for unmapped Guest RAM will be
implemented by a separate patch later.

[jiangyifei: ioeventfd and in-kernel mmio device support]
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alexander Graf <graf@amazon.com>
Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 34bde9d8
@@ -49,6 +49,14 @@ struct kvm_arch {
 	phys_addr_t pgd_phys;
 };
 
+struct kvm_mmio_decode {
+	unsigned long insn;
+	int insn_len;
+	int len;
+	int shift;
+	int return_handled;
+};
+
 struct kvm_cpu_trap {
 	unsigned long sepc;
 	unsigned long scause;
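
The fields above record the trapped load/store (insn, insn_len), the
MMIO access width (len), the shift used to sign-extend a narrow load
(shift), and whether the user-space reply has already been consumed
(return_handled). As a hedged illustration only (the real fill happens
in the MMIO emulation code whose diff is collapsed further down, and
INSN_LEN() is a stand-in helper), a trapped 32-bit "lw" to an MMIO
address might be recorded as:

	vcpu->arch.mmio_decode.insn = insn;               /* trapped instruction */
	vcpu->arch.mmio_decode.insn_len = INSN_LEN(insn); /* 2 or 4 bytes */
	vcpu->arch.mmio_decode.len = 4;                   /* lw accesses 4 bytes */
	vcpu->arch.mmio_decode.shift = 8 * (sizeof(unsigned long) - 4);
	vcpu->arch.mmio_decode.return_handled = 0;        /* reply not seen yet */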
@@ -147,6 +155,9 @@ struct kvm_vcpu_arch {
 	unsigned long irqs_pending;
 	unsigned long irqs_pending_mask;
 
+	/* MMIO instruction details */
+	struct kvm_mmio_decode mmio_decode;
+
 	/* VCPU power-off state */
 	bool power_off;
@@ -162,11 +173,22 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
+int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
+			 struct kvm_memory_slot *memslot,
+			 gpa_t gpa, unsigned long hva, bool is_write);
 void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
 
+void __kvm_riscv_unpriv_trap(void);
+
+unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
+					 bool read_insn,
+					 unsigned long guest_addr,
+					 struct kvm_cpu_trap *trap);
+void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
+				  struct kvm_cpu_trap *trap);
 int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			struct kvm_cpu_trap *trap);
......
@@ -189,6 +189,12 @@ void asm_offsets(void)
 	OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec);
 	OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren);
 
+	OFFSET(KVM_ARCH_TRAP_SEPC, kvm_cpu_trap, sepc);
+	OFFSET(KVM_ARCH_TRAP_SCAUSE, kvm_cpu_trap, scause);
+	OFFSET(KVM_ARCH_TRAP_STVAL, kvm_cpu_trap, stval);
+	OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval);
+	OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst);
+
 	/*
 	 * THREAD_{F,X}* might be larger than a S-type offset can handle, but
 	 * these are used in performance-sensitive assembly so we can't resort
......
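
For context (existing kbuild machinery, not part of this patch): each
OFFSET() entry above causes a #define carrying the member's offsetof()
value to be emitted into the generated asm-offsets.h, which is what
lets the new assembly further down store trap CSRs straight into a
struct kvm_cpu_trap. Illustratively (the numeric values are made up):

	/* include/generated/asm-offsets.h (illustrative values) */
	#define KVM_ARCH_TRAP_SEPC	0	/* offsetof(struct kvm_cpu_trap, sepc) */
	#define KVM_ARCH_TRAP_SCAUSE	8	/* offsetof(struct kvm_cpu_trap, scause) */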
@@ -24,6 +24,7 @@ config KVM
 	select ANON_INODES
 	select KVM_MMIO
 	select HAVE_KVM_VCPU_ASYNC_IOCTL
+	select HAVE_KVM_EVENTFD
 	select SRCU
 	help
 	  Support hosting virtualized guest machines.
......
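
The HAVE_KVM_EVENTFD selection above (plus $(KVM)/eventfd.o in the
Makefile below) backs the "ioeventfd and in-kernel mmio device support"
mentioned in the commit message: user-space can register an eventfd
that KVM signals on a matching guest MMIO write, avoiding a full
KVM_EXIT_MMIO round-trip. A hedged user-space sketch, assuming an
existing vm_fd; the doorbell address is made up:

	#include <linux/kvm.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>

	int efd = eventfd(0, EFD_NONBLOCK);

	struct kvm_ioeventfd ioev = {
		.addr  = 0x10001000,	/* hypothetical device doorbell GPA */
		.len   = 4,
		.fd    = efd,
		.flags = 0,		/* no DATAMATCH: any 4-byte store fires */
	};

	/* 4-byte guest stores to this GPA now just signal efd in-kernel. */
	ioctl(vm_fd, KVM_IOEVENTFD, &ioev);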
@@ -10,4 +10,4 @@ KVM := ../../../virt/kvm
 obj-$(CONFIG_KVM) += kvm.o
 
 kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/binary_stats.o \
-	 main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o
+	 $(KVM)/eventfd.o main.o vm.o mmu.o vcpu.o vcpu_exit.o vcpu_switch.o
@@ -58,6 +58,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
+			 struct kvm_memory_slot *memslot,
+			 gpa_t gpa, unsigned long hva, bool is_write)
+{
+	/* TODO: */
+	return 0;
+}
+
 void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
 {
 	/* TODO: */
......
This diff is collapsed.
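
The collapsed diff above holds the bulk of this patch (the MMIO
load/store emulation behind kvm_riscv_vcpu_exit() and
kvm_riscv_vcpu_mmio_return(), built as vcpu_exit.o per the Makefile).
As a hedged sketch of the "register updates before resuming the
trapped VCPU" described in the commit message, completing a forwarded
MMIO load could look roughly like this; SET_RD() is a stand-in for a
helper that writes the destination register of the trapped instruction:

	/* Sketch only: complete a previously forwarded MMIO load. */
	int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
	{
		unsigned long data = 0;
		int len = vcpu->arch.mmio_decode.len;
		int shift = vcpu->arch.mmio_decode.shift;

		/* Only act on the first return after an MMIO exit. */
		if (vcpu->arch.mmio_decode.return_handled)
			return 0;
		vcpu->arch.mmio_decode.return_handled = 1;

		if (!run->mmio.is_write) {
			memcpy(&data, run->mmio.data, len);
			/* Sign-extend the narrow load into the rd register. */
			SET_RD(vcpu->arch.mmio_decode.insn, &vcpu->arch.guest_context,
			       (unsigned long)(((long)(data << shift)) >> shift));
		}

		/* Step past the trapped load/store and let the VCPU resume. */
		vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
		return 0;
	}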
@@ -201,3 +201,26 @@ __kvm_switch_return:
 	/* Return to C code */
 	ret
 ENDPROC(__kvm_riscv_switch_to)
+
+ENTRY(__kvm_riscv_unpriv_trap)
+	/*
+	 * We assume that faulting unpriv load/store instruction is
+	 * 4-byte long and blindly increment SEPC by 4.
+	 *
+	 * The trap details will be saved at address pointed by 'A0'
+	 * register and we use 'A1' register as temporary.
+	 */
+	csrr a1, CSR_SEPC
+	REG_S a1, (KVM_ARCH_TRAP_SEPC)(a0)
+	addi a1, a1, 4
+	csrw CSR_SEPC, a1
+	csrr a1, CSR_SCAUSE
+	REG_S a1, (KVM_ARCH_TRAP_SCAUSE)(a0)
+	csrr a1, CSR_STVAL
+	REG_S a1, (KVM_ARCH_TRAP_STVAL)(a0)
+	csrr a1, CSR_HTVAL
+	REG_S a1, (KVM_ARCH_TRAP_HTVAL)(a0)
+	csrr a1, CSR_HTINST
+	REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0)
+	sret
+ENDPROC(__kvm_riscv_unpriv_trap)
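
To tie this back to the kvm_riscv_vcpu_unpriv_read() declaration added
in kvm_host.h: when a guest access traps and htinst does not carry a
usable copy of the instruction, KVM must read the instruction word from
guest memory with the hypervisor load instructions, and any fault taken
during that read vectors to __kvm_riscv_unpriv_trap above, which fills
the kvm_cpu_trap passed in a0. A hedged caller-side sketch
(INSN_16BIT_MASK and the htinst fix-up are simplified stand-ins, not
the patch's exact code):

	struct kvm_cpu_trap utrap = { 0 };
	unsigned long insn;

	if (trap->htinst & 0x1) {
		/* htinst holds a (transformed) copy of the trapped instruction. */
		insn = trap->htinst | INSN_16BIT_MASK;
	} else {
		/* Fetch it from guest memory at sepc; a fault fills 'utrap'. */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, trap->sepc, &utrap);
		if (utrap.scause) {
			/* The unprivileged read itself trapped: reflect it to the guest. */
			utrap.sepc = trap->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
	}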
@@ -62,6 +62,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	int r;
 
 	switch (ext) {
+	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
......