Commit 797179bc authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Fix modular KVM under QEMU

Copy __kvm_mips_vcpu_run() into unmapped memory, so that we can never
get a TLB refill exception in it when KVM is built as a module.

This was observed to happen with the host MIPS kernel running under
QEMU, due to a not entirely transparent optimisation in the QEMU TLB
handling where TLB entries replaced with TLBWR are copied to a separate
part of the TLB array. Code in those pages continues to be executable,
but those mappings persist only until the next ASID switch, even if they
are marked global.

An ASID switch happens in __kvm_mips_vcpu_run() at exception level after
switching to the guest exception base. Subsequent TLB-mapped kernel
instructions just prior to switching to the guest trigger a TLB refill
exception, which enters the guest exception handlers without updating
EPC. This appears as a guest-triggered TLB refill on a host-kernel-mapped
(host KSeg2) address, which is not handled correctly, as user (guest)
mode accesses to kernel (host) segments always generate address error
exceptions.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: kvm@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: <stable@vger.kernel.org> # 3.10.x-
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4340fa55
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
 #define KVM_MIPS_GUEST_TLB_SIZE	64
 struct kvm_vcpu_arch {
 	void *host_ebase, *guest_ebase;
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	unsigned long host_stack;
 	unsigned long host_gp;
...
@@ -28,6 +28,7 @@
 #define MIPS_EXC_MAX                12
 /* XXXSL More to follow */

+extern char __kvm_mips_vcpu_run_end[];
 extern char mips32_exception[], mips32_exceptionEnd[];
 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
...
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 	/* Jump to guest */
 	eret
+EXPORT(__kvm_mips_vcpu_run_end)

VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
...
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	memcpy(gebase + offset, mips32_GuestException,
 	       mips32_GuestExceptionEnd - mips32_GuestException);

+#ifdef MODULE
+	offset += mips32_GuestExceptionEnd - mips32_GuestException;
+	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+	vcpu->arch.vcpu_run = gebase + offset;
+#else
+	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
 	/* Invalidate the icache for these ranges */
 	local_flush_icache_range((unsigned long)gebase,
 				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
...
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	/* Disable hardware page table walking while in guest */
 	htw_stop();

-	r = __kvm_mips_vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(run, vcpu);

 	/* Re-enable HTW before enabling interrupts */
 	htw_start();
...