Commit 90e9311a, authored by James Hogan and committed by Paolo Bonzini

MIPS: KVM: Convert exception entry to uasm

Convert the whole of locore.S (assembly to enter guest and handle
exception entry) to be generated dynamically with uasm. This is done
with minimal changes to the resulting code.

The main changes are:
- Some constants are generated by uasm using LUI+ADDIU instead of
  LUI+ORI.
- Loading of lo and hi are swapped around in vcpu_run but not when
  resuming the guest after an exit. Both bits of logic are now generated
  by the same code.
- Register MOVEs in uasm use different ADDU operand ordering to GNU as,
  putting zero register into rs instead of rt.
- The JALR.HB to call the C exit handler is switched to JALR, since the
  hazard barrier would appear to be unnecessary.

This will allow further optimisation in the future to dynamically handle
the capabilities of the CPU.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6f63405c
...@@ -533,8 +533,12 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); ...@@ -533,8 +533,12 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */ /* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* Trampoline ASM routine to start running in "Guest" context */ extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
/* Building of entry/exception code */
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_exception(void *addr);
void *kvm_mips_build_exit(void *addr);
/* FPU/MSA context management */ /* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu); void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
......
...@@ -17,6 +17,7 @@ if VIRTUALIZATION ...@@ -17,6 +17,7 @@ if VIRTUALIZATION
config KVM config KVM
tristate "Kernel-based Virtual Machine (KVM) support" tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM depends on HAVE_KVM
select EXPORT_UASM
select PREEMPT_NOTIFIERS select PREEMPT_NOTIFIERS
select ANON_INODES select ANON_INODES
select KVM_MMIO select KVM_MMIO
......
...@@ -7,7 +7,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm ...@@ -7,7 +7,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \ kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
interrupt.o stats.o commpage.o \ interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o fpu.o dyntrans.o trap_emul.o fpu.o
kvm-objs += mmu.o kvm-objs += mmu.o
......
This diff is collapsed.
...@@ -28,10 +28,6 @@ ...@@ -28,10 +28,6 @@
#define MIPS_EXC_MAX 12 #define MIPS_EXC_MAX 12
/* XXXSL More to follow */ /* XXXSL More to follow */
extern char __kvm_mips_vcpu_run_end[];
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
#define C_TI (_ULCAST_(1) << 30) #define C_TI (_ULCAST_(1) << 30)
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
......
This diff is collapsed.
...@@ -247,8 +247,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, ...@@ -247,8 +247,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{ {
int err, size, offset; int err, size;
void *gebase; void *gebase, *p;
int i; int i;
struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
...@@ -286,41 +286,28 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) ...@@ -286,41 +286,28 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
/* Save new ebase */ /* Save new ebase */
vcpu->arch.guest_ebase = gebase; vcpu->arch.guest_ebase = gebase;
/* Copy L1 Guest Exception handler to correct offset */ /* Build guest exception vectors dynamically in unmapped memory */
/* TLB Refill, EXL = 0 */ /* TLB Refill, EXL = 0 */
memcpy(gebase, mips32_exception, kvm_mips_build_exception(gebase);
mips32_exceptionEnd - mips32_exception);
/* General Exception Entry point */ /* General Exception Entry point */
memcpy(gebase + 0x180, mips32_exception, kvm_mips_build_exception(gebase + 0x180);
mips32_exceptionEnd - mips32_exception);
/* For vectored interrupts poke the exception code @ all offsets 0-7 */ /* For vectored interrupts poke the exception code @ all offsets 0-7 */
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
kvm_debug("L1 Vectored handler @ %p\n", kvm_debug("L1 Vectored handler @ %p\n",
gebase + 0x200 + (i * VECTORSPACING)); gebase + 0x200 + (i * VECTORSPACING));
memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING);
mips32_exceptionEnd - mips32_exception);
} }
/* General handler, relocate to unmapped space for sanity's sake */ /* General exit handler */
offset = 0x2000; p = gebase + 0x2000;
kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n", p = kvm_mips_build_exit(p);
gebase + offset,
mips32_GuestExceptionEnd - mips32_GuestException);
memcpy(gebase + offset, mips32_GuestException, /* Guest entry routine */
mips32_GuestExceptionEnd - mips32_GuestException); vcpu->arch.vcpu_run = p;
p = kvm_mips_build_vcpu_run(p);
#ifdef MODULE
offset += mips32_GuestExceptionEnd - mips32_GuestException;
memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
__kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
vcpu->arch.vcpu_run = gebase + offset;
#else
vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
#endif
/* Invalidate the icache for these ranges */ /* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase, local_flush_icache_range((unsigned long)gebase,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment