Commit 45c7e8af authored by Thomas Bogendoerfer

MIPS: Remove KVM_TE support

After removal of the guest part of KVM TE (trap and emulate), also remove
the host part.
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
parent a1515ec7
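
With trap-and-emulate gone, VZ is the only remaining KVM implementation on MIPS, so the Kconfig "Virtualization mode" choice and its symbols disappear. As a rough sketch (not part of the commit itself), a config fragment that enables KVM afterwards reduces to the generic options already visible in the defconfig hunk below:

    # Illustrative minimal fragment; CONFIG_KVM_MIPS_VZ and CONFIG_KVM_MIPS_TE no longer exist
    CONFIG_VIRTUALIZATION=y
    CONFIG_KVM=m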
@@ -39,7 +39,6 @@ CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
-CONFIG_KVM_MIPS_VZ=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -30,40 +30,6 @@ config KVM
 	help
 	  Support for hosting Guest kernels.
 
-choice
-	prompt "Virtualization mode"
-	depends on KVM
-	default KVM_MIPS_TE
-
-config KVM_MIPS_TE
-	bool "Trap & Emulate"
-	depends on CPU_MIPS32_R2
-	help
-	  Use trap and emulate to virtualize 32-bit guests in user mode. This
-	  does not require any special hardware Virtualization support beyond
-	  standard MIPS32 r2 or later, but it does require the guest kernel
-	  to be configured with CONFIG_KVM_GUEST=y so that it resides in the
-	  user address segment.
-
-config KVM_MIPS_VZ
-	bool "MIPS Virtualization (VZ) ASE"
-	help
-	  Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
-	  supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
-	  but requires hardware support.
-
-endchoice
-
-config KVM_MIPS_DYN_TRANS
-	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
-	depends on KVM_MIPS_TE
-	default y
-	help
-	  When running in Trap & Emulate mode patch privileged
-	  instructions to reduce the number of traps.
-
-	  If unsure, say Y.
-
 config KVM_MIPS_DEBUG_COP0_COUNTERS
 	bool "Maintain counters for COP0 accesses"
 	depends on KVM
@@ -9,7 +9,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
 common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
 
 kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
-	    interrupt.o stats.o commpage.o \
+	    interrupt.o stats.o \
 	    fpu.o
 kvm-objs += hypcall.o
 kvm-objs += mmu.o
@@ -17,11 +17,6 @@ ifdef CONFIG_CPU_LOONGSON64
 kvm-objs += loongson_ipi.o
 endif
 
-ifdef CONFIG_KVM_MIPS_VZ
 kvm-objs += vz.o
-else
-kvm-objs += dyntrans.o
-kvm-objs += trap_emul.o
-endif
 obj-$(CONFIG_KVM) += kvm.o
 obj-y += callback.o tlb.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* commpage, currently used for Virtual COP0 registers.
* Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/kvm_host.h>
#include "commpage.h"
void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
{
struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
/* Specific init values for fields */
vcpu->arch.cop0 = &page->cop0;
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: commpage: mapped into guest kernel space
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
#ifndef __KVM_MIPS_COMMPAGE_H__
#define __KVM_MIPS_COMMPAGE_H__
struct kvm_mips_commpage {
/* COP0 state is mapped into Guest kernel via commpage */
struct mips_coproc cop0;
};
#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
#endif /* __KVM_MIPS_COMMPAGE_H__ */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include "commpage.h"
/**
* kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
* @vcpu: Virtual CPU.
* @opc: PC of instruction to replace.
* @replace: Instruction to write
*/
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
union mips_instruction replace)
{
unsigned long vaddr = (unsigned long)opc;
int err;
retry:
/* The GVA page table is still active so use the Linux TLB handlers */
kvm_trap_emul_gva_lockless_begin(vcpu);
err = put_user(replace.word, opc);
kvm_trap_emul_gva_lockless_end(vcpu);
if (unlikely(err)) {
/*
* We write protect clean pages in GVA page table so normal
* Linux TLB mod handler doesn't silently dirty the page.
* Its also possible we raced with a GVA invalidation.
* Try to force the page to become dirty.
*/
err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
if (unlikely(err)) {
kvm_info("%s: Address unwriteable: %p\n",
__func__, opc);
return -EFAULT;
}
/*
* Try again. This will likely trigger a TLB refill, which will
* fetch the new dirty entry from the GVA page table, which
* should then succeed.
*/
goto retry;
}
__local_flush_icache_user_range(vaddr, vaddr + 4);
return 0;
}
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
union mips_instruction nop_inst = { 0 };
/* Replace the CACHE instruction, with a NOP */
return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}
/*
* Address based CACHE instructions are transformed into synci(s). A little
* heavy for just D-cache invalidates, but avoids an expensive trap
*/
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
union mips_instruction synci_inst = { 0 };
synci_inst.i_format.opcode = bcond_op;
synci_inst.i_format.rs = inst.i_format.rs;
synci_inst.i_format.rt = synci_op;
if (cpu_has_mips_r6)
synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
else
synci_inst.i_format.simmediate = inst.i_format.simmediate;
return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
union mips_instruction mfc0_inst = { 0 };
u32 rd, sel;
rd = inst.c0r_format.rd;
sel = inst.c0r_format.sel;
if (rd == MIPS_CP0_ERRCTL && sel == 0) {
mfc0_inst.r_format.opcode = spec_op;
mfc0_inst.r_format.rd = inst.c0r_format.rt;
mfc0_inst.r_format.func = add_op;
} else {
mfc0_inst.i_format.opcode = lw_op;
mfc0_inst.i_format.rt = inst.c0r_format.rt;
mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
mfc0_inst.i_format.simmediate |= 4;
#endif
}
return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
union mips_instruction mtc0_inst = { 0 };
u32 rd, sel;
rd = inst.c0r_format.rd;
sel = inst.c0r_format.sel;
mtc0_inst.i_format.opcode = sw_op;
mtc0_inst.i_format.rt = inst.c0r_format.rt;
mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
mtc0_inst.i_format.simmediate |= 4;
#endif
return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}
@@ -305,7 +305,6 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
 	UASM_i_MTC0(&p, T0, C0_EPC);
 
-#ifdef CONFIG_KVM_MIPS_VZ
 	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
 	if (cpu_has_ldpte)
 		UASM_i_MFC0(&p, K0, C0_PWBASE);
@@ -367,21 +366,6 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	/* Set the root ASID for the Guest */
 	UASM_i_ADDIU(&p, T1, S0,
 		     offsetof(struct kvm, arch.gpa_mm.context.asid));
-#else
-	/* Set the ASID for the Guest Kernel or User */
-	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
-	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
-		  T0);
-	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
-	uasm_i_xori(&p, T0, T0, KSU_USER);
-	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
-	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
-					  guest_kernel_mm.context.asid));
-	/* else user */
-	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
-					  guest_user_mm.context.asid));
-	uasm_l_kernel_asid(&l, p);
-#endif
 
 	/* t1: contains the base of the ASID array, need to get the cpu id */
 	/* smp_processor_id */
@@ -406,24 +390,9 @@ static void *kvm_mips_build_enter_guest(void *addr)
 	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
 #endif
 
-#ifndef CONFIG_KVM_MIPS_VZ
-	/*
-	 * Set up KVM T&E GVA pgd.
-	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
-	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
-	 * - but skips write into CP0_PWBase for now
-	 */
-	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
-			  (int)offsetof(struct mm_struct, context.asid), T1);
-
-	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
-	uasm_i_jalr(&p, RA, T9);
-	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
-#else
 	/* Set up KVM VZ root ASID (!guestid) */
 	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
 skip_asid_restore:
-#endif
 	uasm_i_ehb(&p);
 
 	/* Disable RDHWR access */
@@ -720,7 +689,6 @@ void *kvm_mips_build_exit(void *addr)
 		uasm_l_msa_1(&l, p);
 	}
 
-#ifdef CONFIG_KVM_MIPS_VZ
 	/* Restore host ASID */
 	if (!cpu_has_guestid) {
 		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
@@ -764,7 +732,6 @@ void *kvm_mips_build_exit(void *addr)
 				    MIPS_GCTL1_RID_WIDTH);
 		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
 	}
-#endif
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
@@ -21,119 +21,6 @@
 #include "interrupt.h"
 
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
set_bit(priority, &vcpu->arch.pending_exceptions);
}
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
/*
* Cause bits to reflect the pending timer interrupt,
* the EXC code will be set when we are actually
* delivering the interrupt:
*/
kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
/* Queue up an INT exception for the core */
kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
/*
* Cause bits to reflect the pending IO interrupt,
* the EXC code will be set when we are actually
* delivering the interrupt:
*/
kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq)
{
int intr = (int)irq->irq;
kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
int allowed = 0;
u32 exccode, ie;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
if (priority == MIPS_EXC_MAX)
return 0;
ie = 1 << (kvm_priority_to_irq[priority] + 8);
if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
&& (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
&& (kvm_read_c0_guest_status(cop0) & ie)) {
allowed = 1;
exccode = EXCCODE_INT;
}
/* Are we allowed to deliver the interrupt ??? */
if (allowed) {
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
else
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
} else
kvm_err("Trying to deliver interrupt when EXL is already set\n");
kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
(exccode << CAUSEB_EXCCODE));
/* XXXSL Set PC to the interrupt exception entry point */
arch->pc = kvm_mips_guest_exception_base(vcpu);
if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
arch->pc += 0x200;
else
arch->pc += 0x180;
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
return allowed;
}
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
return 1;
}
 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
@@ -145,10 +32,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
 		priority = __ffs(*pending_clr);
 		while (priority <= MIPS_EXC_MAX) {
-			if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
-				if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
-					break;
-			}
+			kvm_mips_callbacks->irq_clear(vcpu, priority, cause);
 
 			priority = find_next_bit(pending_clr,
 						 BITS_PER_BYTE * sizeof(*pending_clr),
@@ -157,10 +41,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
 		priority = __ffs(*pending);
 		while (priority <= MIPS_EXC_MAX) {
-			if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
-				if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
-					break;
-			}
+			kvm_mips_callbacks->irq_deliver(vcpu, priority, cause);
 
 			priority = find_next_bit(pending,
 						 BITS_PER_BYTE * sizeof(*pending),
@@ -31,29 +31,9 @@
 #define C_TI        (_ULCAST_(1) << 30)
 
-#ifdef CONFIG_KVM_MIPS_VZ
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
-#else
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
-#endif
-
 extern u32 *kvm_priority_to_irq;
 u32 kvm_irq_to_priority(u32 irq);
 
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
-
 int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
 
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
-			      struct kvm_mips_interrupt *irq);
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-				struct kvm_mips_interrupt *irq);
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-			    u32 cause);
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-			  u32 cause);
 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
@@ -30,7 +30,6 @@
 #include <linux/kvm_host.h>
 
 #include "interrupt.h"
-#include "commpage.h"
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -58,7 +57,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VCPU_STAT("fpe", fpe_exits),
 	VCPU_STAT("msa_disabled", msa_disabled_exits),
 	VCPU_STAT("flush_dcache", flush_dcache_exits),
-#ifdef CONFIG_KVM_MIPS_VZ
 	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
 	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
 	VCPU_STAT("vz_hc", vz_hc_exits),
@@ -69,7 +67,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VCPU_STAT("vz_resvd", vz_resvd_exits),
 #ifdef CONFIG_CPU_LOONGSON64
 	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
-#endif
 #endif
 	VCPU_STAT("halt_successful_poll", halt_successful_poll),
 	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
@@ -139,11 +136,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	switch (type) {
 	case KVM_VM_MIPS_AUTO:
 		break;
-#ifdef CONFIG_KVM_MIPS_VZ
 	case KVM_VM_MIPS_VZ:
-#else
-	case KVM_VM_MIPS_TE:
-#endif
 		break;
 	default:
 		/* Unsupported KVM type */
@@ -361,7 +354,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
 	refill_start = gebase;
-	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
+	if (IS_ENABLED(CONFIG_64BIT))
 		refill_start += 0x080;
 	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
@@ -397,20 +390,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	flush_icache_range((unsigned long)gebase,
 			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
-	/*
-	 * Allocate comm page for guest kernel, a TLB will be reserved for
-	 * mapping GVA @ 0xFFFF8000 to this page
-	 */
-	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
-	if (!vcpu->arch.kseg0_commpage) {
-		err = -ENOMEM;
-		goto out_free_gebase;
-	}
-
-	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
-	kvm_mips_commpage_init(vcpu);
-
 	/* Init */
 	vcpu->arch.last_sched_cpu = -1;
 	vcpu->arch.last_exec_cpu = -1;
@@ -418,12 +397,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Initial guest state */
 	err = kvm_mips_callbacks->vcpu_setup(vcpu);
 	if (err)
-		goto out_free_commpage;
+		goto out_free_gebase;
 
 	return 0;
 
-out_free_commpage:
-	kfree(vcpu->arch.kseg0_commpage);
 out_free_gebase:
 	kfree(gebase);
 out_uninit_vcpu:
@@ -439,7 +416,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_caches(vcpu);
 	kfree(vcpu->arch.guest_ebase);
-	kfree(vcpu->arch.kseg0_commpage);
 	kvm_mips_callbacks->vcpu_uninit(vcpu);
 }
@@ -1212,10 +1188,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 
-	/* re-enable HTW before enabling interrupts */
-	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
-		htw_start();
-
 	/* Set a default exit reason */
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
@@ -1232,22 +1204,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 			cause, opc, run, vcpu);
 	trace_kvm_exit(vcpu, exccode);
 
-	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
-		/*
-		 * Do a privilege check, if in UM most of these exit conditions
-		 * end up causing an exception to be delivered to the Guest
-		 * Kernel
-		 */
-		er = kvm_mips_check_privilege(cause, opc, vcpu);
-		if (er == EMULATE_PRIV_FAIL) {
-			goto skip_emul;
-		} else if (er == EMULATE_FAIL) {
-			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-			ret = RESUME_HOST;
-			goto skip_emul;
-		}
-	}
-
 	switch (exccode) {
 	case EXCCODE_INT:
 		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
@@ -1357,7 +1313,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 	}
 
-skip_emul:
 	local_irq_disable();
 
 	if (ret == RESUME_GUEST)
@@ -1406,11 +1361,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 		    read_c0_config5() & MIPS_CONF5_MSAEN)
 			__kvm_restore_msacsr(&vcpu->arch);
 	}
-
-	/* Disable HTW before returning to guest or host */
-	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
-		htw_stop();
-
 	return ret;
 }
@@ -1429,10 +1379,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
-	 *
-	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
-	 * get called when guest CU1 is set, however we can't trust the guest
-	 * not to clobber the status register directly via the commpage.
	 */
 	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
 	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
@@ -1553,11 +1499,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 	preempt_disable();
 
 	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
-		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
-			set_c0_config5(MIPS_CONF5_MSAEN);
-			enable_fpu_hazard();
-		}
-
 		__kvm_save_msa(&vcpu->arch);
 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
@@ -1569,11 +1510,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 		}
 		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
 	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
-		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
-			set_c0_status(ST0_CU1);
-			enable_fpu_hazard();
-		}
-
 		__kvm_save_fpu(&vcpu->arch);
 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
@@ -30,10 +30,6 @@
 #include <asm/r4kcache.h>
 
 #define CONFIG_MIPS_MT
 
-#define KVM_GUEST_PC_TLB 0
-#define KVM_GUEST_SP_TLB 1
-
-#ifdef CONFIG_KVM_MIPS_VZ
 unsigned long GUESTID_MASK;
 EXPORT_SYMBOL_GPL(GUESTID_MASK);
 unsigned long GUESTID_FIRST_VERSION;
@@ -50,91 +46,6 @@ static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
 	else
 		return cpu_asid(smp_processor_id(), gpa_mm);
 }
-#endif
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
int cpu = smp_processor_id();
return cpu_asid(cpu, kern_mm);
}
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
int cpu = smp_processor_id();
return cpu_asid(cpu, user_mm);
}
/* Structure defining an tlb entry data set. */
void kvm_mips_dump_host_tlbs(void)
{
unsigned long flags;
local_irq_save(flags);
kvm_info("HOST TLBs:\n");
dump_tlb_regs();
pr_info("\n");
dump_tlb_all();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb tlb;
int i;
kvm_info("Guest TLBs:\n");
kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
tlb = vcpu->arch.guest_tlb[i];
kvm_info("TLB%c%3d Hi 0x%08lx ",
(tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
? ' ' : '*',
i, tlb.tlb_hi);
kvm_info("Lo0=0x%09llx %c%c attr %lx ",
(u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
(tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
(tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
(tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
(u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
(tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
(tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
(tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
tlb.tlb_mask);
}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
int i;
int index = -1;
struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
TLB_HI_ASID_HIT(tlb[i], entryhi)) {
index = i;
break;
}
}
kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
__func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
 static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 {
@@ -163,54 +74,6 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 	return idx;
 }
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
bool user, bool kernel)
{
/*
* Initialize idx_user and idx_kernel to workaround bogus
* maybe-initialized warning when using GCC 6.
*/
int idx_user = 0, idx_kernel = 0;
unsigned long flags, old_entryhi;
local_irq_save(flags);
old_entryhi = read_c0_entryhi();
if (user)
idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
kvm_mips_get_user_asid(vcpu));
if (kernel)
idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
kvm_mips_get_kernel_asid(vcpu));
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
local_irq_restore(flags);
/*
* We don't want to get reserved instruction exceptions for missing tlb
* entries.
*/
if (cpu_has_vtag_icache)
flush_icache_all();
if (user && idx_user >= 0)
kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
__func__, (va & VPN2_MASK) |
kvm_mips_get_user_asid(vcpu), idx_user);
if (kernel && idx_kernel >= 0)
kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
__func__, (va & VPN2_MASK) |
kvm_mips_get_kernel_asid(vcpu), idx_kernel);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
-#ifdef CONFIG_KVM_MIPS_VZ
 /* GuestID management */
 
 /**
@@ -661,40 +524,3 @@ void kvm_loongson_clear_guest_ftlb(void)
 }
 EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
 #endif
-#endif
-
/**
* kvm_mips_suspend_mm() - Suspend the active mm.
* @cpu The CPU we're running on.
*
* Suspend the active_mm, ready for a switch to a KVM guest virtual address
* space. This is left active for the duration of guest context, including time
* with interrupts enabled, so we need to be careful not to confuse e.g. cache
* management IPIs.
*
* kvm_mips_resume_mm() should be called before context switching to a different
* process so we don't need to worry about reference counting.
*
* This needs to be in static kernel code to avoid exporting init_mm.
*/
void kvm_mips_suspend_mm(int cpu)
{
cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
/**
* kvm_mips_resume_mm() - Resume the current process mm.
* @cpu The CPU we're running on.
*
* Resume the mm of the current process, after a switch back from a KVM guest
* virtual address space (see kvm_mips_suspend_mm()).
*/
void kvm_mips_resume_mm(int cpu)
{
cpumask_set_cpu(cpu, mm_cpumask(current->mm));
current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
@@ -292,9 +292,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
 	switch (priority) {
 	case MIPS_EXC_INT_TIMER:
 		/*
-		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
-		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
-		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
+		 * Explicitly clear irq associated with Cause.IP[IPTI]
+		 * if GuestCtl2 virtual interrupt register not
 		 * supported or if not using GuestCtl2 Hardware Clear.
 		 */
 		if (cpu_has_guestctl2) {