Commit dc44abd6 authored by James Hogan

KVM: MIPS/Emulate: Properly implement TLBR for T&E

Properly implement emulation of the TLBR instruction for Trap & Emulate.
This instruction reads the TLB entry pointed at by the CP0_Index
register into the other TLB registers, which may have the side effect of
changing the current ASID. Therefore abstract the CP0_EntryHi and
ASID-changing code into a common function in the process.

A comment indicated that Linux doesn't use TLBR, which is true during
normal use; however, dumping of the TLB does use it (for example via the
relatively recent 'x' magic sysrq key), as does a wired TLB entries test
case in my KVM tests. The guest-side pattern involved is sketched below.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent 0ae3abcd
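
For context, the guest-side sequence that this emulation has to get right
looks roughly like the kernel's own TLB dump loop (lib/dump_tlb.c). The
sketch below is illustrative rather than lifted from the tree:
dump_one_tlb_entry() is a hypothetical helper, the CP0 accessors and
tlb_read() are the standard wrappers from asm/mipsregs.h, and the CP0
hazard barriers that real code needs between these accesses are elided
for brevity.

	#include <linux/printk.h>
	#include <asm/mipsregs.h>	/* read_c0_*()/write_c0_*() and tlb_read() */

	/* Hypothetical helper: read back one TLB entry, dump_tlb()-style. */
	static void dump_one_tlb_entry(int idx)
	{
		/* TLBR clobbers EntryHi (including the live ASID), so save it. */
		unsigned long saved_entryhi = read_c0_entryhi();

		write_c0_index(idx);	/* CP0_Index selects the entry... */
		tlb_read();		/* ...and TLBR loads it into EntryHi/Lo0/Lo1/PageMask */

		pr_info("TLB[%02d]: hi=%#lx lo0=%#lx lo1=%#lx mask=%#lx\n",
			idx,
			read_c0_entryhi(),	/* VPN2 + ASID of the entry */
			read_c0_entrylo0(),
			read_c0_entrylo1(),
			read_c0_pagemask());

		/* Restore the original ASID before resuming normal execution. */
		write_c0_entryhi(saved_entryhi);
	}

Under trap & emulate, the TLBR and each of these CP0 accesses trap to KVM,
so failing TLBR (the old behaviour) breaks the whole dump, and emulating
it without modelling the EntryHi/ASID change would leave KVM with a stale
view of the guest ASID.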
@@ -990,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
-/*
- * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
- * we can catch this, if things ever change
- */
+static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
+				    unsigned long entryhi)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+	int cpu, i;
+	u32 nasid = entryhi & KVM_ENTRYHI_ASID;
+
+	if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
+		trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
+				      KVM_ENTRYHI_ASID, nasid);
+
+		/*
+		 * Flush entries from the GVA page tables.
+		 * Guest user page table will get flushed lazily on re-entry to
+		 * guest user if the guest ASID actually changes.
+		 */
+		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
+
+		/*
+		 * Regenerate/invalidate kernel MMU context.
+		 * The user MMU context will be regenerated lazily on re-entry
+		 * to guest user if the guest ASID actually changes.
+		 */
+		preempt_disable();
+		cpu = smp_processor_id();
+		get_new_mmu_context(kern_mm, cpu);
+		for_each_possible_cpu(i)
+			if (i != cpu)
+				cpu_context(i, kern_mm) = 0;
+		preempt_enable();
+	}
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+}
+
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_mips_tlb *tlb;
 	unsigned long pc = vcpu->arch.pc;
+	int index;
 
-	kvm_err("[%#lx] COP0_TLBR [%d]\n", pc, kvm_read_c0_guest_index(cop0));
-	return EMULATE_FAIL;
+	index = kvm_read_c0_guest_index(cop0);
+	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+		/* UNDEFINED */
+		kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
+		index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
+	}
+
+	tlb = &vcpu->arch.guest_tlb[index];
+	kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
+	kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
+	kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
+	kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
+
+	return EMULATE_DONE;
 }
 
 /**
@@ -1222,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 					   struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
 	enum emulation_result er = EMULATE_DONE;
 	u32 rt, rd, sel;
 	unsigned long curr_pc;
-	int cpu, i;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -1328,43 +1371,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 				kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
 							  vcpu->arch.gprs[rt]);
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-				u32 nasid =
-					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
-				if (((kvm_read_c0_guest_entryhi(cop0) &
-				      KVM_ENTRYHI_ASID) != nasid)) {
-					trace_kvm_asid_change(vcpu,
-						kvm_read_c0_guest_entryhi(cop0)
-							& KVM_ENTRYHI_ASID,
-						nasid);
-
-					/*
-					 * Flush entries from the GVA page
-					 * tables.
-					 * Guest user page table will get
-					 * flushed lazily on re-entry to guest
-					 * user if the guest ASID actually
-					 * changes.
-					 */
-					kvm_mips_flush_gva_pt(kern_mm->pgd,
-							      KMF_KERN);
-
-					/*
-					 * Regenerate/invalidate kernel MMU
-					 * context.
-					 * The user MMU context will be
-					 * regenerated lazily on re-entry to
-					 * guest user if the guest ASID actually
-					 * changes.
-					 */
-					preempt_disable();
-					cpu = smp_processor_id();
-					get_new_mmu_context(kern_mm, cpu);
-					for_each_possible_cpu(i)
-						if (i != cpu)
-							cpu_context(i, kern_mm) = 0;
-					preempt_enable();
-				}
-				kvm_write_c0_guest_entryhi(cop0,
-							   vcpu->arch.gprs[rt]);
+				kvm_mips_change_entryhi(vcpu,
+							vcpu->arch.gprs[rt]);
 			}
 
 			/* Are we writing to COUNT */
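
A note on the UNDEFINED case in the new kvm_mips_emul_tlbr(): the
architecture leaves TLBR behaviour unspecified when CP0_Index points
outside the implemented TLB, so the emulation is free to pick any
deterministic handling. Masking with KVM_MIPS_GUEST_TLB_SIZE - 1 keeps the
guest_tlb[] access in bounds (KVM_MIPS_GUEST_TLB_SIZE is 64, so for
example a stray Index of 100 reads entry 100 & 63 = 36) rather than
failing the emulation outright, which stays within the latitude the
"UNDEFINED" comment claims.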