Commit e01f1c70 authored by James Hogan, committed by Jiri Slaby

KVM: MIPS: Drop other CPU ASIDs on guest MMU changes

commit 91e4f1b6 upstream.

When a guest TLB entry is replaced by TLBWI or TLBWR, we only invalidate
TLB entries on the local CPU. This doesn't work correctly on an SMP host
when the guest is migrated to a different physical CPU, as it could pick
up stale TLB mappings from the last time the vCPU ran on that physical
CPU.

Therefore invalidate both the user and kernel host ASIDs on other CPUs,
which will cause new ASIDs to be generated when the vCPU next runs on
those CPUs.

We're careful to do this only if the TLB entry was already valid, and to
drop only the kernel ASID when the virtual address it mapped lies outside
the guest user address range.
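
The scheme is easiest to see in a simplified, self-contained model. The
user-space C sketch below is illustrative only (the names model_vcpu,
guest_tlb_write and vcpu_run_on_cpu are invented for this sketch and do
not exist in the kernel); it tracks one host ASID per vCPU per physical
CPU and shows how zeroing the cached ASIDs of every other CPU forces a
fresh ASID, and therefore a clean view of the host TLB, the next time
the vCPU runs there:

#include <stdio.h>

#define NR_CPUS 4

/*
 * Simplified user-space model of the ASID-dropping scheme (hypothetical
 * names, not the kernel implementation): each vCPU caches one host ASID
 * per physical CPU; a guest TLB write drops the cached ASIDs on every
 * CPU except the current one, so the next run there allocates fresh ones.
 */
struct model_vcpu {
        unsigned int guest_kernel_asid[NR_CPUS];  /* 0 means "nothing cached" */
        unsigned int guest_user_asid[NR_CPUS];
};

static unsigned int next_asid = 1;                /* toy ASID allocator */

/* Guest rewrites a TLB entry while the vCPU runs on 'cpu'. */
static void guest_tlb_write(struct model_vcpu *vcpu, int cpu, int user_mapping)
{
        for (int i = 0; i < NR_CPUS; i++) {
                if (i == cpu)
                        continue;        /* local CPU flushes its host TLB directly */
                if (user_mapping)
                        vcpu->guest_user_asid[i] = 0;
                vcpu->guest_kernel_asid[i] = 0;
        }
}

/* vCPU gets scheduled onto 'cpu': regenerate any dropped ASIDs. */
static void vcpu_run_on_cpu(struct model_vcpu *vcpu, int cpu)
{
        if (!vcpu->guest_kernel_asid[cpu])
                vcpu->guest_kernel_asid[cpu] = next_asid++;
        if (!vcpu->guest_user_asid[cpu])
                vcpu->guest_user_asid[cpu] = next_asid++;
        printf("cpu%d: kernel ASID %u, user ASID %u\n",
               cpu, vcpu->guest_kernel_asid[cpu], vcpu->guest_user_asid[cpu]);
}

int main(void)
{
        struct model_vcpu vcpu = { { 0 }, { 0 } };

        vcpu_run_on_cpu(&vcpu, 0);      /* first run on CPU0 */
        vcpu_run_on_cpu(&vcpu, 1);      /* migrated to CPU1 */
        guest_tlb_write(&vcpu, 1, 1);   /* TLBWI on CPU1 drops CPU0's cached ASIDs */
        vcpu_run_on_cpu(&vcpu, 0);      /* back on CPU0: new ASIDs, no stale TLB hits */
        return 0;
}

In the model, the guest TLB write performed on CPU 1 drops CPU 0's cached
ASIDs, so the later run on CPU 0 allocates new ones and can no longer hit
translations tagged with the old values, which is the same effect the
patch achieves with guest_user_asid[] and guest_kernel_asid[].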
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
[james.hogan@imgtec.com: Backport to 3.10..3.16]
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent f668f2ee
@@ -310,6 +310,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 	return er;
 }
 
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu:	VCPU with changed mappings.
+ * @tlb:	TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+					  struct kvm_mips_tlb *tlb)
+{
+	int cpu, i;
+	bool user;
+
+	/* No need to flush for entries which are already invalid */
+	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
+		return;
+	/* User address space doesn't need flushing for KSeg2/3 changes */
+	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+	preempt_disable();
+
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	/* Invalidate the whole ASID on other CPUs */
+	cpu = smp_processor_id();
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
+		if (user)
+			vcpu->arch.guest_user_asid[i] = 0;
+		vcpu->arch.guest_kernel_asid[i] = 0;
+	}
+
+	preempt_enable();
+}
+
 /* Write Guest TLB Entry @ Index */
 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
@@ -331,10 +372,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
+
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -373,10 +412,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -419,6 +455,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 	int32_t rt, rd, copz, sel, co_bit, op;
 	uint32_t pc = vcpu->arch.pc;
 	unsigned long curr_pc;
+	int cpu, i;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -538,8 +575,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 					ASID_MASK,
 					vcpu->arch.gprs[rt] & ASID_MASK);
 
+				preempt_disable();
 				/* Blow away the shadow host TLBs */
 				kvm_mips_flush_host_tlb(1);
+				cpu = smp_processor_id();
+				for_each_possible_cpu(i)
+					if (i != cpu) {
+						vcpu->arch.guest_user_asid[i] = 0;
+						vcpu->arch.guest_kernel_asid[i] = 0;
+					}
+				preempt_enable();
 			}
 			kvm_write_c0_guest_entryhi(cop0,
 					vcpu->arch.gprs[rt]);
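
A note on the preempt_disable()/preempt_enable() pairs in both hunks:
smp_processor_id() is only stable while preemption is disabled, and the
loop deliberately skips the local CPU because its stale translations are
removed by the direct host TLB flush rather than by dropping its ASIDs.
The fragment below is only a recap of that pattern with explanatory
comments; invalidate_other_cpus() is a hypothetical name, not something
the patch adds:

/* Recap of the pattern used by the patch (hypothetical helper name). */
static void invalidate_other_cpus(struct kvm_vcpu *vcpu)
{
	int cpu, i;

	preempt_disable();              /* no migration until preempt_enable() */
	cpu = smp_processor_id();       /* only safe to cache while pinned */
	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;       /* local CPU: host TLB was flushed directly */
		vcpu->arch.guest_user_asid[i] = 0;
		vcpu->arch.guest_kernel_asid[i] = 0;
	}
	preempt_enable();
}

If the task could migrate between the CPU-id read and the end of the
loop, the new CPU would be skipped even though its host TLB was never
flushed, reintroducing exactly the stale-mapping problem the patch fixes.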