Commit 491d6ecc authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S PR: Reduce number of shadow PTEs invalidated by MMU notifiers

Currently, whenever any of the MMU notifier callbacks get called, we
invalidate all the shadow PTEs.  This is inefficient because it means
that we typically then get a lot of DSIs and ISIs in the guest to fault
the shadow PTEs back in.  We do this even if the address range being
notified doesn't correspond to guest memory.

This commit adds code to scan the memslot array to find out what range(s)
of guest physical addresses correspond to the host virtual address range
being affected.  For each such range we flush only the shadow PTEs
for that range, on all CPUs.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent adc0bafe
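
For context on the conversion the new helper relies on: hva_to_gfn_memslot()
maps a host virtual address to a guest frame number using the memslot's
base_gfn and userspace_addr. A minimal sketch of that arithmetic (paraphrased
from the generic KVM headers of this era, not part of this commit's diff):

    static inline gfn_t hva_to_gfn_memslot(unsigned long hva,
                                           struct kvm_memory_slot *slot)
    {
            /* offset of the address within the memslot, in page frames */
            gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

            return slot->base_gfn + gfn_offset;
    }

Shifting the resulting frame numbers left by PAGE_SHIFT turns them back into
the guest physical addresses that kvmppc_mmu_pte_pflush() takes as its range.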
@@ -150,24 +150,48 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 }
 
 /************* MMU Notifiers *************/
+static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
+                             unsigned long end)
+{
+        long i;
+        struct kvm_vcpu *vcpu;
+        struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
+
+        slots = kvm_memslots(kvm);
+        kvm_for_each_memslot(memslot, slots) {
+                unsigned long hva_start, hva_end;
+                gfn_t gfn, gfn_end;
+
+                hva_start = max(start, memslot->userspace_addr);
+                hva_end = min(end, memslot->userspace_addr +
+                                        (memslot->npages << PAGE_SHIFT));
+                if (hva_start >= hva_end)
+                        continue;
+                /*
+                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                 * {gfn, gfn+1, ..., gfn_end-1}.
+                 */
+                gfn = hva_to_gfn_memslot(hva_start, memslot);
+                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+                kvm_for_each_vcpu(i, vcpu, kvm)
+                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
+                                              gfn_end << PAGE_SHIFT);
+        }
+}
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
         trace_kvm_unmap_hva(hva);
 
-        /*
-         * Flush all shadow tlb entries everywhere. This is slow, but
-         * we are 100% sure that we catch the to be unmapped page
-         */
-        kvm_flush_remote_tlbs(kvm);
+        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 
         return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-        /* kvm_unmap_hva flushes everything anyways */
-        kvm_unmap_hva(kvm, start);
+        do_kvm_unmap_hva(kvm, start, end);
 
         return 0;
 }
 
@@ -187,7 +211,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
         /* The page will get remapped properly on its next fault */
-        kvm_unmap_hva(kvm, hva);
+        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 }
 
 /*****************************************/
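
As a worked example of the new flush bounds (assuming 4 KiB pages, so
PAGE_SHIFT = 12, and a page-aligned hva that lies inside a memslot): for
kvm_unmap_hva() the notified range is [hva, hva + PAGE_SIZE), so

    hva_start = hva
    hva_end   = hva + PAGE_SIZE
    gfn       = base_gfn + ((hva - userspace_addr) >> 12)
    gfn_end   = base_gfn + ((hva + 2*PAGE_SIZE - 1 - userspace_addr) >> 12)
              = gfn + 1

    flush range = [gfn << 12, (gfn + 1) << 12)   /* exactly one guest page */

so only the shadow PTEs for the single affected guest page are flushed,
instead of every shadow PTE as before.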