Commit 71883a62 authored by Lan Tianyu, committed by Paolo Bonzini

KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()

Originally, the TLB flush is done by slot_handle_level_range(). This patch
moves the flush directly into kvm_zap_gfn_range() when a range flush is
available, so that only the requested range is flushed.
Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3cc5ea94
@@ -5624,8 +5624,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	bool flush_tlb = true;
+	bool flush = false;
 	int i;
 
+	if (kvm_available_flush_tlb_with_range())
+		flush_tlb = false;
+
 	spin_lock(&kvm->mmu_lock);
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		slots = __kvm_memslots(kvm, i);
@@ -5637,12 +5642,17 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 			if (start >= end)
 				continue;
 
-			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-					PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-					start, end - 1, true);
+			flush |= slot_handle_level_range(kvm, memslot,
+					kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+					PT_MAX_HUGEPAGE_LEVEL, start,
+					end - 1, flush_tlb);
 		}
 	}
 
+	if (flush)
+		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+				gfn_end - gfn_start + 1);
+
 	spin_unlock(&kvm->mmu_lock);
 }
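
For context, this change depends on the range-flush helpers added earlier in the same series, kvm_available_flush_tlb_with_range() and kvm_flush_remote_tlbs_with_address(). The sketch below is a simplified reconstruction of how those helpers are wired up in that series; it is not part of this diff and the exact code in the tree may differ slightly.

/*
 * Simplified sketch of the range-flush helpers this patch relies on
 * (from the same patch series, not part of this diff). A range flush is
 * "available" only when the backend provides tlb_remote_flush_with_range;
 * otherwise the code falls back to a full remote TLB flush.
 */
static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops->tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops->tlb_remote_flush_with_range)
		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

	/* Fall back to flushing everything if the ranged flush failed. */
	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

With these helpers in place, kvm_zap_gfn_range() can pass flush_tlb = false into slot_handle_level_range() and issue a single flush covering gfn_start..gfn_end at the end, instead of flushing the whole TLB from inside the memslot walk.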