Commit 7f42aa76 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Consolidate open coded variants of memslot TLB flushes

Replace open coded instances of kvm_arch_flush_remote_tlbs_memslot()'s
functionality with calls to the aforementioned function.  Update the
comment in kvm_arch_flush_remote_tlbs_memslot() to elaborate on how it
is used and why it asserts that slots_lock is held.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cec37648
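
As context for the diff below, here is a minimal, self-contained C sketch of the consolidation pattern. The types and names (slots_lock_held, arch_flush_remote_tlbs_memslot(), slot_remove_write_access()) are simplified stand-ins for the kernel's, not the kernel code itself:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct kvm_memory_slot {
	uint64_t base_gfn;	/* first guest frame number in the slot */
	uint64_t npages;	/* size of the slot in pages */
};

struct kvm {
	bool slots_lock_held;	/* stand-in for the slots_lock mutex */
};

/* stand-in for kvm_flush_remote_tlbs_with_address() */
static void flush_remote_tlbs_with_address(struct kvm *kvm,
					   uint64_t start_gfn, uint64_t pages)
{
	(void)kvm;
	(void)start_gfn;
	(void)pages;
	/* the real code kicks all vCPUs to invalidate the given range */
}

/*
 * The consolidated helper: the lock assertion and the base_gfn/npages
 * arithmetic now live in exactly one place.
 */
static void arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					   struct kvm_memory_slot *memslot)
{
	assert(kvm->slots_lock_held);	/* mirrors lockdep_assert_held() */
	flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
				       memslot->npages);
}

/* callers shrink to a single line, as in the diff below */
static void slot_remove_write_access(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     bool flush)
{
	if (flush)
		arch_flush_remote_tlbs_memslot(kvm, memslot);
}

The point of the consolidation is that every caller inherits the locking assertion and the flush-range computation for free, instead of each open coding its own copy.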
@@ -5862,13 +5862,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 					false);
 	spin_unlock(&kvm->mmu_lock);
 
-	/*
-	 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
-	 * which do tlb flush out of mmu-lock should be serialized by
-	 * kvm->slots_lock otherwise tlb flush would be missed.
-	 */
-	lockdep_assert_held(&kvm->slots_lock);
-
 	/*
 	 * We can flush all the TLBs out of the mmu lock without TLB
 	 * corruption since we just change the spte from writable to
@@ -5881,8 +5874,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	 * on PT_WRITABLE_MASK anymore.
 	 */
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-			memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5938,8 +5930,11 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot)
 {
 	/*
-	 * All the TLBs can be flushed out of mmu lock, see the comments in
-	 * kvm_mmu_slot_remove_write_access().
+	 * All current use cases for flushing the TLBs for a specific memslot
+	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
+	 * The interaction between the various operations on the memslot must
+	 * be serialized by slots_lock to ensure the TLB flush from one
+	 * operation is observed by any other operation on the same memslot.
 	 */
 	lockdep_assert_held(&kvm->slots_lock);
 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
@@ -5955,8 +5950,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
 	spin_unlock(&kvm->mmu_lock);
 
-	lockdep_assert_held(&kvm->slots_lock);
-
 	/*
 	 * It's also safe to flush TLBs out of mmu lock here as currently this
 	 * function is only used for dirty logging, in which case flushing TLB
@@ -5964,8 +5957,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 	 * dirty_bitmap.
 	 */
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-				memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
@@ -5979,12 +5971,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 					false);
 	spin_unlock(&kvm->mmu_lock);
 
-	/* see kvm_mmu_slot_remove_write_access */
-	lockdep_assert_held(&kvm->slots_lock);
-
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-				memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
@@ -5997,12 +5985,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
 	spin_unlock(&kvm->mmu_lock);
 
-	lockdep_assert_held(&kvm->slots_lock);
-
-	/* see kvm_mmu_slot_leaf_clear_dirty */
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-				memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
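
To make the locking rule in the updated comment concrete, here is a hypothetical caller sketch, reusing the simplified types from the sketch above; get_dirty_log_and_protect() is an illustrative name, not a kernel function:

/*
 * Hypothetical dirty-logging path: the page-table update and the TLB
 * flush both happen under slots_lock. Without that serialization, a
 * second operation on the same memslot could run between the two steps
 * and miss the flush, leaving a vCPU writing through a stale TLB entry.
 */
static void get_dirty_log_and_protect(struct kvm *kvm,
				      struct kvm_memory_slot *memslot)
{
	kvm->slots_lock_held = true;	/* stand-in for mutex_lock() */

	/* ... write-protect the slot and harvest its dirty bitmap ... */

	arch_flush_remote_tlbs_memslot(kvm, memslot);

	kvm->slots_lock_held = false;	/* stand-in for mutex_unlock() */
}

This is also why the consolidated helper only asserts slots_lock rather than taking it: the flush has to stay inside the caller's critical section for the next operation on the slot to observe it.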