Commit 85875a13 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move slot_level_*() helper functions up a few lines

...so that kvm_mmu_invalidate_zap_pages_in_memslot() can utilize the
helpers in future patches.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 164bf7e5
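
Before the diff itself, a note on the pattern being moved: slot_handle_level_range() applies a per-rmap callback across a guest-frame range, OR-accumulating the callbacks' "TLB flush needed" results into a single flag, flushing whenever the walk has to drop mmu_lock and once more at the end. The block below is a minimal, self-contained userspace C analogue of that shape, not kernel code; every mini_* name and the "every fourth slot" break point are invented for illustration.

/* mini_slot_walk.c - standalone analogue of the flush-accumulation pattern. */
#include <stdbool.h>
#include <stdio.h>

struct mini_rmap_head { int nr_sptes; };

/* Return value reports whether a TLB flush is needed, mirroring slot_level_handler. */
typedef bool (*mini_level_handler)(struct mini_rmap_head *rmap_head);

/* Zap everything hanging off one rmap head; request a flush if anything was zapped. */
static bool mini_zap_handler(struct mini_rmap_head *rmap_head)
{
	bool zapped = rmap_head->nr_sptes > 0;

	rmap_head->nr_sptes = 0;
	return zapped;
}

/* Walk a range of rmap heads, OR-accumulating the handler's flush requests,
 * and flushing early at simulated "drop the lock" points. */
static bool mini_handle_range(struct mini_rmap_head *rmaps, int start, int end,
			      mini_level_handler fn)
{
	bool flush = false;

	for (int i = start; i < end; i++) {
		flush |= fn(&rmaps[i]);

		/* Stand-in for need_resched()/spin_needbreak(): flush before
		 * "releasing the lock" so no zapped entry is left unflushed. */
		if (flush && (i % 4 == 3)) {
			printf("early flush after slot %d\n", i);
			flush = false;
		}
	}
	return flush;	/* caller performs one final flush if still set */
}

int main(void)
{
	struct mini_rmap_head rmaps[8] = { {1}, {0}, {2}, {0}, {3}, {0}, {0}, {1} };

	if (mini_handle_range(rmaps, 0, 8, mini_zap_handler))
		printf("final flush needed\n");
	return 0;
}
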
@@ -5487,6 +5487,76 @@ void kvm_disable_tdp(void)
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu->pae_root);
@@ -5561,75 +5631,6 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
	kvm_page_track_unregister_notifier(kvm, node);
}
/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
...
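
The wrappers at the bottom of the moved block (slot_handle_all_level(), slot_handle_large_level(), slot_handle_leaf()) add no logic of their own; they only pin the page-table level range before delegating to slot_handle_level_range(). The sketch below is a small, self-contained C illustration of that layering; the demo_* names and level constants are invented for the example and are not the kernel's API.

/* demo_levels.c - standalone sketch of level-bounding wrapper layering. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PT_PAGE_TABLE_LEVEL   1	/* 4K mappings */
#define DEMO_PT_MAX_HUGEPAGE_LEVEL 3	/* largest huge-page level in this sketch */

typedef bool (*demo_level_handler)(int level);

/* Generic walker: apply the handler to every level in [start_level, end_level]. */
static bool demo_handle_level_range(demo_level_handler fn, int start_level,
				    int end_level)
{
	bool flush = false;

	for (int level = start_level; level <= end_level; level++)
		flush |= fn(level);
	return flush;
}

/* Thin wrappers: fix the level range, delegate everything else. */
static bool demo_handle_all_level(demo_level_handler fn)
{
	return demo_handle_level_range(fn, DEMO_PT_PAGE_TABLE_LEVEL,
				       DEMO_PT_MAX_HUGEPAGE_LEVEL);
}

static bool demo_handle_large_level(demo_level_handler fn)
{
	return demo_handle_level_range(fn, DEMO_PT_PAGE_TABLE_LEVEL + 1,
				       DEMO_PT_MAX_HUGEPAGE_LEVEL);
}

static bool demo_handle_leaf(demo_level_handler fn)
{
	return demo_handle_level_range(fn, DEMO_PT_PAGE_TABLE_LEVEL,
				       DEMO_PT_PAGE_TABLE_LEVEL);
}

static bool demo_print_level(int level)
{
	printf("visited level %d\n", level);
	return level == DEMO_PT_PAGE_TABLE_LEVEL;	/* pretend the 4K level needs a flush */
}

int main(void)
{
	printf("leaf-only walk needs flush: %d\n", demo_handle_leaf(demo_print_level));
	printf("large-only walk needs flush: %d\n", demo_handle_large_level(demo_print_level));
	printf("all-level walk needs flush: %d\n", demo_handle_all_level(demo_print_level));
	return 0;
}
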