Commit d012a06a authored by Paolo Bonzini

Revert "KVM: x86/mmu: Zap only the relevant pages when removing a memslot"

This reverts commit 4e103134.
Alex Williamson reported regressions in device assignment caused by
this patch.  Even though the bug is probably elsewhere and still
latent, the revert is needed to fix the regression.

Fixes: 4e103134 ("KVM: x86/mmu: Zap only the relevant pages when removing a memslot", 2019-02-05)
Reported-by: Alex Williamson <alex.williamson@redhat.com>
Cc: stable@vger.kernel.org
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 54577e50
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	unsigned long i;
-	bool flush;
-	gfn_t gfn;
-
-	spin_lock(&kvm->mmu_lock);
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		goto out_unlock;
-
-	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-	for (i = 0; i < slot->npages; i++) {
-		gfn = slot->base_gfn + i;
-
-		for_each_valid_sp(kvm, sp, gfn) {
-			if (sp->gfn != gfn)
-				continue;
-
-			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		}
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-			flush = false;
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
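For context, a minimal sketch (not part of this commit) of how the flush hook above is reached when a memslot is removed, paraphrased from the kernel sources of that era; the exact bodies of kvm_arch_flush_shadow_memslot and kvm_mmu_init_vm are quoted from memory and may differ in detail:

/*
 * Context sketch, not part of this commit: memslot removal reaches
 * kvm_mmu_invalidate_zap_pages_in_memslot() through the page-track
 * notifier machinery.
 */

/* arch/x86/kvm/x86.c: called when a memslot is deleted or moved. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/* Invokes every registered track_flush_slot callback. */
	kvm_page_track_flush_slot(kvm, slot);
}

/* arch/x86/kvm/mmu.c: registers the callback this commit reverts. */
void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

With the revert in place, any memslot removal therefore falls back to kvm_mmu_zap_all(), dropping every shadow page rather than only the pages backed by the removed slot; this restores the historical behavior that predates commit 4e103134.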