Commit 52d5dedc authored by Sean Christopherson, committed by Paolo Bonzini

Revert "KVM: MMU: reclaim the zapped-obsolete page first"

Unwinding optimizations related to obsolete pages is a step towards
removing x86 KVM's fast invalidate mechanism, i.e. this is one step in
reverting all the patches from the series that introduced the mechanism[1].

This reverts commit 365c8868.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5ff05683
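For context, a minimal sketch of the zapping pattern this revert restores (the function name zap_obsolete_pages_sketch is hypothetical, and the batching, lock-yielding, and restart logic visible in the diff below are omitted): obsolete pages are collected on an on-stack invalid_list and freed in one batch after the TLB flush, rather than being parked on the per-VM kvm->arch.zapped_obsolete_pages list for the MMU shrinker to drain later.

/*
 * Sketch only, not the kernel's actual kvm_zap_obsolete_pages().
 * Caller must hold kvm->mmu_lock.
 */
static void zap_obsolete_pages_sketch(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);	/* on-stack, function-local */

	/* Walk active_mmu_pages from the tail, where the oldest pages live. */
	list_for_each_entry_safe_reverse(sp, node,
					 &kvm->arch.active_mmu_pages, link) {
		/* Stop once pages are no longer from a stale generation. */
		if (!is_obsolete_sp(kvm, sp))
			break;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}

	/*
	 * Flush TLBs before freeing the page tables, since lockless
	 * walkers may still be using them; then free the whole batch.
	 */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}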
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -851,7 +851,6 @@ struct kvm_arch {
 	 * Hash table of struct kvm_mmu_page.
 	 */
 	struct list_head active_mmu_pages;
-	struct list_head zapped_obsolete_pages;
 	struct kvm_page_track_notifier_node mmu_sp_tracker;
 	struct kvm_page_track_notifier_head track_notifier_head;
 
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5858,6 +5858,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
+	LIST_HEAD(invalid_list);
 	int batch = 0;
 
 restart:
@@ -5890,8 +5891,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 			goto restart;
 		}
 
-		ret = kvm_mmu_prepare_zap_page(kvm, sp,
-				&kvm->arch.zapped_obsolete_pages);
+		ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 		batch += ret;
 
 		if (ret)
@@ -5902,7 +5902,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 	 * Should flush tlb before free page tables since lockless-walking
 	 * may use the pages.
 	 */
-	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
 /*
@@ -5935,11 +5935,6 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
-{
-	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
-}
-
 static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
@@ -6011,24 +6006,16 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		 * want to shrink a VM that only started to populate its MMU
 		 * anyway.
 		 */
-		if (!kvm->arch.n_used_mmu_pages &&
-		    !kvm_has_zapped_obsolete_pages(kvm))
+		if (!kvm->arch.n_used_mmu_pages)
 			continue;
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 
-		if (kvm_has_zapped_obsolete_pages(kvm)) {
-			kvm_mmu_commit_zap_page(kvm,
-					&kvm->arch.zapped_obsolete_pages);
-			goto unlock;
-		}
-
 		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
 			freed++;
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
-unlock:
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);
 
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9113,7 +9113,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 