Commit 7d919c7a authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Refactor the zap loop for recovering NX lpages

Refactor the zap loop in kvm_recover_nx_lpages() to be a for loop that
iterates on to_zap and drop the !to_zap check that leads to the in-loop
calling of kvm_mmu_commit_zap_page().  The in-loop commit when to_zap
hits zero is superfluous now that there's an unconditional commit after
the loop to handle the case where lpage_disallowed_mmu_pages is emptied.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e8950569
@@ -6375,7 +6375,10 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 
 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
 	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
-	while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
+	for ( ; to_zap; --to_zap) {
+		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
+			break;
+
 		/*
 		 * We use a separate list instead of just using active_mmu_pages
 		 * because the number of lpage_disallowed pages is expected to
@@ -6388,10 +6391,9 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 		WARN_ON_ONCE(sp->lpage_disallowed);
 
-		if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
-			if (to_zap)
-				cond_resched_lock(&kvm->mmu_lock);
+			cond_resched_lock(&kvm->mmu_lock);
 		}
 	}
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
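For readers outside the KVM code base, the shape of the refactored loop can be modeled in plain userspace C. The sketch below is hypothetical: prepare_zap(), commit_zap(), list_empty(), and the flush cadence are stand-ins for kvm_mmu_prepare_zap_page(), kvm_mmu_commit_zap_page(), the lpage_disallowed_mmu_pages check, and need_resched()/spin_needbreak(); none of it is the kernel API. It shows the budgeted for loop, the early break when the list empties, the periodic batch commit, and the unconditional commit after the loop that makes an in-loop !to_zap check unnecessary.

/* Hypothetical, simplified userspace model of the refactored zap loop. */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES          10
#define BATCH_FLUSH_EVERY 3   /* stand-in for need_resched() firing */

static int pending[NR_PAGES]; /* stand-in for invalid_list */
static int npending;
static int remaining = NR_PAGES; /* stand-in for the disallowed-page list */

static bool list_empty(void)
{
	return remaining == 0;
}

static void prepare_zap(int page)
{
	pending[npending++] = page; /* defer: queue on the "invalid list" */
	remaining--;
}

static void commit_zap(void)
{
	for (int i = 0; i < npending; i++)
		printf("  committed zap of page %d\n", pending[i]);
	npending = 0;
}

int main(void)
{
	unsigned long to_zap = 7; /* ratio-derived budget, as in the patch */
	int iter = 0;

	for ( ; to_zap; --to_zap) {
		if (list_empty())
			break;

		prepare_zap(iter);

		/*
		 * Model of need_resched()/spin_needbreak(): flush the batch
		 * and yield periodically, with no special-casing of the
		 * final iteration.
		 */
		if (++iter % BATCH_FLUSH_EVERY == 0) {
			commit_zap();
			/* cond_resched_lock() would go here in the kernel */
		}
	}

	/* The unconditional commit that makes an in-loop budget check
	 * superfluous: whatever the loop left queued is flushed here. */
	commit_zap();
	return 0;
}

The design point mirrored here is that correctness no longer depends on the final iteration happening to flush the batch; the post-loop commit handles whatever remains, so the in-loop condition only has to decide when to yield the lock.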