Commit 479a1efc authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Drop the intermediate "transient" __kvm_sync_page()

Move the kvm_unlink_unsync_page() call out of kvm_sync_page() and into
its sole caller, and fold __kvm_sync_page() into kvm_sync_page() since
the latter becomes a pure pass-through.  There really should be no reason
for code to do a complete sync of a shadow page outside of the full
kvm_mmu_sync_roots(), e.g. the one use case that crept in turned out to
be flawed and counter-productive.

Drop the stale comment about @sp->gfn needing to be write-protected, as
it directly contradicts the kvm_mmu_get_page() usage.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 07dc4f35
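
Before the diff itself, a minimal self-contained C sketch of the refactor pattern, assuming stub types: the real functions live in arch/x86/kvm/mmu/mmu.c and take a vcpu, an sp, and an invalid_list, so only the shape of the change is shown here. The unlink side effect moves from the wrapper into the sole caller that needs it, and the wrapped body becomes the function itself. The type body and sync_one_child() helper are hypothetical stand-ins, not kernel code.

    #include <stdbool.h>

    /* Stand-in type; the real struct kvm_mmu_page is far richer. */
    struct kvm_mmu_page {
            bool unsync;
    };

    /* Stand-in for the real kvm_unlink_unsync_page(). */
    static void kvm_unlink_unsync_page(struct kvm_mmu_page *sp)
    {
            sp->unsync = false;
    }

    /*
     * After the patch there is a single kvm_sync_page() with no unlink
     * side effect; it only syncs the page (body elided here) and reports
     * whether a TLB flush is needed.
     */
    static bool kvm_sync_page(struct kvm_mmu_page *sp)
    {
            /* Sync sp against the guest page tables; zap on failure (elided). */
            return true;
    }

    /*
     * The sole caller that wants the unlink, mmu_sync_children(), now
     * does it explicitly before syncing (condensed, hypothetical loop body).
     */
    static void sync_one_child(struct kvm_mmu_page *sp, bool *flush)
    {
            kvm_unlink_unsync_page(sp);
            *flush |= kvm_sync_page(sp);
    }

    int main(void)
    {
            struct kvm_mmu_page sp = { .unsync = true };
            bool flush = false;

            sync_one_child(&sp, &flush);
            return (sp.unsync || !flush);   /* 0 on the expected path */
    }
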
@@ -1780,9 +1780,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 		&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
-/* @sp->gfn should be write-protected at the call site */
-static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    struct list_head *invalid_list)
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			  struct list_head *invalid_list)
 {
 	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
@@ -1830,13 +1829,6 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			  struct list_head *invalid_list)
-{
-	kvm_unlink_unsync_page(vcpu->kvm, sp);
-	return __kvm_sync_page(vcpu, sp, invalid_list);
-}
-
 struct mmu_page_path {
 	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
 	unsigned int idx[PT64_ROOT_MAX_LEVEL];
@@ -1931,6 +1923,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 		}
 
 		for_each_sp(pages, sp, parents, i) {
+			kvm_unlink_unsync_page(vcpu->kvm, sp);
 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
 			mmu_pages_clear_parents(&parents);
 		}
@@ -2009,7 +2002,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->unsync) {
 			/*
-			 * The page is good, but is stale.  __kvm_sync_page does
+			 * The page is good, but is stale.  kvm_sync_page does
 			 * get the latest guest state, but (unlike mmu_unsync_children)
 			 * it doesn't write-protect the page or mark it synchronized!
 			 * This way the validity of the mapping is ensured, but the
@@ -2020,7 +2013,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			 * If the sync fails, the page is zapped.  If so, break
 			 * in order to rebuild it.
 			 */
-			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+			if (!kvm_sync_page(vcpu, sp, &invalid_list))
 				break;
 			WARN_ON(!list_empty(&invalid_list));