Commit 0337f585 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Rename unsync helper and update related comments

Rename mmu_need_write_protect() to mmu_try_to_unsync_pages() and update
a variety of related, stale comments.  Add several new comments to call
out subtle details, e.g. that upper-level shadow pages are write-tracked,
and that can_unsync is false iff KVM is in the process of synchronizing
pages.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 479a1efc
...@@ -2458,17 +2458,33 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) ...@@ -2458,17 +2458,33 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
kvm_mmu_mark_parents_unsync(sp); kvm_mmu_mark_parents_unsync(sp);
} }
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, /*
bool can_unsync) * Attempt to unsync any shadow pages that can be reached by the specified gfn,
* KVM is creating a writable mapping for said gfn. Returns 0 if all pages
* were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
* be write-protected.
*/
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{ {
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
/*
* Force write-protection if the page is being tracked. Note, the page
* track machinery is used to write-protect upper-level shadow pages,
* i.e. this guards the role.level == 4K assertion below!
*/
if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
return true; return -EPERM;
/*
* The page is not write-tracked, mark existing shadow pages unsync
* unless KVM is synchronizing an unsync SP (can_unsync = false). In
* that case, KVM must complete emulation of the guest TLB flush before
* allowing shadow pages to become unsync (writable by the guest).
*/
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (!can_unsync) if (!can_unsync)
return true; return -EPERM;
if (sp->unsync) if (sp->unsync)
continue; continue;
...@@ -2499,8 +2515,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -2499,8 +2515,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
* 2.2 Guest issues TLB flush. * 2.2 Guest issues TLB flush.
* That causes a VM Exit. * That causes a VM Exit.
* *
* 2.3 kvm_mmu_sync_pages() reads sp->unsync. * 2.3 Walking of unsync pages sees sp->unsync is
* Since it is false, so it just returns. * false and skips the page.
* *
* 2.4 Guest accesses GVA X. * 2.4 Guest accesses GVA X.
* Since the mapping in the SP was not updated, * Since the mapping in the SP was not updated,
...@@ -2516,7 +2532,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -2516,7 +2532,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
*/ */
smp_wmb(); smp_wmb();
return false; return 0;
} }
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
...@@ -3461,8 +3477,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) ...@@ -3461,8 +3477,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
* flush strictly after those changes are made. We only need to * flush strictly after those changes are made. We only need to
* ensure that the other CPU sets these flags before any actual * ensure that the other CPU sets these flags before any actual
* changes to the page tables are made. The comments in * changes to the page tables are made. The comments in
* mmu_need_write_protect() describe what could go wrong if this * mmu_try_to_unsync_pages() describe what could go wrong if
* requirement isn't satisfied. * this requirement isn't satisfied.
*/ */
if (!smp_load_acquire(&sp->unsync) && if (!smp_load_acquire(&sp->unsync) &&
!smp_load_acquire(&sp->unsync_children)) !smp_load_acquire(&sp->unsync_children))
......
...@@ -122,8 +122,7 @@ static inline bool is_nx_huge_page_enabled(void) ...@@ -122,8 +122,7 @@ static inline bool is_nx_huge_page_enabled(void)
return READ_ONCE(nx_huge_pages); return READ_ONCE(nx_huge_pages);
} }
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);
bool can_unsync);
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
......
...@@ -147,13 +147,19 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level, ...@@ -147,13 +147,19 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
/* /*
* Optimization: for pte sync, if spte was writable the hash * Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection * lookup is unnecessary (and expensive). Write protection
* is responsibility of mmu_get_page / kvm_sync_page. * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
* Same reasoning can be applied to dirty page accounting. * Same reasoning can be applied to dirty page accounting.
*/ */
if (!can_unsync && is_writable_pte(old_spte)) if (!can_unsync && is_writable_pte(old_spte))
goto out; goto out;
if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { /*
* Unsync shadow pages that are reachable by the new, writable
* SPTE. Write-protect the SPTE if the page can't be unsync'd,
* e.g. it's write-tracked (upper-level SPs) or has one or more
* shadow pages and unsync'ing pages is not allowed.
*/
if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
pgprintk("%s: found shadow page for %llx, marking ro\n", pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn); __func__, gfn);
ret |= SET_SPTE_WRITE_PROTECTED_PT; ret |= SET_SPTE_WRITE_PROTECTED_PT;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment