Commit ce92ef76 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Use shadow page role to detect PML-unfriendly pages for L2

Rework make_spte() to query the shadow page's role, specifically whether
or not it's a guest_mode page, a.k.a. a page for L2, when determining if
the SPTE is compatible with PML.  This eliminates a dependency on @vcpu,
with a future goal of being able to create SPTEs without a specific vCPU.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8fc78909
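
The pattern in this commit is to derive the "needs write protection for dirty logging" decision from the role bits stored in the shadow page itself (sp->role.guest_mode together with kvm_x86_ops.cpu_dirty_log_size), instead of asking the vCPU which MMU it is currently using. Below is a minimal, standalone C sketch of that pattern; all type and function names are illustrative stand-ins, not KVM's real definitions, and cpu_dirty_log_size here is just a placeholder for "PML is available".

#include <stdbool.h>

/* Illustrative role bitfield carried by each shadow page. */
struct page_role {
	unsigned int guest_mode : 1;   /* page maps memory on behalf of a nested (L2) guest */
	unsigned int ad_disabled : 1;  /* accessed/dirty bits disabled for this page */
};

struct shadow_page {
	struct page_role role;
};

/* Stand-in for kvm_x86_ops.cpu_dirty_log_size being non-zero (PML enabled). */
static int cpu_dirty_log_size = 512;

/*
 * The check needs only the shadow page, not a vCPU, so an SPTE can be
 * built without a specific vCPU in hand -- the goal stated in the
 * commit message above.
 */
static bool page_needs_write_protect(const struct shadow_page *sp)
{
	return cpu_dirty_log_size && sp->role.guest_mode;
}

int main(void)
{
	struct shadow_page l2_page = { .role = { .guest_mode = 1 } };
	struct shadow_page l1_page = { .role = { .guest_mode = 0 } };

	/* L2 pages fall back to write protection; L1 pages can rely on PML. */
	return page_needs_write_protect(&l2_page) &&
	       !page_needs_write_protect(&l1_page) ? 0 : 1;
}
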
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -104,7 +104,7 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 	return kvm_mmu_role_as_id(sp->role);
 }
 
-static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
 {
 	/*
 	 * When using the EPT page-modification log, the GPAs in the CPU dirty
@@ -112,10 +112,9 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 	 * on write protection to record dirty pages, which bypasses PML, since
 	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
 	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
-	 * are reserved for NPT w/ PAE (32-bit KVM).
+	 * are reserved for PAE paging (32-bit KVM).
 	 */
-	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
-	       kvm_x86_ops.cpu_dirty_log_size;
+	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
 }
 
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -101,7 +101,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 
 	if (sp->role.ad_disabled)
 		spte |= SPTE_TDP_AD_DISABLED_MASK;
-	else if (kvm_vcpu_ad_need_write_protect(vcpu))
+	else if (kvm_mmu_page_ad_need_write_protect(sp))
 		spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
 
 	/*