Commit 3cf06612 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Capture requested page level before NX huge page workaround

Apply the "huge page disallowed" adjustment of the max level only after
capturing the original requested level.  The requested level will be
used in a future patch to skip adding pages to the list of disallowed
huge pages if a huge page wasn't possible anyway, e.g. if the page
isn't mapped as a huge page in the host.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6c2fd34f
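
To make the new ordering concrete before reading the diff, here is a minimal, self-contained C sketch of the restructured helper. It is an illustration only, not the kernel code: hugepage_adjust(), host_level(), and the simplified PG_LEVEL_* constants are stand-ins for kvm_mmu_hugepage_adjust(), host_pfn_mapping_level(), and the real KVM definitions. The point is that *req_level records the level the fault could have used before the huge_page_disallowed clamp forces a 4K mapping.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for KVM's page-level constants. */
enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

/*
 * Stand-in for host_pfn_mapping_level(); pretend the host backs the
 * gfn with a 2M page.
 */
static int host_level(void)
{
	return PG_LEVEL_2M;
}

static int hugepage_adjust(int max_level, bool huge_page_disallowed,
			   int *req_level)
{
	int level;

	*req_level = PG_LEVEL_4K;

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	level = host_level();
	if (level == PG_LEVEL_4K)
		return level;

	/* Capture the requested level first... */
	*req_level = level = level < max_level ? level : max_level;

	/*
	 * ...then enforce the iTLB multihit workaround, so the caller
	 * still knows what level the fault wanted.
	 */
	if (huge_page_disallowed)
		return PG_LEVEL_4K;

	return level;
}

int main(void)
{
	int req_level;
	int level = hugepage_adjust(PG_LEVEL_1G, true, &req_level);

	/*
	 * Prints "mapped at 1, requested 2": the workaround forced a
	 * 4K mapping, but the 2M request was still recorded.
	 */
	printf("mapped at %d, requested %d\n", level, req_level);
	return 0;
}

This separation is what lets a later patch skip the disallowed-huge-page list when req_level is already PG_LEVEL_4K, i.e. when a huge page was never possible to begin with.
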
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3267,7 +3267,8 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
 }
 
 static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
-				   int max_level, kvm_pfn_t *pfnp)
+				   int max_level, kvm_pfn_t *pfnp,
+				   bool huge_page_disallowed, int *req_level)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
@@ -3275,6 +3276,8 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 	kvm_pfn_t mask;
 	int level;
 
+	*req_level = PG_LEVEL_4K;
+
 	if (unlikely(max_level == PG_LEVEL_4K))
 		return PG_LEVEL_4K;
 
@@ -3299,7 +3302,14 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 	if (level == PG_LEVEL_4K)
 		return level;
 
-	level = min(level, max_level);
+	*req_level = level = min(level, max_level);
+
+	/*
+	 * Enforce the iTLB multihit workaround after capturing the requested
+	 * level, which will be used to do precise, accurate accounting.
+	 */
+	if (huge_page_disallowed)
+		return PG_LEVEL_4K;
 
 	/*
 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
@@ -3345,17 +3355,15 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
 	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
-	int level, ret;
+	int level, req_level, ret;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	gfn_t base_gfn = gfn;
 
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
 
-	if (huge_page_disallowed)
-		max_level = PG_LEVEL_4K;
-
-	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);
+	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
+					huge_page_disallowed, &req_level);
 
 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
 
 	for_each_shadow_entry(vcpu, gpa, it) {
...
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -636,7 +636,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;
-	int top_level, hlevel, ret;
+	int top_level, hlevel, req_level, ret;
 	gfn_t base_gfn = gw->gfn;
 
 	direct_access = gw->pte_access;
@@ -682,10 +682,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 		link_shadow_page(vcpu, it.sptep, sp);
 	}
 
-	if (huge_page_disallowed)
-		max_level = PG_LEVEL_4K;
-
-	hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn);
+	hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
+					 huge_page_disallowed, &req_level);
 
 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
 
...