Commit 6c2fd34f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move "huge page disallowed" calculation into mapping helpers

Calculate huge_page_disallowed in __direct_map() and FNAME(fetch) in
preparation for reworking the calculation so that it preserves the
requested map level and eventually to avoid flagging a shadow page as
being disallowed for being used as a large/huge page when it couldn't
have been huge in the first place, e.g. because the backing page in the
host is not large.
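
In concrete terms, the level clamp that previously lived in the page fault handlers moves into the mapping helpers, keyed off a locally computed huge_page_disallowed. A standalone sketch of that logic (the PG_LEVEL_* values and the module-param check are stubbed here for illustration; the real helpers also walk the shadow page tables):

#include <stdbool.h>
#include <stdio.h>

/* Stubs for illustration: PG_LEVEL_4K/2M are 1/2 in the kernel's
 * arch/x86/include/asm/pgtable_types.h, and the real
 * is_nx_huge_page_enabled() reads a module parameter. */
#define PG_LEVEL_4K 1
#define PG_LEVEL_2M 2

static bool is_nx_huge_page_enabled(void) { return true; }

/* The check the mapping helpers now perform themselves: an exec fault
 * with the NX huge page workaround active must not map a huge page. */
static int clamp_max_level(bool exec, int max_level)
{
	bool huge_page_disallowed = exec && is_nx_huge_page_enabled();

	if (huge_page_disallowed)
		max_level = PG_LEVEL_4K;
	return max_level;
}

int main(void)
{
	printf("exec fault:  max_level=%d\n", clamp_max_level(true, PG_LEVEL_2M));
	printf("other fault: max_level=%d\n", clamp_max_level(false, PG_LEVEL_2M));
	return 0;
}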

Pass the error code into the helpers and use it to recalculate exec and
write_fault instead of adding yet more booleans to the parameters.
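
For reference, the bits being decoded are the architectural x86 #PF error-code bits, mirrored by the kernel's PFERR_*_MASK definitions (write = bit 1, instruction fetch = bit 4). A minimal sketch of the recalculation the helpers now do internally:

#include <stdbool.h>
#include <stdio.h>

/* Architectural x86 #PF error-code bits, matching the kernel's
 * PFERR_WRITE_MASK and PFERR_FETCH_MASK definitions. */
#define PFERR_WRITE_MASK (1u << 1)
#define PFERR_FETCH_MASK (1u << 4)

int main(void)
{
	unsigned int error_code = PFERR_FETCH_MASK; /* e.g. an exec fault */

	/* Recalculate the access type from the error code, rather than
	 * passing write/exec as separate boolean parameters. */
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;

	printf("write=%d exec=%d\n", write, exec);
	return 0;
}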

Opportunistically use huge_page_disallowed instead of lpage_disallowed
to match the nomenclature used within the mapping helpers (though even
they have existing inconsistencies).

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-4-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7d919c7a
arch/x86/kvm/mmu/mmu.c:

@@ -3335,10 +3335,14 @@ static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
 	}
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 			int map_writable, int max_level, kvm_pfn_t pfn,
-			bool prefault, bool account_disallowed_nx_lpage)
+			bool prefault, bool is_tdp)
 {
+	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
+	bool write = error_code & PFERR_WRITE_MASK;
+	bool exec = error_code & PFERR_FETCH_MASK;
+	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
 	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
 	int level, ret;

@@ -3348,6 +3352,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
 
+	if (huge_page_disallowed)
+		max_level = PG_LEVEL_4K;
+
 	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);
 
 	trace_kvm_mmu_spte_requested(gpa, level, pfn);

@@ -3368,7 +3375,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 					      it.level - 1, true, ACC_ALL);
 
 			link_shadow_page(vcpu, it.sptep, sp);
-			if (account_disallowed_nx_lpage)
+			if (is_tdp && huge_page_disallowed)
 				account_huge_nx_page(vcpu->kvm, sp);
 		}
 	}

@@ -4108,8 +4115,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 			     bool prefault, int max_level, bool is_tdp)
 {
 	bool write = error_code & PFERR_WRITE_MASK;
-	bool exec = error_code & PFERR_FETCH_MASK;
-	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
 	bool map_writable;
 
 	gfn_t gfn = gpa >> PAGE_SHIFT;

@@ -4128,9 +4133,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (r)
 		return r;
 
-	if (lpage_disallowed)
-		max_level = PG_LEVEL_4K;
-
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 

@@ -4147,8 +4149,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	r = make_mmu_pages_available(vcpu);
 	if (r)
 		goto out_unlock;
-	r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
-			 prefault, is_tdp && lpage_disallowed);
+	r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
+			 prefault, is_tdp);
 
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);

arch/x86/kvm/mmu/paging_tmpl.h:

@@ -625,11 +625,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
  * emulate this operation, return 1 to indicate this case.
  */
 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
-			struct guest_walker *gw,
-			int write_fault, int max_level,
-			kvm_pfn_t pfn, bool map_writable, bool prefault,
-			bool lpage_disallowed)
+			struct guest_walker *gw, u32 error_code,
+			int max_level, kvm_pfn_t pfn, bool map_writable,
+			bool prefault)
 {
+	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
+	int write_fault = error_code & PFERR_WRITE_MASK;
+	bool exec = error_code & PFERR_FETCH_MASK;
+	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;

@@ -679,6 +682,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 		link_shadow_page(vcpu, it.sptep, sp);
 	}
 
+	if (huge_page_disallowed)
+		max_level = PG_LEVEL_4K;
+
 	hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn);
 
 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

@@ -704,7 +710,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
 					      it.level - 1, true, direct_access);
 			link_shadow_page(vcpu, it.sptep, sp);
-			if (lpage_disallowed)
+			if (huge_page_disallowed)
 				account_huge_nx_page(vcpu->kvm, sp);
 		}
 	}

@@ -786,8 +792,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	kvm_pfn_t pfn;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
-	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
-				is_nx_huge_page_enabled();
 	int max_level;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

@@ -828,7 +832,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
-	if (lpage_disallowed || is_self_change_mapping)
+	if (is_self_change_mapping)
 		max_level = PG_LEVEL_4K;
 	else
 		max_level = walker.level;

@@ -872,8 +876,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	r = make_mmu_pages_available(vcpu);
 	if (r)
 		goto out_unlock;
-	r = FNAME(fetch)(vcpu, addr, &walker, write_fault, max_level, pfn,
-			 map_writable, prefault, lpage_disallowed);
+	r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
+			 map_writable, prefault);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock: