Commit cb9b88c6 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Refactor handling of cache consistency with TDP

Pre-calculate the max level for a TDP page with respect to MTRR cache
consistency in preparation for replacing force_pt_level with max_level,
and eventually combining the bulk of nonpaging_page_fault() and
tdp_page_fault() into a common helper.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9f1a8526
@@ -4271,16 +4271,6 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
-static bool
-check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
-{
-        int page_num = KVM_PAGES_PER_HPAGE(level);
-
-        gfn &= ~(page_num - 1);
-
-        return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
-}
-
 static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                           bool prefault)
 {
@@ -4294,6 +4284,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
         bool map_writable;
         bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
                                 is_nx_huge_page_enabled();
+        int max_level;
 
         MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
@@ -4304,14 +4295,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
         if (r)
                 return r;
 
-        force_pt_level =
-                lpage_disallowed ||
-                !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
+        for (max_level = PT_MAX_HUGEPAGE_LEVEL;
+             max_level > PT_PAGE_TABLE_LEVEL;
+             max_level--) {
+                int page_num = KVM_PAGES_PER_HPAGE(max_level);
+                gfn_t base = gfn & ~(page_num - 1);
+
+                if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
+                        break;
+        }
+
+        force_pt_level = lpage_disallowed || max_level == PT_PAGE_TABLE_LEVEL;
         level = mapping_level(vcpu, gfn, &force_pt_level);
         if (likely(!force_pt_level)) {
-                if (level > PT_DIRECTORY_LEVEL &&
-                    !check_hugepage_cache_consistency(vcpu, gfn, level))
-                        level = PT_DIRECTORY_LEVEL;
+                if (level > max_level)
+                        level = max_level;
                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
         }
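
For context, here is a minimal user-space sketch of the level-walking logic the last hunk introduces. It is an illustration only: pages_per_hpage() and mtrr_range_is_consistent() are hypothetical stand-ins for KVM's KVM_PAGES_PER_HPAGE() and kvm_mtrr_check_gfn_range_consistency(), so the sketch shows how max_level is pre-calculated and later used as a clamp, not the real KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the KVM page-table level constants (x86, 4-level paging). */
#define PT_PAGE_TABLE_LEVEL    1   /* 4KiB mappings */
#define PT_DIRECTORY_LEVEL     2   /* 2MiB mappings */
#define PT_MAX_HUGEPAGE_LEVEL  3   /* 1GiB mappings */

typedef uint64_t gfn_t;

/* Pages covered by a mapping at @level: 1, 512, 512 * 512. */
static uint64_t pages_per_hpage(int level)
{
        return 1ULL << ((level - 1) * 9);
}

/*
 * Hypothetical stand-in for kvm_mtrr_check_gfn_range_consistency(): returns
 * true if every gfn in [base, base + nr_pages) has the same MTRR memory type.
 * Here we pretend only ranges up to 2MiB are uniformly typed.
 */
static bool mtrr_range_is_consistent(gfn_t base, uint64_t nr_pages)
{
        (void)base;
        return nr_pages <= pages_per_hpage(PT_DIRECTORY_LEVEL);
}

/* Walk from the largest hugepage level downward, as the new loop does. */
static int tdp_max_level(gfn_t gfn)
{
        int max_level;

        for (max_level = PT_MAX_HUGEPAGE_LEVEL;
             max_level > PT_PAGE_TABLE_LEVEL;
             max_level--) {
                uint64_t page_num = pages_per_hpage(max_level);
                gfn_t base = gfn & ~(page_num - 1);

                if (mtrr_range_is_consistent(base, page_num))
                        break;
        }
        return max_level;
}

int main(void)
{
        gfn_t gfn = 0x12345;

        /* With the fake MTRR check above this prints 2 (PT_DIRECTORY_LEVEL). */
        printf("max_level for gfn 0x%llx: %d\n",
               (unsigned long long)gfn, tdp_max_level(gfn));
        return 0;
}

The caller then caps the level returned by mapping_level() at this value, which is exactly what the "if (level > max_level) level = max_level;" change does in tdp_page_fault().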