Commit cbe1e6f0 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Incorporate guest's page level into max level for shadow MMU

Restrict the max level for a shadow page based on the guest's level
instead of capping the level after the fact for host-mapped huge pages,
e.g. hugetlbfs pages.  Explicitly capping the max level using the guest
mapping level also eliminates FNAME(page_fault)'s subtle dependency on
THP only supporting 2MB pages.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 39ca1ecb
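
For illustration, a minimal sketch of the max_level derivation this patch introduces in the diff below. This is not the kernel code itself: sketch_max_level is a hypothetical name, and the constants are reproduced standalone with the values KVM used at the time of this commit (1 = 4KB, 2 = 2MB, 3 = 1GB).

#include <stdbool.h>

/* KVM's page-level numbering, reproduced here for self-containment. */
#define PT_PAGE_TABLE_LEVEL     1   /* 4KB mappings */
#define PT_DIRECTORY_LEVEL      2   /* 2MB mappings */
#define PT_MAX_HUGEPAGE_LEVEL   3   /* 1GB mappings */

/*
 * Post-patch derivation: the guest's own walker level caps max_level
 * up front, so a 4KB guest mapping can never be shadowed by a huge
 * page regardless of how the host backs the memory.
 */
static int sketch_max_level(int walker_level, bool lpage_disallowed,
                            bool is_self_change_mapping)
{
        if (lpage_disallowed || is_self_change_mapping)
                return PT_PAGE_TABLE_LEVEL;
        return walker_level;
}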
arch/x86/kvm/paging_tmpl.h
@@ -773,7 +773,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	struct guest_walker walker;
 	int r;
 	kvm_pfn_t pfn;
-	int level = PT_PAGE_TABLE_LEVEL;
+	int level;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
 	bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
@@ -818,18 +818,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 
-	max_level = lpage_disallowed ? PT_PAGE_TABLE_LEVEL :
-				       PT_MAX_HUGEPAGE_LEVEL;
+	if (lpage_disallowed || is_self_change_mapping)
+		max_level = PT_PAGE_TABLE_LEVEL;
+	else
+		max_level = walker.level;
 
-	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
-		level = mapping_level(vcpu, walker.gfn, &max_level);
-		if (likely(max_level > PT_DIRECTORY_LEVEL)) {
-			level = min(walker.level, level);
-			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
-		}
-	} else {
-		max_level = PT_PAGE_TABLE_LEVEL;
-	}
+	level = mapping_level(vcpu, walker.gfn, &max_level);
+	if (level > PT_PAGE_TABLE_LEVEL)
+		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
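
To see why no functional change is expected on the common path, here is a hedged, self-contained comparison of the old and new clamp orderings. stub_mapping_level is a stand-in for KVM's mapping_level(), reduced to clamping a hypothetical host-side level; the lpage_disallowed and self-change special cases are omitted since both versions force 4KB there, and the real function's memslot checks and pointer update of max_level are likewise left out.

#include <assert.h>

#define PT_PAGE_TABLE_LEVEL     1
#define PT_MAX_HUGEPAGE_LEVEL   3

static int min_int(int a, int b) { return a < b ? a : b; }

/* Stub: the host-side mapping level, clamped to the given max_level. */
static int stub_mapping_level(int host_level, int max_level)
{
        return min_int(host_level, max_level);
}

int main(void)
{
        for (int walker_level = 1; walker_level <= 3; walker_level++)
        for (int host_level = 1; host_level <= 3; host_level++) {
                /* Old: host level first, guest cap applied afterward. */
                int old_level = stub_mapping_level(host_level,
                                                   PT_MAX_HUGEPAGE_LEVEL);
                old_level = min_int(walker_level, old_level);

                /* New: guest level caps max_level before the host query. */
                int new_level = stub_mapping_level(host_level, walker_level);

                assert(old_level == new_level);
        }
        return 0;
}

Both orderings reduce to min(guest level, host level); moving the guest cap ahead of the host query only changes where the clamp happens, which is the invariant the commit message relies on.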