Commit f0f37e22 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Refactor the per-slot level calculation in mapping_level()

Invert the loop which adjusts the allowed page level based on what's
compatible with the associated memslot to use a largest-to-smallest
page size walk.  This paves the way for passing around a "max level"
variable instead of having redundant checks and/or multiple booleans.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cb9b88c6
@@ -1330,7 +1330,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 			 bool *force_pt_level)
 {
-	int host_level, level, max_level;
+	int host_level, max_level;
 	struct kvm_memory_slot *slot;
 
 	if (unlikely(*force_pt_level))
@@ -1347,12 +1347,12 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 		return host_level;
 
 	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
+	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
-	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-		if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
+		if (!__mmu_gfn_lpage_is_disallowed(large_gfn, max_level, slot))
 			break;
+	}
 
-	return level - 1;
+	return max_level;
 }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment