Commit 7f497775 authored by David Matlack's avatar David Matlack Committed by Paolo Bonzini

KVM: x86/mmu: Always pass 0 for @quadrant when gptes are 8 bytes

The quadrant is only used when gptes are 4 bytes, but
mmu_alloc_{direct,shadow}_roots() pass in a non-zero quadrant for PAE
page directories regardless. Make this less confusing by only passing in
a non-zero quadrant when it is actually necessary.
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-6-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2e65e842
...@@ -3389,9 +3389,10 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, ...@@ -3389,9 +3389,10 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
role.level = level; role.level = level;
role.quadrant = quadrant;
if (role.has_4_byte_gpte) WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
role.quadrant = quadrant; WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
sp = kvm_mmu_get_page(vcpu, gfn, role); sp = kvm_mmu_get_page(vcpu, gfn, role);
++sp->root_count; ++sp->root_count;
...@@ -3427,7 +3428,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) ...@@ -3427,7 +3428,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
for (i = 0; i < 4; ++i) { for (i = 0; i < 4; ++i) {
WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), i, root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
PT32_ROOT_LEVEL); PT32_ROOT_LEVEL);
mmu->pae_root[i] = root | PT_PRESENT_MASK | mmu->pae_root[i] = root | PT_PRESENT_MASK |
shadow_me_value; shadow_me_value;
...@@ -3512,9 +3513,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ...@@ -3512,9 +3513,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
struct kvm_mmu *mmu = vcpu->arch.mmu; struct kvm_mmu *mmu = vcpu->arch.mmu;
u64 pdptrs[4], pm_mask; u64 pdptrs[4], pm_mask;
gfn_t root_gfn, root_pgd; gfn_t root_gfn, root_pgd;
int quadrant, i, r;
hpa_t root; hpa_t root;
unsigned i;
int r;
root_pgd = mmu->get_guest_pgd(vcpu); root_pgd = mmu->get_guest_pgd(vcpu);
root_gfn = root_pgd >> PAGE_SHIFT; root_gfn = root_pgd >> PAGE_SHIFT;
...@@ -3597,7 +3597,15 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ...@@ -3597,7 +3597,15 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
root_gfn = pdptrs[i] >> PAGE_SHIFT; root_gfn = pdptrs[i] >> PAGE_SHIFT;
} }
root = mmu_alloc_root(vcpu, root_gfn, i, PT32_ROOT_LEVEL); /*
* If shadowing 32-bit non-PAE page tables, each PAE page
* directory maps one quarter of the guest's non-PAE page
* directory. Otherwise each PAE page directory shadows one guest
* PAE page directory so that quadrant should be 0.
*/
quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0;
root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
mmu->pae_root[i] = root | pm_mask; mmu->pae_root[i] = root | pm_mask;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment