Commit 8123f265 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Add a helper to consolidate root sp allocation

Add a helper, mmu_alloc_root(), to consolidate the allocation of a root
shadow page, which has the same basic mechanics for all flavors of TDP
and shadow paging.

Note, __pa(sp->spt) doesn't need to be protected by mmu_lock, as sp->spt
points at a kernel page.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200428023714.31923-1-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3bae0459
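
For reference, here is the new helper reassembled from the + side of the diff below. The comments are added for readability; the code itself is verbatim from the patch:

/*
 * Single allocation path for a root shadow page, shared by all flavors
 * of TDP and shadow paging.
 */
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
                            u8 level, bool direct)
{
        struct kvm_mmu_page *sp;

        /* mmu_lock protects the allocation and the root_count bump. */
        spin_lock(&vcpu->kvm->mmu_lock);

        if (make_mmu_pages_available(vcpu)) {
                spin_unlock(&vcpu->kvm->mmu_lock);
                return INVALID_PAGE;    /* callers translate this to -ENOSPC */
        }
        sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
        ++sp->root_count;

        spin_unlock(&vcpu->kvm->mmu_lock);

        /* No lock needed here: sp->spt points at a kernel page. */
        return __pa(sp->spt);
}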
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3678,37 +3678,43 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 	return ret;
 }
 
-static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
+			    u8 level, bool direct)
 {
 	struct kvm_mmu_page *sp;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+
+	if (make_mmu_pages_available(vcpu)) {
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		return INVALID_PAGE;
+	}
+	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
+	++sp->root_count;
+
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	return __pa(sp->spt);
+}
+
+static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+{
+	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
+	hpa_t root;
 	unsigned i;
 
-	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
-		spin_lock(&vcpu->kvm->mmu_lock);
-		if(make_mmu_pages_available(vcpu) < 0) {
-			spin_unlock(&vcpu->kvm->mmu_lock);
+	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
+		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
+		if (!VALID_PAGE(root))
 			return -ENOSPC;
-		}
-		sp = kvm_mmu_get_page(vcpu, 0, 0,
-				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
-		++sp->root_count;
-		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
-	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
+		vcpu->arch.mmu->root_hpa = root;
+	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
-			hpa_t root = vcpu->arch.mmu->pae_root[i];
+			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
 
-			MMU_WARN_ON(VALID_PAGE(root));
-			spin_lock(&vcpu->kvm->mmu_lock);
-			if (make_mmu_pages_available(vcpu) < 0) {
-				spin_unlock(&vcpu->kvm->mmu_lock);
+			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
+					      i << 30, PT32_ROOT_LEVEL, true);
+			if (!VALID_PAGE(root))
 				return -ENOSPC;
-			}
-			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
-					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
-			root = __pa(sp->spt);
-			++sp->root_count;
-			spin_unlock(&vcpu->kvm->mmu_lock);
 			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
 		}
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
@@ -3723,9 +3729,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *sp;
 	u64 pdptr, pm_mask;
 	gfn_t root_gfn, root_pgd;
+	hpa_t root;
 	int i;
 
 	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
@@ -3739,20 +3745,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * write-protect the guests page table root.
 	 */
 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu->root_hpa;
+		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
-		MMU_WARN_ON(VALID_PAGE(root));
-
-		spin_lock(&vcpu->kvm->mmu_lock);
-		if (make_mmu_pages_available(vcpu) < 0) {
-			spin_unlock(&vcpu->kvm->mmu_lock);
+		root = mmu_alloc_root(vcpu, root_gfn, 0,
+				      vcpu->arch.mmu->shadow_root_level, false);
+		if (!VALID_PAGE(root))
 			return -ENOSPC;
-		}
-		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
-		root = __pa(sp->spt);
-		++sp->root_count;
-		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu->root_hpa = root;
 		goto set_root_pgd;
 	}
@@ -3767,9 +3765,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu->pae_root[i];
-
-		MMU_WARN_ON(VALID_PAGE(root));
+		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
 
 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
 			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
@@ -3780,17 +3776,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			if (mmu_check_root(vcpu, root_gfn))
 				return 1;
 		}
-		spin_lock(&vcpu->kvm->mmu_lock);
-		if (make_mmu_pages_available(vcpu) < 0) {
-			spin_unlock(&vcpu->kvm->mmu_lock);
-			return -ENOSPC;
-		}
-		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
-				      0, ACC_ALL);
-		root = __pa(sp->spt);
-		++sp->root_count;
-		spin_unlock(&vcpu->kvm->mmu_lock);
 
+		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
+				      PT32_ROOT_LEVEL, false);
+		if (!VALID_PAGE(root))
+			return -ENOSPC;
 		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
 	}
 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);