Commit a3aca4de authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Derive page role for TDP MMU shadow pages from parent

Derive the page role from the parent shadow page, since the only thing
that changes is the level. This is in preparation for splitting huge
pages during VM-ioctls, which do not have access to the vCPU MMU context.

No functional change intended.
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220119230739.2234394-14-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a81399a5
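
The idea in isolation: a shadow page's role is a packed union, and a child
page inherits every role bit from its parent except the level. Below is a
minimal, standalone C sketch of that pattern; the union layout here is a
simplified, hypothetical stand-in, not the kernel's real kvm_mmu_page_role.

/* Minimal standalone sketch (hypothetical, simplified types): derive a
 * child page's role by copying the parent's role and decrementing the
 * level. The real kvm_mmu_page_role packs many more bits into its word.
 */
#include <assert.h>
#include <stdio.h>

union mmu_page_role {                        /* simplified stand-in */
        unsigned int word;
        struct {
                unsigned int level:4;        /* page-table level, 1 = 4K */
                unsigned int ad_disabled:1;  /* an example inherited bit */
        };
};

static union mmu_page_role child_role(union mmu_page_role parent)
{
        union mmu_page_role role = parent;   /* inherit everything... */

        role.level--;                        /* ...except the level */
        return role;
}

int main(void)
{
        union mmu_page_role root = { .word = 0 };
        union mmu_page_role child;

        root.level = 4;                      /* e.g. a 4-level TDP root */
        root.ad_disabled = 1;

        child = child_role(root);
        assert(child.level == 3 && child.ad_disabled == 1);
        printf("parent level %u -> child level %u\n",
               (unsigned)root.level, (unsigned)child.level);
        return 0;
}

The payoff, visible in the diff below, is that deriving the role this way
needs only the parent page, not vcpu->arch.mmu, which is what allows
huge-page splitting from VM-ioctl context.
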
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -171,19 +171,8 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                 if (kvm_mmu_page_as_id(_root) != _as_id) {              \
                 } else
 
-static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
-                                                   int level)
-{
-        union kvm_mmu_page_role role;
-
-        role = vcpu->arch.mmu->mmu_role.base;
-        role.level = level;
-
-        return role;
-}
-
 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                             int level)
+                                             union kvm_mmu_page_role role)
 {
         struct kvm_mmu_page *sp;
 
@@ -191,7 +180,7 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
-        sp->role.word = page_role_for_level(vcpu, level).word;
+        sp->role = role;
         sp->gfn = gfn;
         sp->tdp_mmu_page = true;
 
@@ -200,16 +189,28 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu, gfn_t gfn,
         return sp;
 }
 
-hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+static struct kvm_mmu_page *tdp_mmu_alloc_child_sp(struct kvm_vcpu *vcpu,
+                                                   struct tdp_iter *iter)
 {
+        struct kvm_mmu_page *parent_sp;
         union kvm_mmu_page_role role;
+
+        parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
+
+        role = parent_sp->role;
+        role.level--;
+
+        return tdp_mmu_alloc_sp(vcpu, iter->gfn, role);
+}
+
+hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+{
+        union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
         struct kvm *kvm = vcpu->kvm;
         struct kvm_mmu_page *root;
 
         lockdep_assert_held_write(&kvm->mmu_lock);
 
-        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
-
         /*
          * Check for an existing root before allocating a new one. Note, the
          * role check prevents consuming an invalid root.
@@ -220,7 +221,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
                 goto out;
         }
 
-        root = tdp_mmu_alloc_sp(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
+        root = tdp_mmu_alloc_sp(vcpu, 0, role);
         refcount_set(&root->tdp_mmu_root_count, 1);
 
         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
@@ -1041,7 +1042,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                 if (is_removed_spte(iter.old_spte))
                         break;
 
-                sp = tdp_mmu_alloc_sp(vcpu, iter.gfn, iter.level - 1);
+                sp = tdp_mmu_alloc_child_sp(vcpu, &iter);
                 if (tdp_mmu_link_sp_atomic(vcpu->kvm, &iter, sp, account_nx)) {
                         tdp_mmu_free_sp(sp);
                         break;