Commit 3d4a5a45 authored by David Matlack, committed by Sean Christopherson

KVM: x86/mmu: Unnest TDP MMU helpers that allocate SPs for eager splitting

Move the implementation of tdp_mmu_alloc_sp_for_split() to its one and
only caller to reduce unnecessary nesting and make it more clear why the
eager split loop continues after allocating a new SP.

Opportunistically drop the double-underscores from
__tdp_mmu_alloc_sp_for_split() now that its parent is gone.

No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20240611220512.2426439-4-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent e1c04f7a
......@@ -1339,7 +1339,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
return spte_set;
}
static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
{
struct kvm_mmu_page *sp;
......@@ -1356,34 +1356,6 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
return sp;
}
/*
 * Allocate a shadow page for eager huge-page splitting.
 *
 * The allocation (done in __tdp_mmu_alloc_sp_for_split()) may sleep, so
 * both the RCU read lock and mmu_lock must be dropped first.  @shared
 * selects whether mmu_lock is held (and thus reacquired) in read or
 * write mode.  Because the locks are released, @iter is marked yielded
 * so the caller knows its walk is no longer valid and must restart.
 *
 * Returns the new shadow page, or NULL if the allocation failed.
 * NOTE(review): exact failure behavior of __tdp_mmu_alloc_sp_for_split()
 * is not visible in this fragment — confirm it returns NULL on failure.
 */
static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
struct tdp_iter *iter,
bool shared)
{
struct kvm_mmu_page *sp;
/* Sanity check: mmu_lock must be held in the mode @shared claims. */
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
/* Drop RCU before mmu_lock; the allocation below may sleep. */
rcu_read_unlock();
if (shared)
read_unlock(&kvm->mmu_lock);
else
write_unlock(&kvm->mmu_lock);
/* Locks were dropped: the caller's iteration must be restarted. */
iter->yielded = true;
sp = __tdp_mmu_alloc_sp_for_split();
/* Reacquire in the same mode the caller held it. */
if (shared)
read_lock(&kvm->mmu_lock);
else
write_lock(&kvm->mmu_lock);
rcu_read_lock();
return sp;
}
/* Note, the caller is responsible for initializing @sp. */
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
struct kvm_mmu_page *sp, bool shared)
......@@ -1454,7 +1426,22 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
continue;
if (!sp) {
sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
rcu_read_unlock();
if (shared)
read_unlock(&kvm->mmu_lock);
else
write_unlock(&kvm->mmu_lock);
sp = tdp_mmu_alloc_sp_for_split();
if (shared)
read_lock(&kvm->mmu_lock);
else
write_lock(&kvm->mmu_lock);
rcu_read_lock();
if (!sp) {
ret = -ENOMEM;
trace_kvm_mmu_split_huge_page(iter.gfn,
......@@ -1463,6 +1450,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
break;
}
iter.yielded = true;
continue;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment