Commit 43a063ca authored by Yosry Ahmed, committed by Sean Christopherson

KVM: x86/mmu: count KVM mmu usage in secondary pagetable stats.

Count the pages used by the KVM MMU on x86 under the secondary pagetable
memory stats (e.g. "SecPageTables" in /proc/meminfo) to give better
visibility into KVM MMU memory consumption, in the same way that normal
user page tables are accounted.

Add the inner helper to common KVM code; ARM will also use it to count
its stats in a future commit.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Acked-by: Marc Zyngier <maz@kernel.org> # generic KVM changes
Link: https://lore.kernel.org/r/20220823004639.2387269-3-yosryahmed@google.com
Link: https://lore.kernel.org/r/20220823004639.2387269-4-yosryahmed@google.com
[sean: squash x86 usage to workaround modpost issues]
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent ebc97a52
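
The accounting added here surfaces through the same interface as ordinary user page-table accounting. As a minimal illustration (not part of this commit), a small user-space C program that prints the "SecPageTables" line from /proc/meminfo could look like the following sketch:

/* Minimal sketch: print the SecPageTables counter from /proc/meminfo. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "SecPageTables:", 14))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}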
arch/x86/kvm/mmu/mmu.c
@@ -1665,6 +1665,18 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
+static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_mod_used_mmu_pages(kvm, +1);
+	kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_mod_used_mmu_pages(kvm, -1);
+	kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
 static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 {
 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
@@ -2122,7 +2134,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 	 */
 	sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
 	list_add(&sp->link, &kvm->arch.active_mmu_pages);
-	kvm_mod_used_mmu_pages(kvm, +1);
+	kvm_account_mmu_page(kvm, sp);
 
 	sp->gfn = gfn;
 	sp->role = role;
@@ -2456,7 +2468,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 			list_add(&sp->link, invalid_list);
 		else
 			list_move(&sp->link, invalid_list);
-		kvm_mod_used_mmu_pages(kvm, -1);
+		kvm_unaccount_mmu_page(kvm, sp);
 	} else {
 		/*
 		 * Remove the active root from the active page list, the root
arch/x86/kvm/mmu/tdp_mmu.c
@@ -372,6 +372,16 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
 	}
 }
 
+static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
 /**
  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
  *
@@ -384,6 +394,7 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
 			      bool shared)
 {
+	tdp_unaccount_mmu_page(kvm, sp);
 	if (shared)
 		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
 	else
@@ -1132,6 +1143,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
 	if (account_nx)
 		account_huge_nx_page(kvm, sp);
 	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+	tdp_account_mmu_page(kvm, sp);
 
 	return 0;
 }
include/linux/kvm_host.h
@@ -2247,6 +2247,19 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
 
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
+
 /*
  * This defines how many reserved entries we want to keep before we
  * kick the vcpu to the userspace to avoid dirty ring full. This
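
For an architecture that allocates several table pages as one block (as the planned ARM user may do), the helper can charge the whole block in a single call, as long as @virt points at the first page of the block. A hypothetical sketch, with illustrative names that are not taken from this commit:

/*
 * Hypothetical usage only: account a block of EXAMPLE_PGT_PAGES pages that
 * were allocated together, and undo the charge when the block is freed.
 * example_*() and EXAMPLE_PGT_PAGES do not exist in the kernel.
 */
#define EXAMPLE_PGT_PAGES	4

static void example_account_pgt_block(void *virt)
{
	/* @virt is the first page of the block; charge all pages at once. */
	kvm_account_pgtable_pages(virt, EXAMPLE_PGT_PAGES);
}

static void example_unaccount_pgt_block(void *virt)
{
	/* A negative count subtracts from NR_SECONDARY_PAGETABLE. */
	kvm_account_pgtable_pages(virt, -EXAMPLE_PGT_PAGES);
}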