Commit 15c0536f authored by Yosry Ahmed, committed by Andrew Morton

mm: rmap: abstract updating per-node and per-memcg stats

A number of intricacies go into updating the stats when adding or removing
mappings: which stat index to use and which update function to call.  Abstract
this away into a new static helper in rmap.c, __folio_mod_stat().

This adds an unnecessary call to folio_test_anon() in
__folio_add_anon_rmap() and __folio_add_file_rmap().  However, the folio
struct should already be in the cache at this point, so it shouldn't cause
any noticeable overhead.

No functional change intended.

[hughd@google.com: fix /proc/meminfo]
  Link: https://lkml.kernel.org/r/49914517-dfc7-e784-fde0-0e08fafbecc2@google.com
Link: https://lkml.kernel.org/r/20240506211333.346605-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5a3f572a
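For orientation before the diff: below is a minimal, standalone sketch of the dispatch logic that __folio_mod_stat() centralizes, using stand-in counter arrays and a mock folio type. The names in this sketch are illustrative only; in the kernel, the helper calls __lruvec_stat_mod_folio() for the per-memcg counters and __mod_node_page_state() for the node-only PMD-mapped counters, exactly as in the first hunk of the diff.

	/* Userspace model only -- not kernel API. */
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in counter indices; the kernel uses enum node_stat_item values. */
	enum stat_idx {
		ANON_MAPPED, FILE_MAPPED, ANON_THPS,
		SHMEM_PMDMAPPED, FILE_PMDMAPPED, NR_STATS,
	};

	static long lruvec_stats[NR_STATS];	/* per-memcg (and per-node) in the kernel */
	static long node_stats[NR_STATS];	/* per-node only */

	/* Mock of the two folio tests the helper relies on. */
	struct folio_model {
		bool anon;
		bool swapbacked;
	};

	/* Models __folio_mod_stat(): pick the index and counter family in one place. */
	static void folio_mod_stat(struct folio_model *folio, int nr, int nr_pmdmapped)
	{
		if (nr)
			lruvec_stats[folio->anon ? ANON_MAPPED : FILE_MAPPED] += nr;

		if (nr_pmdmapped) {
			if (folio->anon) {
				lruvec_stats[ANON_THPS] += nr_pmdmapped;
			} else {
				/* PMD-mapped file/shmem counters are node-only. */
				node_stats[folio->swapbacked ?
					   SHMEM_PMDMAPPED : FILE_PMDMAPPED] += nr_pmdmapped;
			}
		}
	}

	int main(void)
	{
		struct folio_model shmem_thp = { .anon = false, .swapbacked = true };

		folio_mod_stat(&shmem_thp, 512, 512);	/* map a PMD-sized shmem folio */
		folio_mod_stat(&shmem_thp, -512, -512);	/* unmap it again (remove path) */

		printf("FILE_MAPPED=%ld SHMEM_PMDMAPPED=%ld\n",
		       lruvec_stats[FILE_MAPPED], node_stats[SHMEM_PMDMAPPED]);
		return 0;
	}

Note how the remove path simply passes negated deltas; this mirrors the __folio_mod_stat(folio, -nr, -nr_pmdmapped) call in the last hunk of the diff below.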
@@ -1269,6 +1269,28 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 		       page);
 }
 
+static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
+{
+	int idx;
+
+	if (nr) {
+		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
+		__lruvec_stat_mod_folio(folio, idx, nr);
+	}
+	if (nr_pmdmapped) {
+		if (folio_test_anon(folio)) {
+			idx = NR_ANON_THPS;
+			__lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+		} else {
+			/* NR_*_PMDMAPPED are not maintained per-memcg */
+			idx = folio_test_swapbacked(folio) ?
+				NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
+			__mod_node_page_state(folio_pgdat(folio), idx,
+					      nr_pmdmapped);
+		}
+	}
+}
+
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags, enum rmap_level level)
@@ -1276,10 +1298,6 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 	int i, nr, nr_pmdmapped = 0;
 
 	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
-	if (nr_pmdmapped)
-		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
-	if (nr)
-		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 
 	if (unlikely(!folio_test_anon(folio))) {
 		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -1297,6 +1315,8 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 		__page_check_anon_rmap(folio, page, vma, address);
 	}
 
+	__folio_mod_stat(folio, nr, nr_pmdmapped);
+
 	if (flags & RMAP_EXCLUSIVE) {
 		switch (level) {
 		case RMAP_LEVEL_PTE:
@@ -1393,6 +1413,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long address)
 {
 	int nr = folio_nr_pages(folio);
+	int nr_pmdmapped = 0;
 
 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start ||
@@ -1425,27 +1446,22 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		atomic_set(&folio->_large_mapcount, 0);
 		atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
 		SetPageAnonExclusive(&folio->page);
-		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
+		nr_pmdmapped = nr;
 	}
 
-	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
+	__folio_mod_stat(folio, nr, nr_pmdmapped);
 }
 
 static __always_inline void __folio_add_file_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum rmap_level level)
 {
-	pg_data_t *pgdat = folio_pgdat(folio);
 	int nr, nr_pmdmapped = 0;
 
 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
 	nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
-	if (nr_pmdmapped)
-		__mod_node_page_state(pgdat, folio_test_swapbacked(folio) ?
-			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
-	if (nr)
-		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
+	__folio_mod_stat(folio, nr, nr_pmdmapped);
 
 	/* See comments in folio_add_anon_rmap_*() */
 	if (!folio_test_large(folio))
@@ -1494,10 +1510,8 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		enum rmap_level level)
 {
 	atomic_t *mapped = &folio->_nr_pages_mapped;
-	pg_data_t *pgdat = folio_pgdat(folio);
 	int last, nr = 0, nr_pmdmapped = 0;
 	bool partially_mapped = false;
-	enum node_stat_item idx;
 
 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
@@ -1541,20 +1555,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		break;
 	}
 
-	if (nr_pmdmapped) {
-		/* NR_{FILE/SHMEM}_PMDMAPPED are not maintained per-memcg */
-		if (folio_test_anon(folio))
-			__lruvec_stat_mod_folio(folio, NR_ANON_THPS, -nr_pmdmapped);
-		else
-			__mod_node_page_state(pgdat,
-					folio_test_swapbacked(folio) ?
-					NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED,
-					-nr_pmdmapped);
-	}
 	if (nr) {
-		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-		__lruvec_stat_mod_folio(folio, idx, -nr);
-
 		/*
 		 * Queue anon large folio for deferred split if at least one
 		 * page of the folio is unmapped and at least one page
@@ -1566,6 +1567,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		    list_empty(&folio->_deferred_list))
 			deferred_split_folio(folio);
 	}
+	__folio_mod_stat(folio, -nr, -nr_pmdmapped);
 
 	/*
 	 * It would be tidy to reset folio_test_anon mapping when fully
......