Commit b8eddff8 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: add file_thp, shmem_thp to memory.stat

As huge page usage in the page cache and for shmem files proliferates in
our production environment, the performance monitoring team has asked for
per-cgroup stats on those pages.

We already track and export anon_thp per cgroup.  We already track file
THP and shmem THP per node, so making them per-cgroup is only a matter of
switching from node to lruvec counters.  All callsites are in places where
the pages are charged and locked, so page->memcg is stable.
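
To make the node-vs-lruvec distinction concrete, here is a simplified,
illustrative sketch of what a __mod_lruvec_page_state()-style helper does
compared to __mod_node_page_state(). This is not the kernel's actual
implementation (per-cpu batching and compound-head handling are elided),
and field/helper usage is only assumed to match this kernel version:

	#include <linux/mm.h>
	#include <linux/memcontrol.h>
	#include <linux/vmstat.h>

	/*
	 * Sketch only: the lruvec helper resolves the page's memcg lruvec
	 * and accounts the item there, feeding both the per-node and the
	 * per-cgroup counters; the plain node helper only updates the
	 * per-node counter, which is why NR_FILE_THPS and NR_SHMEM_THPS
	 * were invisible in memory.stat before this patch.
	 */
	static void sketch_mod_lruvec_page_state(struct page *page,
						 enum node_stat_item idx, int val)
	{
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;

		if (!page->mem_cgroup) {
			/* uncharged page: only the node counter exists */
			__mod_node_page_state(pgdat, idx, val);
			return;
		}

		/*
		 * page->mem_cgroup is stable under the precondition above:
		 * every touched callsite has the page charged and locked.
		 */
		lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
		__mod_lruvec_state(lruvec, idx, val);	/* node + memcg */
	}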

[hannes@cmpxchg.org: add documentation]
  Link: https://lkml.kernel.org/r/20201026174029.GC548555@cmpxchg.org

Link: https://lkml.kernel.org/r/20201022151844.489337-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f38d58b7
......@@ -1300,6 +1300,14 @@ PAGE_SIZE multiple when read back.
 		Amount of memory used in anonymous mappings backed by
 		transparent hugepages
 
+	  file_thp
+		Amount of cached filesystem data backed by transparent
+		hugepages
+
+	  shmem_thp
+		Amount of shm, tmpfs, shared anonymous mmap()s backed by
+		transparent hugepages
+
 	  inactive_anon, active_anon, inactive_file, active_file, unevictable
 		Amount of memory, swap-backed and filesystem-backed,
 		on the internal memory management lists used by the
......
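
For context (not part of the patch): with these entries, a read of a
cgroup's memory.stat gains two lines next to anon_thp. The values below
are made up purely for illustration; like the other fields they are byte
counts, i.e. multiples of the huge page size:

	anon_thp 98566144
	file_thp 12582912
	shmem_thp 6291456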
......@@ -204,9 +204,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 	if (PageSwapBacked(page)) {
 		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
 		if (PageTransHuge(page))
-			__dec_node_page_state(page, NR_SHMEM_THPS);
+			__dec_lruvec_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
-		__dec_node_page_state(page, NR_FILE_THPS);
+		__dec_lruvec_page_state(page, NR_FILE_THPS);
 		filemap_nr_thps_dec(mapping);
 	}
......
......@@ -2710,9 +2710,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
 			if (PageSwapBacked(head))
-				__dec_node_page_state(head, NR_SHMEM_THPS);
+				__dec_lruvec_page_state(head, NR_SHMEM_THPS);
 			else
-				__dec_node_page_state(head, NR_FILE_THPS);
+				__dec_lruvec_page_state(head, NR_FILE_THPS);
 		}
 
 		__split_huge_page(page, list, end, flags);
......
......@@ -1845,9 +1845,9 @@ static void collapse_file(struct mm_struct *mm,
 	}
 
 	if (is_shmem)
-		__inc_node_page_state(new_page, NR_SHMEM_THPS);
+		__inc_lruvec_page_state(new_page, NR_SHMEM_THPS);
 	else {
-		__inc_node_page_state(new_page, NR_FILE_THPS);
+		__inc_lruvec_page_state(new_page, NR_FILE_THPS);
 		filemap_nr_thps_inc(mapping);
 	}
......
......@@ -1512,6 +1512,8 @@ static struct memory_stat memory_stats[] = {
 	 * constant(e.g. powerpc).
 	 */
 	{ "anon_thp", 0, NR_ANON_THPS },
+	{ "file_thp", 0, NR_FILE_THPS },
+	{ "shmem_thp", 0, NR_SHMEM_THPS },
 #endif
 	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
 	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
......@@ -1542,7 +1544,9 @@ static int __init memory_stats_init(void)
 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		if (memory_stats[i].idx == NR_ANON_THPS)
+		if (memory_stats[i].idx == NR_ANON_THPS ||
+		    memory_stats[i].idx == NR_FILE_THPS ||
+		    memory_stats[i].idx == NR_SHMEM_THPS)
 			memory_stats[i].ratio = HPAGE_PMD_SIZE;
 #endif
 		VM_BUG_ON(!memory_stats[i].ratio);
......
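
A note on units: NR_ANON_THPS, NR_FILE_THPS and NR_SHMEM_THPS are kept in
units of huge pages, and the ratio field scales them to bytes when
memory.stat is generated. The following is a condensed sketch modeled on
this file's memory_stat_format() (simplified, not a verbatim copy; it
assumes the memory_stats[] table above):

	static void sketch_format_memory_stats(struct mem_cgroup *memcg,
					       struct seq_buf *s)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
			u64 size = memcg_page_state(memcg, memory_stats[i].idx);

			/*
			 * ratio converts the raw counter to bytes; for the THP
			 * entries it is HPAGE_PMD_SIZE, so e.g. 3 huge pages on
			 * x86-64 are reported as 3 * 2MiB = 6291456.
			 */
			size *= memory_stats[i].ratio;
			seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
		}
	}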
......@@ -713,7 +713,7 @@ static int shmem_add_to_page_cache(struct page *page,
 	}
 	if (PageTransHuge(page)) {
 		count_vm_event(THP_FILE_ALLOC);
-		__inc_node_page_state(page, NR_SHMEM_THPS);
+		__inc_lruvec_page_state(page, NR_SHMEM_THPS);
 	}
 	mapping->nrpages += nr;
 	__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
......