Commit 468c3982 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: switch to native NR_ANON_THPS counter

With rmap memcg locking already in place for NR_ANON_MAPPED, it's just a
small step to remove the MEMCG_RSS_HUGE wart and switch memcg to the
native NR_ANON_THPS accounting sites.
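
To illustrate the pattern this switches to (a sketch for readers, not part of
the patch): lock_page_memcg() stabilizes page->mem_cgroup, so the rmap and
split paths can account PMD-mapped anon THPs directly to the page's lruvec
via NR_ANON_THPS, and memory.stat readers scale that per-THP count to bytes.
The wrapper function below is hypothetical; only the kernel APIs it calls
appear in the diff.

	/* Sketch: charge/uncharge one anon THP mapping against its lruvec. */
	static void thp_rmap_account(struct page *page, int val)
	{
		lock_page_memcg(page);		/* pin page->mem_cgroup */
		__mod_lruvec_page_state(page, NR_ANON_THPS, val);
		unlock_page_memcg(page);
	}

	/* Sketch: memory.stat derives bytes from the per-THP counter. */
	seq_buf_printf(&s, "anon_thp %llu\n",
		       (u64)memcg_page_state(memcg, NR_ANON_THPS) * HPAGE_PMD_SIZE);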

[hannes@cmpxchg.org: fixes]
  Link: http://lkml.kernel.org/r/20200512121750.GA397968@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Randy Dunlap <rdunlap@infradead.org>	[build-tested]
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-12-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent be5d0a74
include/linux/memcontrol.h
@@ -29,8 +29,7 @@ struct kmem_cache;
 
 /* Cgroup-specific page state, on top of universal node page state */
 enum memcg_stat_item {
-        MEMCG_RSS_HUGE = NR_VM_NODE_STAT_ITEMS,
-        MEMCG_SWAP,
+        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
         MEMCG_SOCK,
         /* XXX: why are these zone and not node counters? */
         MEMCG_KERNEL_STACK_KB,
mm/huge_memory.c
@@ -2159,15 +2159,17 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                         atomic_inc(&page[i]._mapcount);
         }
 
+        lock_page_memcg(page);
         if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
                 /* Last compound_mapcount is gone. */
-                __dec_node_page_state(page, NR_ANON_THPS);
+                __dec_lruvec_page_state(page, NR_ANON_THPS);
                 if (TestClearPageDoubleMap(page)) {
                         /* No need in mapcount reference anymore */
                         for (i = 0; i < HPAGE_PMD_NR; i++)
                                 atomic_dec(&page[i]._mapcount);
                 }
         }
+        unlock_page_memcg(page);
 
         smp_wmb(); /* make pte visible before pmd */
         pmd_populate(mm, pmd, pgtable);
mm/memcontrol.c
@@ -836,11 +836,6 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                          struct page *page,
                                          int nr_pages)
 {
-        if (abs(nr_pages) > 1) {
-                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-                __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
-        }
-
         /* pagein of a big page is an event. So, ignore page size */
         if (nr_pages > 0)
                 __count_memcg_events(memcg, PGPGIN, 1);
@@ -1406,15 +1401,11 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
                        (u64)memcg_page_state(memcg, NR_WRITEBACK) *
                        PAGE_SIZE);
 
-        /*
-         * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
-         * with the NR_ANON_THP vm counter, but right now it's a pain in the
-         * arse because it requires migrating the work out of rmap to a place
-         * where the page->mem_cgroup is set up and stable.
-         */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
         seq_buf_printf(&s, "anon_thp %llu\n",
-                       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
-                       PAGE_SIZE);
+                       (u64)memcg_page_state(memcg, NR_ANON_THPS) *
+                       HPAGE_PMD_SIZE);
+#endif
 
         for (i = 0; i < NR_LRU_LISTS; i++)
                 seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
@@ -3061,8 +3052,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 
         for (i = 1; i < HPAGE_PMD_NR; i++)
                 head[i].mem_cgroup = head->mem_cgroup;
-
-        __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -3818,7 +3807,9 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 static const unsigned int memcg1_stats[] = {
         NR_FILE_PAGES,
         NR_ANON_MAPPED,
-        MEMCG_RSS_HUGE,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        NR_ANON_THPS,
+#endif
         NR_SHMEM,
         NR_FILE_MAPPED,
         NR_FILE_DIRTY,
@@ -3829,7 +3820,9 @@ static const unsigned int memcg1_stats[] = {
 static const char *const memcg1_stat_names[] = {
         "cache",
         "rss",
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
         "rss_huge",
+#endif
         "shmem",
         "mapped_file",
         "dirty",
@@ -3855,11 +3848,16 @@ static int memcg_stat_show(struct seq_file *m, void *v)
         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 
         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+                unsigned long nr;
+
                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
                         continue;
-                seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
-                           memcg_page_state_local(memcg, memcg1_stats[i]) *
-                           PAGE_SIZE);
+                nr = memcg_page_state_local(memcg, memcg1_stats[i]);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                if (memcg1_stats[i] == NR_ANON_THPS)
+                        nr *= HPAGE_PMD_NR;
+#endif
+                seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
         }
 
         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -5452,6 +5450,13 @@ static int mem_cgroup_move_account(struct page *page,
                 if (page_mapped(page)) {
                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
+                        if (PageTransHuge(page)) {
+                                __mod_lruvec_state(from_vec, NR_ANON_THPS,
+                                                   -nr_pages);
+                                __mod_lruvec_state(to_vec, NR_ANON_THPS,
+                                                   nr_pages);
+                        }
+
                 }
         } else {
                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
@@ -6671,7 +6676,6 @@ struct uncharge_gather {
         unsigned long nr_pages;
         unsigned long pgpgout;
         unsigned long nr_kmem;
-        unsigned long nr_huge;
         struct page *dummy_page;
 };
@@ -6694,7 +6698,6 @@ static void uncharge_batch(const struct uncharge_gather *ug)
         }
 
         local_irq_save(flags);
-        __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
         memcg_check_events(ug->memcg, ug->dummy_page);
@@ -6731,8 +6734,6 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
         ug->nr_pages += nr_pages;
 
         if (!PageKmemcg(page)) {
-                if (PageTransHuge(page))
-                        ug->nr_huge += nr_pages;
                 ug->pgpgout++;
         } else {
                 ug->nr_kmem += nr_pages;
mm/rmap.c
@@ -1138,7 +1138,7 @@ void do_page_add_anon_rmap(struct page *page,
                  * disabled.
                  */
                 if (compound)
-                        __inc_node_page_state(page, NR_ANON_THPS);
+                        __inc_lruvec_page_state(page, NR_ANON_THPS);
                 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
         }
@@ -1180,7 +1180,7 @@ void page_add_new_anon_rmap(struct page *page,
                 if (hpage_pincount_available(page))
                         atomic_set(compound_pincount_ptr(page), 0);
 
-                __inc_node_page_state(page, NR_ANON_THPS);
+                __inc_lruvec_page_state(page, NR_ANON_THPS);
         } else {
                 /* Anon THP always mapped first with PMD */
                 VM_BUG_ON_PAGE(PageTransCompound(page), page);
@@ -1286,7 +1286,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                 return;
 
-        __dec_node_page_state(page, NR_ANON_THPS);
+        __dec_lruvec_page_state(page, NR_ANON_THPS);
 
         if (TestClearPageDoubleMap(page)) {
                 /*