Commit 9f762dbe authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: prepare uncharging for removal of private page type counters

The uncharge batching code adds up the anon, file, and kmem counts to
determine the total number of pages to uncharge and references to drop.
But the next patches will remove the anon and file counters.

Maintain an aggregate nr_pages in the uncharge_gather struct.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-7-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3fea5a49
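
For orientation, here is a standalone userspace sketch of the idea this patch implements; it is not kernel code, and fake_page and gather_page are made-up stand-ins. Each page's count is accumulated into an explicit aggregate nr_pages as it is gathered, so the batched uncharge no longer has to sum the anon, file, and kmem counters that later patches delete.

/*
 * Standalone sketch (not part of this patch): model of the batching change.
 * Before, the total was derived as nr_anon + nr_file + nr_kmem at batch time;
 * after, every gathered page also bumps an explicit nr_pages aggregate.
 */
#include <stdio.h>

struct uncharge_gather {
	unsigned long nr_pages;		/* aggregate total added by this patch */
	unsigned long nr_anon;
	unsigned long nr_file;
	unsigned long nr_kmem;
};

/* hypothetical stand-in for one page being uncharged */
struct fake_page {
	unsigned long nr_subpages;	/* 1, or e.g. 512 for a THP-sized page */
	int is_anon;
	int is_kmem;
};

static void gather_page(struct uncharge_gather *ug, const struct fake_page *page)
{
	unsigned long nr_pages = page->nr_subpages;

	ug->nr_pages += nr_pages;	/* aggregate, independent of page type */

	if (page->is_kmem)
		ug->nr_kmem += nr_pages;
	else if (page->is_anon)
		ug->nr_anon += nr_pages;
	else
		ug->nr_file += nr_pages;
}

int main(void)
{
	struct uncharge_gather ug = { 0 };
	struct fake_page pages[] = {
		{ .nr_subpages = 1,   .is_anon = 1 },
		{ .nr_subpages = 512, .is_anon = 0 },	/* file-backed THP */
		{ .nr_subpages = 1,   .is_kmem = 1 },
	};

	for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		gather_page(&ug, &pages[i]);

	/* one batched uncharge for the whole list, as uncharge_batch() does */
	printf("uncharge %lu pages (anon=%lu file=%lu kmem=%lu)\n",
	       ug.nr_pages, ug.nr_anon, ug.nr_file, ug.nr_kmem);
	return 0;
}
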
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6666,6 +6666,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
 
 struct uncharge_gather {
 	struct mem_cgroup *memcg;
+	unsigned long nr_pages;
 	unsigned long pgpgout;
 	unsigned long nr_anon;
 	unsigned long nr_file;
@@ -6682,13 +6683,12 @@ static inline void uncharge_gather_clear(struct uncharge_gather *ug)
 
 static void uncharge_batch(const struct uncharge_gather *ug)
 {
-	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
 	unsigned long flags;
 
 	if (!mem_cgroup_is_root(ug->memcg)) {
-		page_counter_uncharge(&ug->memcg->memory, nr_pages);
+		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
 		if (do_memsw_account())
-			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
+			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
 		memcg_oom_recover(ug->memcg);
@@ -6700,16 +6700,18 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
 	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
-	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
+	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
 
 	if (!mem_cgroup_is_root(ug->memcg))
-		css_put_many(&ug->memcg->css, nr_pages);
+		css_put_many(&ug->memcg->css, ug->nr_pages);
 }
 
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 {
+	unsigned long nr_pages;
+
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	if (!page->mem_cgroup)
@@ -6729,13 +6731,12 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 		ug->memcg = page->mem_cgroup;
 	}
 
-	if (!PageKmemcg(page)) {
-		unsigned int nr_pages = 1;
+	nr_pages = compound_nr(page);
+	ug->nr_pages += nr_pages;
 
-		if (PageTransHuge(page)) {
-			nr_pages = compound_nr(page);
+	if (!PageKmemcg(page)) {
+		if (PageTransHuge(page))
 			ug->nr_huge += nr_pages;
-		}
 		if (PageAnon(page))
 			ug->nr_anon += nr_pages;
 		else {
@@ -6745,7 +6746,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 		}
 		ug->pgpgout++;
 	} else {
-		ug->nr_kmem += compound_nr(page);
+		ug->nr_kmem += nr_pages;
 		__ClearPageKmemcg(page);
 	}
 