mm/memcg: Convert memcg_check_events to take a node ID

memcg_check_events only uses the page's nid, so call page_to_nid in the
callers to make the interface easier to understand.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
parent 2ab082ba
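The change is a parameter-narrowing cleanup: memcg_check_events() never used the page beyond page_to_nid(), so the lookup moves to the call sites, and uncharge_gather can batch a plain int instead of carrying a dummy_page pointer whose only job was that lookup. Note that mem_cgroup_move_account() computes nid once, before interrupts are disabled, and reuses it for both the "to" and "from" events. A minimal userspace sketch of the same pattern (a standalone illustration with stand-in names, not kernel code):

#include <stdio.h>

/* Stand-in for struct page; only the field the sketch needs. */
struct page {
	int nid;
};

/* Stand-in for page_to_nid(). */
static int page_to_nid(const struct page *page)
{
	return page->nid;
}

/* Before: takes the whole page but uses only its node ID. */
static void check_events_before(const struct page *page)
{
	printf("event on node %d\n", page_to_nid(page));
}

/* After: takes exactly what it uses. */
static void check_events_after(int nid)
{
	printf("event on node %d\n", nid);
}

int main(void)
{
	struct page page = { .nid = 1 };

	check_events_before(&page);

	/* Callers hoist the lookup, as mem_cgroup_move_account() now
	 * does once before its IRQ-off section and then reuses for
	 * both events. */
	int nid = page_to_nid(&page);
	check_events_after(nid);
	return 0;
}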
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -819,7 +819,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
  * Check events in order.
  *
  */
-static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
+static void memcg_check_events(struct mem_cgroup *memcg, int nid)
 {
 	/* threshold event is triggered in finer grain than soft limit */
 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
@@ -830,7 +830,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 					MEM_CGROUP_TARGET_SOFTLIMIT);
 		mem_cgroup_threshold(memcg);
 		if (unlikely(do_softlimit))
-			mem_cgroup_update_tree(memcg, page_to_nid(page));
+			mem_cgroup_update_tree(memcg, nid);
 	}
 }
 
@@ -5555,7 +5555,7 @@ static int mem_cgroup_move_account(struct page *page,
 	struct lruvec *from_vec, *to_vec;
 	struct pglist_data *pgdat;
 	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
-	int ret;
+	int nid, ret;
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5644,12 +5644,13 @@ static int mem_cgroup_move_account(struct page *page,
 	__unlock_page_memcg(from);
 
 	ret = 0;
+	nid = page_to_nid(page);
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(to, nr_pages);
-	memcg_check_events(to, page);
+	memcg_check_events(to, nid);
 	mem_cgroup_charge_statistics(from, -nr_pages);
-	memcg_check_events(from, page);
+	memcg_check_events(from, nid);
 	local_irq_enable();
 out_unlock:
 	unlock_page(page);
@@ -6671,7 +6672,7 @@ static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, nr_pages);
-	memcg_check_events(memcg, page);
+	memcg_check_events(memcg, page_to_nid(page));
 	local_irq_enable();
 out:
 	return ret;
@@ -6777,7 +6778,7 @@ struct uncharge_gather {
 	unsigned long nr_memory;
 	unsigned long pgpgout;
 	unsigned long nr_kmem;
-	struct page *dummy_page;
+	int nid;
 };
 
 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
@@ -6801,7 +6802,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
-	memcg_check_events(ug->memcg, ug->dummy_page);
+	memcg_check_events(ug->memcg, ug->nid);
 	local_irq_restore(flags);
 
 	/* drop reference from uncharge_page */
@@ -6842,7 +6843,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 			uncharge_gather_clear(ug);
 		}
 		ug->memcg = memcg;
-		ug->dummy_page = page;
+		ug->nid = page_to_nid(page);
 
 		/* pairs with css_put in uncharge_batch */
 		css_get(&memcg->css);
@@ -6954,7 +6955,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
 	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, nr_pages);
-	memcg_check_events(memcg, newpage);
+	memcg_check_events(memcg, page_to_nid(newpage));
 	local_irq_restore(flags);
 }
@@ -7182,7 +7183,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 */
 	VM_BUG_ON(!irqs_disabled());
 	mem_cgroup_charge_statistics(memcg, -nr_entries);
-	memcg_check_events(memcg, page);
+	memcg_check_events(memcg, page_to_nid(page));
 
 	css_put(&memcg->css);
 }