mm/memcg: Convert memcg_check_events to take a node ID

memcg_check_events() only uses the page's node ID, so have the callers
pass page_to_nid() instead of the page itself to make the interface
easier to understand (a minimal sketch of the resulting calling
convention follows the tags below).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
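
For readers outside the kernel tree, here is a minimal, self-contained
userspace sketch of the pattern this patch applies. It is illustrative
only: struct page, page_to_nid(), and check_events() below are
simplified stand-ins for the kernel symbols, not the real
implementations. The point is that a callee which only ever derives one
value from a large argument is clearer when handed that value directly:

/*
 * Sketch only -- not part of the patch. All names are stand-ins
 * for the kernel symbols of the same (or similar) name.
 */
#include <stdio.h>

struct page {
        int nid;        /* node ID of the page */
};

static int page_to_nid(const struct page *page)
{
        return page->nid;
}

/* Before: callee takes a whole page but only uses its node ID. */
static void check_events_old(const struct page *page)
{
        printf("update soft-limit tree for node %d\n", page_to_nid(page));
}

/* After: callee takes the node ID directly. */
static void check_events(int nid)
{
        printf("update soft-limit tree for node %d\n", nid);
}

int main(void)
{
        struct page page = { .nid = 1 };

        check_events_old(&page);                /* old interface */
        check_events(page_to_nid(&page));       /* new interface */
        return 0;
}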
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -819,7 +819,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
  * Check events in order.
  *
  */
-static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
+static void memcg_check_events(struct mem_cgroup *memcg, int nid)
 {
         /* threshold event is triggered in finer grain than soft limit */
         if (unlikely(mem_cgroup_event_ratelimit(memcg,
@@ -830,7 +830,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
                                                 MEM_CGROUP_TARGET_SOFTLIMIT);
                 mem_cgroup_threshold(memcg);
                 if (unlikely(do_softlimit))
-                        mem_cgroup_update_tree(memcg, page_to_nid(page));
+                        mem_cgroup_update_tree(memcg, nid);
         }
 }
 
@@ -5555,7 +5555,7 @@ static int mem_cgroup_move_account(struct page *page,
         struct lruvec *from_vec, *to_vec;
         struct pglist_data *pgdat;
         unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
-        int ret;
+        int nid, ret;
 
         VM_BUG_ON(from == to);
         VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5644,12 +5644,13 @@ static int mem_cgroup_move_account(struct page *page,
         __unlock_page_memcg(from);
 
         ret = 0;
+        nid = page_to_nid(page);
 
         local_irq_disable();
         mem_cgroup_charge_statistics(to, nr_pages);
-        memcg_check_events(to, page);
+        memcg_check_events(to, nid);
         mem_cgroup_charge_statistics(from, -nr_pages);
-        memcg_check_events(from, page);
+        memcg_check_events(from, nid);
         local_irq_enable();
 out_unlock:
         unlock_page(page);
@@ -6671,7 +6672,7 @@ static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
 
         local_irq_disable();
         mem_cgroup_charge_statistics(memcg, nr_pages);
-        memcg_check_events(memcg, page);
+        memcg_check_events(memcg, page_to_nid(page));
         local_irq_enable();
 out:
         return ret;
@@ -6777,7 +6778,7 @@ struct uncharge_gather {
         unsigned long nr_memory;
         unsigned long pgpgout;
         unsigned long nr_kmem;
-        struct page *dummy_page;
+        int nid;
 };
 
 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
@@ -6801,7 +6802,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
         local_irq_save(flags);
         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
-        memcg_check_events(ug->memcg, ug->dummy_page);
+        memcg_check_events(ug->memcg, ug->nid);
         local_irq_restore(flags);
 
         /* drop reference from uncharge_page */
@@ -6842,7 +6843,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                         uncharge_gather_clear(ug);
                 }
                 ug->memcg = memcg;
-                ug->dummy_page = page;
+                ug->nid = page_to_nid(page);
 
                 /* pairs with css_put in uncharge_batch */
                 css_get(&memcg->css);
@@ -6954,7 +6955,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
         local_irq_save(flags);
         mem_cgroup_charge_statistics(memcg, nr_pages);
-        memcg_check_events(memcg, newpage);
+        memcg_check_events(memcg, page_to_nid(newpage));
         local_irq_restore(flags);
 }
 
@@ -7182,7 +7183,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
          */
         VM_BUG_ON(!irqs_disabled());
         mem_cgroup_charge_statistics(memcg, -nr_entries);
-        memcg_check_events(memcg, page);
+        memcg_check_events(memcg, page_to_nid(page));
 
         css_put(&memcg->css);
 }
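
Two details of the conversion are worth noting. In
mem_cgroup_move_account(), the node ID is computed once with
page_to_nid() before local_irq_disable() and reused for both
memcg_check_events() calls, rather than being derived inside the
IRQ-disabled region for each call. And struct uncharge_gather no longer
carries a dummy_page pointer purely so that uncharge_batch() could call
page_to_nid() on it later; it records the int nid directly when the
first page is gathered, which is slightly smaller on 64-bit kernels and
makes explicit what the batching code actually needs.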