Commit cc7b8504 authored by Roman Gushchin, committed by Andrew Morton

mm: memcg: rename memcg_check_events()

Rename memcg_check_events() into memcg1_check_events() for consistency
with other cgroup v1-specific functions.
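
The rename is mechanical: callers are unchanged apart from the function name. As a minimal sketch of the change as seen by a caller (taken from the mem_cgroup_commit_charge() hunk below; kernel context, not a standalone program):

	/* before */
	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
	memcg_check_events(memcg, folio_nid(folio));

	/* after: identical behavior, cgroup v1-prefixed name */
	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
	memcg1_check_events(memcg, folio_nid(folio));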

Link: https://lkml.kernel.org/r/20240625005906.106920-8-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 66d60c42
@@ -835,9 +835,9 @@ static int mem_cgroup_move_account(struct folio *folio,
 	local_irq_disable();
 	mem_cgroup_charge_statistics(to, nr_pages);
-	memcg_check_events(to, nid);
+	memcg1_check_events(to, nid);
 	mem_cgroup_charge_statistics(from, -nr_pages);
-	memcg_check_events(from, nid);
+	memcg1_check_events(from, nid);
 	local_irq_enable();
 out:
 	return ret;
@@ -1424,7 +1424,7 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  * Check events in order.
  *
  */
-void memcg_check_events(struct mem_cgroup *memcg, int nid)
+void memcg1_check_events(struct mem_cgroup *memcg, int nid)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		return;
...
@@ -12,7 +12,7 @@ static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
 }
 void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
-void memcg_check_events(struct mem_cgroup *memcg, int nid);
+void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 void memcg_oom_recover(struct mem_cgroup *memcg);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		     unsigned int nr_pages);
...
@@ -2630,7 +2630,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
-	memcg_check_events(memcg, folio_nid(folio));
+	memcg1_check_events(memcg, folio_nid(folio));
 	local_irq_enable();
 }
@@ -5662,7 +5662,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
-	memcg_check_events(ug->memcg, ug->nid);
+	memcg1_check_events(ug->memcg, ug->nid);
 	local_irq_restore(flags);
 	/* drop reference from uncharge_folio */
@@ -5801,7 +5801,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, nr_pages);
-	memcg_check_events(memcg, folio_nid(new));
+	memcg1_check_events(memcg, folio_nid(new));
 	local_irq_restore(flags);
 }
@@ -6070,7 +6070,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 	memcg_stats_lock();
 	mem_cgroup_charge_statistics(memcg, -nr_entries);
 	memcg_stats_unlock();
-	memcg_check_events(memcg, folio_nid(folio));
+	memcg1_check_events(memcg, folio_nid(folio));
 	css_put(&memcg->css);
 }
...