Commit 87024f58 authored by Roman Gushchin, committed by Andrew Morton

mm: memcg: rename soft limit reclaim-related functions

Rename the exported functions related to soft limit reclaim to use the
memcg1_ prefix.
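
Soft limit reclaim is specific to cgroup v1, so the renamed functions
carry the memcg1_ prefix used for the legacy-only code paths.
Concretely, the renames in this patch are:

    mem_cgroup_soft_limit_reclaim() -> memcg1_soft_limit_reclaim()
    mem_cgroup_update_tree()        -> memcg1_update_tree()
    mem_cgroup_remove_from_trees()  -> memcg1_remove_from_trees()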

Link: https://lkml.kernel.org/r/20240625005906.106920-4-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d12f6d22
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1121,9 +1121,9 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 
 void split_page_memcg(struct page *head, int old_order, int new_order);
 
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
-						gfp_t gfp_mask,
-						unsigned long *total_scanned);
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+					gfp_t gfp_mask,
+					unsigned long *total_scanned);
 
 #else /* CONFIG_MEMCG */
@@ -1572,9 +1572,9 @@ static inline void split_page_memcg(struct page *head, int old_order, int new_order)
 }
 
 static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
-					    gfp_t gfp_mask,
-					    unsigned long *total_scanned)
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+					gfp_t gfp_mask,
+					unsigned long *total_scanned)
 {
 	return 0;
 }
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -100,7 +100,7 @@ static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 	return excess;
 }
 
-void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
+void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
 {
 	unsigned long excess;
 	struct mem_cgroup_per_node *mz;
@@ -143,7 +143,7 @@ void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
 	}
 }
 
-void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
+void memcg1_remove_from_trees(struct mem_cgroup *memcg)
 {
 	struct mem_cgroup_tree_per_node *mctz;
 	struct mem_cgroup_per_node *mz;
@@ -243,7 +243,7 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
 	return total;
 }
 
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
-					    gfp_t gfp_mask,
-					    unsigned long *total_scanned)
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+					gfp_t gfp_mask,
+					unsigned long *total_scanned)
 {
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -3,8 +3,8 @@
 #ifndef __MM_MEMCONTROL_V1_H
 #define __MM_MEMCONTROL_V1_H
 
-void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid);
-void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg);
+void memcg1_update_tree(struct mem_cgroup *memcg, int nid);
+void memcg1_remove_from_trees(struct mem_cgroup *memcg);
 
 static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
 {
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1011,7 +1011,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, int nid)
 						MEM_CGROUP_TARGET_SOFTLIMIT);
 		mem_cgroup_threshold(memcg);
 		if (unlikely(do_softlimit))
-			mem_cgroup_update_tree(memcg, nid);
+			memcg1_update_tree(memcg, nid);
 	}
 }
@@ -5608,7 +5608,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 	vmpressure_cleanup(&memcg->vmpressure);
 	cancel_work_sync(&memcg->high_work);
-	mem_cgroup_remove_from_trees(memcg);
+	memcg1_remove_from_trees(memcg);
 	free_shrinker_info(memcg);
 	mem_cgroup_free(memcg);
 }
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6169,9 +6169,9 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			 * and balancing, not for a memcg's limit.
 			 */
 			nr_soft_scanned = 0;
-			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
-						sc->order, sc->gfp_mask,
-						&nr_soft_scanned);
+			nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat,
+						sc->order, sc->gfp_mask,
+						&nr_soft_scanned);
 			sc->nr_reclaimed += nr_soft_reclaimed;
 			sc->nr_scanned += nr_soft_scanned;
 			/* need some check for avoid more shrink_zone() */
@@ -6933,8 +6933,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
 		/* Call soft limit reclaim before calling shrink_node. */
 		sc.nr_scanned = 0;
 		nr_soft_scanned = 0;
-		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
-						sc.gfp_mask, &nr_soft_scanned);
+		nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order,
+						sc.gfp_mask, &nr_soft_scanned);
 		sc.nr_reclaimed += nr_soft_reclaimed;
 		/*