Commit 68b4876d authored by Sha Zhengju, committed by Linus Torvalds

memcg: remove MEMCG_NR_FILE_MAPPED

While accounting memcg page stat, it's not worth to use
MEMCG_NR_FILE_MAPPED as an extra layer of indirection because of the
complexity and presumed performance overhead.  We can use
MEM_CGROUP_STAT_FILE_MAPPED directly.
Signed-off-by: Sha Zhengju <handai.szj@taobao.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Reviewed-by: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1a36e59d
@@ -30,9 +30,20 @@ struct page;
 struct mm_struct;
 struct kmem_cache;
 
-/* Stats that can be updated by kernel. */
-enum mem_cgroup_page_stat_item {
-	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+/*
+ * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
+ * These two lists should keep in accord with each other.
+ */
+enum mem_cgroup_stat_index {
+	/*
+	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
+	 */
+	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
+	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
+	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
+	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
+	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
+	MEM_CGROUP_STAT_NSTATS,
 };
 
 struct mem_cgroup_reclaim_cookie {
@@ -233,17 +244,17 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 }
 
 void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_page_stat_item idx,
+				 enum mem_cgroup_stat_index idx,
 				 int val);
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum mem_cgroup_page_stat_item idx)
+					    enum mem_cgroup_stat_index idx)
 {
 	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum mem_cgroup_page_stat_item idx)
+					    enum mem_cgroup_stat_index idx)
 {
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
@@ -449,12 +460,12 @@ static inline bool mem_cgroup_oom_synchronize(void)
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					    enum mem_cgroup_page_stat_item idx)
+					    enum mem_cgroup_stat_index idx)
 {
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					    enum mem_cgroup_page_stat_item idx)
+					    enum mem_cgroup_stat_index idx)
 {
 }
......
@@ -84,21 +84,6 @@ static int really_do_swap_account __initdata = 0;
 #endif
 
-/*
- * Statistics for memory cgroup.
- */
-enum mem_cgroup_stat_index {
-	/*
-	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
-	 */
-	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
-	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
-	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
-	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
-	MEM_CGROUP_STAT_NSTATS,
-};
-
 static const char * const mem_cgroup_stat_names[] = {
 	"cache",
 	"rss",
@@ -2231,7 +2216,7 @@ void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
 }
 
 void mem_cgroup_update_page_stat(struct page *page,
-				 enum mem_cgroup_page_stat_item idx, int val)
+				 enum mem_cgroup_stat_index idx, int val)
 {
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -2244,14 +2229,6 @@ void mem_cgroup_update_page_stat(struct page *page,
 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
 		return;
 
-	switch (idx) {
-	case MEMCG_NR_FILE_MAPPED:
-		idx = MEM_CGROUP_STAT_FILE_MAPPED;
-		break;
-	default:
-		BUG();
-	}
-
 	this_cpu_add(memcg->stat->count[idx], val);
 }
......
@@ -1111,7 +1111,7 @@ void page_add_file_rmap(struct page *page)
 	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
 	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
@@ -1155,7 +1155,7 @@ void page_remove_rmap(struct page *page)
 				NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 	}
 
 	if (unlikely(PageMlocked(page)))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment