Commit 2a7106f2 authored by Greg Thelen, committed by Linus Torvalds

memcg: create extensible page stat update routines

Replace usage of the mem_cgroup_update_file_mapped() memcg
statistic update routine with two new routines:
* mem_cgroup_inc_page_stat()
* mem_cgroup_dec_page_stat()

As before, only the file_mapped statistic is managed.  However, these more
general interfaces allow for new statistics to be more easily added.  New
statistics are added with memcg dirty page accounting.
Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrea Righi <arighi@develer.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ece72400
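
The shape of the new interface is easiest to see in isolation. Below is a minimal, self-contained userspace sketch of the pattern this commit introduces: one general update routine plus thin inc/dec wrappers keyed by an extensible enum. The flat counter array, the sentinel enum value, and the dropped struct page argument are simplifications for illustration only; the kernel version updates per-cpu counters under RCU with page_cgroup locking.

#include <stdio.h>

/* Mirrors the new public enum added to memcontrol.h below. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
	MEMCG_NR_STAT_ITEMS,	/* illustrative sentinel, not in the commit */
};

static long stat_count[MEMCG_NR_STAT_ITEMS];

/* General routine: every statistic funnels through one update path. */
static void mem_cgroup_update_page_stat(enum mem_cgroup_page_stat_item idx,
					int val)
{
	stat_count[idx] += val;	/* kernel: this_cpu_add() on mem->stat->count */
}

/* Thin wrappers, matching those added to memcontrol.h below. */
static inline void mem_cgroup_inc_page_stat(enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(idx, 1);
}

static inline void mem_cgroup_dec_page_stat(enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(idx, -1);
}

int main(void)
{
	mem_cgroup_inc_page_stat(MEMCG_NR_FILE_MAPPED);	/* page mapped */
	mem_cgroup_dec_page_stat(MEMCG_NR_FILE_MAPPED);	/* page unmapped */
	printf("file_mapped: %ld\n", stat_count[MEMCG_NR_FILE_MAPPED]);
	return 0;
}
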
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,11 @@ struct page_cgroup;
 struct page;
 struct mm_struct;
 
+/* Stats that can be updated by kernel. */
+enum mem_cgroup_page_stat_item {
+	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+};
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -121,7 +126,22 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-void mem_cgroup_update_file_mapped(struct page *page, int val);
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx,
+				 int val);
+
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, -1);
+}
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
@@ -293,8 +313,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_update_file_mapped(struct page *page,
-							int val)
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
 {
 }
 
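With the header changes above in place, a further statistic needs only a new enum value and a matching case in mem_cgroup_update_page_stat(); the inc/dec helpers work unchanged. A hedged sketch, where MEMCG_NR_FILE_DIRTY is a hypothetical name (the changelog notes that the actual dirty-page accounting arrives in later patches of the series):

/* Hypothetical follow-up, not part of this commit. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
	MEMCG_NR_FILE_DIRTY,	/* hypothetical: # of dirty file pages */
};

A caller would then write mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY), with no new per-statistic entry point to declare.
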
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1600,7 +1600,8 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
  * possibility of race condition. If there is, we take a lock.
  */
-static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1623,30 +1624,27 @@ static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
 		goto out;
 	}
 
-	this_cpu_add(mem->stat->count[idx], val);
-
 	switch (idx) {
-	case MEM_CGROUP_STAT_FILE_MAPPED:
+	case MEMCG_NR_FILE_MAPPED:
 		if (val > 0)
 			SetPageCgroupFileMapped(pc);
 		else if (!page_mapped(page))
 			ClearPageCgroupFileMapped(pc);
+		idx = MEM_CGROUP_STAT_FILE_MAPPED;
 		break;
 	default:
 		BUG();
 	}
 
+	this_cpu_add(mem->stat->count[idx], val);
+
 out:
 	if (unlikely(need_unlock))
 		unlock_page_cgroup(pc);
 	rcu_read_unlock();
 	return;
 }
+EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
-void mem_cgroup_update_file_mapped(struct page *page, int val)
-{
-	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
-}
-
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
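One subtlety in the hunk above: idx now arrives as a value of the public enum, and the switch both maintains the per-page flag and rewrites idx to the internal per-cpu counter index MEM_CGROUP_STAT_FILE_MAPPED. That translation is why this_cpu_add() moved from before the switch to after it. A hypothetical second statistic would follow the same translate-then-add shape inside mem_cgroup_update_page_stat(); both names below are invented for illustration:

	switch (idx) {
	case MEMCG_NR_FILE_DIRTY:			/* hypothetical item */
		idx = MEM_CGROUP_STAT_FILE_DIRTY;	/* hypothetical index */
		break;
	default:
		BUG();
	}
	this_cpu_add(mem->stat->count[idx], val);
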
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -937,7 +937,7 @@ void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, 1);
+		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 }
 
@@ -979,7 +979,7 @@ void page_remove_rmap(struct page *page)
 				NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, -1);
+		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,