Commit 26174efd authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: generic filestat update interface

This patch extracts the core logic of mem_cgroup_update_file_mapped() into a
new generic helper, mem_cgroup_update_file_stat(), and keeps
mem_cgroup_update_file_mapped() as a thin wrapper around it.

As a planned future update, the memory cgroup will have to count dirty pages
in order to implement dirty_ratio/limit.  In addition, the number of dirty
pages is needed to kick the flusher thread to start writeback.  (Currently,
no kick is done.)

This patch is preparation for that work and makes the implementation of other
statistics clearer.  Just a cleanup.  A sketch of how a future statistic
could reuse the new interface follows the diff below.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Reviewed-by: Greg Thelen <gthelen@google.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1489ebad
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1591,7 +1591,8 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
  * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are
  * possibility of race condition. If there is, we take a lock.
  */
-void mem_cgroup_update_file_mapped(struct page *page, int val)
+
+static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1613,13 +1614,18 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
 		if (!mem || !PageCgroupUsed(pc))
 			goto out;
 	}
-	if (val > 0) {
-		this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-		SetPageCgroupFileMapped(pc);
-	} else {
-		this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-		if (!page_mapped(page)) /* for race between dec->inc counter */
+
+	this_cpu_add(mem->stat->count[idx], val);
+
+	switch (idx) {
+	case MEM_CGROUP_STAT_FILE_MAPPED:
+		if (val > 0)
+			SetPageCgroupFileMapped(pc);
+		else if (!page_mapped(page))
 			ClearPageCgroupFileMapped(pc);
+		break;
+	default:
+		BUG();
 	}
 
 out:
@@ -1629,6 +1635,11 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
 	return;
 }
 
+void mem_cgroup_update_file_mapped(struct page *page, int val)
+{
+	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
+}
+
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
  * TODO: maybe necessary to use big numbers in big irons.
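
For illustration, the planned dirty-page accounting mentioned in the commit
message could plug into this interface with nothing more than another case in
the switch and another one-line wrapper.  The sketch below is hypothetical and
not part of this patch; in particular, MEM_CGROUP_STAT_FILE_DIRTY and
mem_cgroup_update_file_dirty() do not exist at this point.

	/*
	 * Hypothetical follow-up (sketch only): a dirty-page counter reusing
	 * the generic updater.  mem_cgroup_update_file_stat() would also gain
	 * a MEM_CGROUP_STAT_FILE_DIRTY case in its switch to maintain any
	 * per-page flag state, mirroring the FILE_MAPPED case.
	 */
	void mem_cgroup_update_file_dirty(struct page *page, int val)
	{
		mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_DIRTY, val);
	}

Callers would then pass +1 when a page becomes dirty and -1 when it is
cleaned, much as mm/rmap.c already calls mem_cgroup_update_file_mapped(page, 1)
from page_add_file_rmap() and mem_cgroup_update_file_mapped(page, -1) from
page_remove_rmap().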