Commit 50de1dd9 authored by Daisuke Nishimura's avatar Daisuke Nishimura Committed by Linus Torvalds

memcg: fix memory migration of shmem swapcache

In the current implementation mem_cgroup_end_migration() decides whether
the page migration has succeeded or not by checking "oldpage->mapping".

But if we are trying to migrate a shmem swapcache, the page->mapping of it
is NULL from the beginning, so the check would be invalid.  As a result,
mem_cgroup_end_migration() assumes the migration has succeeded even if
it's not, so "newpage" would be freed while it's not uncharged.

This patch fixes it by passing mem_cgroup_end_migration() the result of
the page migration.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 17295c88
...@@ -98,7 +98,7 @@ extern int ...@@ -98,7 +98,7 @@ extern int
mem_cgroup_prepare_migration(struct page *page, mem_cgroup_prepare_migration(struct page *page,
struct page *newpage, struct mem_cgroup **ptr); struct page *newpage, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem, extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
struct page *oldpage, struct page *newpage); struct page *oldpage, struct page *newpage, bool migration_ok);
/* /*
* For memory reclaim. * For memory reclaim.
...@@ -251,8 +251,7 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage, ...@@ -251,8 +251,7 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
} }
static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
struct page *oldpage, struct page *oldpage, struct page *newpage, bool migration_ok)
struct page *newpage)
{ {
} }
......
...@@ -2896,7 +2896,7 @@ int mem_cgroup_prepare_migration(struct page *page, ...@@ -2896,7 +2896,7 @@ int mem_cgroup_prepare_migration(struct page *page,
/* remove redundant charge if migration failed*/ /* remove redundant charge if migration failed*/
void mem_cgroup_end_migration(struct mem_cgroup *mem, void mem_cgroup_end_migration(struct mem_cgroup *mem,
struct page *oldpage, struct page *newpage) struct page *oldpage, struct page *newpage, bool migration_ok)
{ {
struct page *used, *unused; struct page *used, *unused;
struct page_cgroup *pc; struct page_cgroup *pc;
...@@ -2905,8 +2905,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, ...@@ -2905,8 +2905,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
return; return;
/* blocks rmdir() */ /* blocks rmdir() */
cgroup_exclude_rmdir(&mem->css); cgroup_exclude_rmdir(&mem->css);
/* at migration success, oldpage->mapping is NULL. */ if (!migration_ok) {
if (oldpage->mapping) {
used = oldpage; used = oldpage;
unused = newpage; unused = newpage;
} else { } else {
......
...@@ -768,7 +768,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, ...@@ -768,7 +768,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
uncharge: uncharge:
if (!charge) if (!charge)
mem_cgroup_end_migration(mem, page, newpage); mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock: unlock:
unlock_page(page); unlock_page(page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment