Commit 5a6475a4 authored by KAMEZAWA Hiroyuki's avatar KAMEZAWA Hiroyuki Committed by Linus Torvalds

memcg: fix leak on wrong LRU with FUSE

fs/fuse/dev.c::fuse_try_move_page() does

   (1) remove a page by ->steal()
   (2) re-add the page to page cache
   (3) link the page to LRU if it was not on LRU at (1)

This implies the page is _on_ LRU when it's added to radix-tree.  So, the
page is added to the memory cgroup while it's on the LRU, because the LRU is
lazy and no one flushes it.

This is the same behavior as SwapCache and needs special care as
 - remove page from LRU before overwrite pc->mem_cgroup.
 - add page to LRU after overwrite pc->mem_cgroup.

And we need to take care of the pagevec.

If PageLRU(page) is set before we add PCG_USED bit, the page will not be
added to the memcg's LRU (for a short period).  So, regardless of the PageLRU(page)
value before commit_charge(), we need to check PageLRU(page) after
commit_charge().

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=30432
Signed-off-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: default avatarJohannes Weiner <hannes@cmpxchg.org>
Acked-by: default avatarDaisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Balbir Singh <balbir@in.ibm.com>
Reported-by: default avatarDaniel Poelzleithner <poelzi@poelzi.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 6cfddb26
...@@ -926,18 +926,28 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) ...@@ -926,18 +926,28 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
} }
/* /*
* At handling SwapCache, pc->mem_cgroup may be changed while it's linked to * At handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
* lru because the page may.be reused after it's fully uncharged (because of * while it's linked to lru because the page may be reused after it's fully
* SwapCache behavior).To handle that, unlink page_cgroup from LRU when charge * uncharged. To handle that, unlink page_cgroup from LRU when charge it again.
it again. This function is only used to charge SwapCache. It's done under * It's done under lock_page and expected that zone->lru_lock is never held.
* lock_page and expected that zone->lru_lock is never held.
*/ */
static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page) static void mem_cgroup_lru_del_before_commit(struct page *page)
{ {
unsigned long flags; unsigned long flags;
struct zone *zone = page_zone(page); struct zone *zone = page_zone(page);
struct page_cgroup *pc = lookup_page_cgroup(page); struct page_cgroup *pc = lookup_page_cgroup(page);
/*
* Doing this check without taking ->lru_lock seems wrong but this
* is safe. Because if page_cgroup's USED bit is unset, the page
* will not be added to any memcg's LRU. If page_cgroup's USED bit is
* set, the commit after this will fail, anyway.
* This all charge/uncharge is done under some mutual execustion.
* So, we don't need to taking care of changes in USED bit.
*/
if (likely(!PageLRU(page)))
return;
spin_lock_irqsave(&zone->lru_lock, flags); spin_lock_irqsave(&zone->lru_lock, flags);
/* /*
* Forget old LRU when this page_cgroup is *not* used. This Used bit * Forget old LRU when this page_cgroup is *not* used. This Used bit
...@@ -948,12 +958,15 @@ static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page) ...@@ -948,12 +958,15 @@ static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
spin_unlock_irqrestore(&zone->lru_lock, flags); spin_unlock_irqrestore(&zone->lru_lock, flags);
} }
static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page) static void mem_cgroup_lru_add_after_commit(struct page *page)
{ {
unsigned long flags; unsigned long flags;
struct zone *zone = page_zone(page); struct zone *zone = page_zone(page);
struct page_cgroup *pc = lookup_page_cgroup(page); struct page_cgroup *pc = lookup_page_cgroup(page);
/* taking care of that the page is added to LRU while we commit it */
if (likely(!PageLRU(page)))
return;
spin_lock_irqsave(&zone->lru_lock, flags); spin_lock_irqsave(&zone->lru_lock, flags);
/* link when the page is linked to LRU but page_cgroup isn't */ /* link when the page is linked to LRU but page_cgroup isn't */
if (PageLRU(page) && !PageCgroupAcctLRU(pc)) if (PageLRU(page) && !PageCgroupAcctLRU(pc))
...@@ -2431,9 +2444,26 @@ static void ...@@ -2431,9 +2444,26 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
enum charge_type ctype); enum charge_type ctype);
/*
 * Commit a charge for a page that may already be on an LRU list
 * (SwapCache, or a page FUSE re-adds to the page cache after stealing
 * it): unlink the page_cgroup from the old LRU before overwriting its
 * memcg association, then relink it afterwards so it ends up on the
 * correct memcg's LRU.  See mem_cgroup_lru_del_before_commit() /
 * mem_cgroup_lru_add_after_commit() for the locking assumptions.
 */
static void
__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
enum charge_type ctype)
{
struct page_cgroup *pc = lookup_page_cgroup(page);
/*
 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
 * is already on LRU. It means the page may on some other page_cgroup's
 * LRU. Take care of it.
 */
mem_cgroup_lru_del_before_commit(page);
__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
mem_cgroup_lru_add_after_commit(page);
return;
}
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask) gfp_t gfp_mask)
{ {
struct mem_cgroup *mem = NULL;
int ret; int ret;
if (mem_cgroup_disabled()) if (mem_cgroup_disabled())
...@@ -2468,14 +2498,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, ...@@ -2468,14 +2498,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
if (unlikely(!mm)) if (unlikely(!mm))
mm = &init_mm; mm = &init_mm;
if (page_is_file_cache(page)) if (page_is_file_cache(page)) {
return mem_cgroup_charge_common(page, mm, gfp_mask, ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
MEM_CGROUP_CHARGE_TYPE_CACHE); if (ret || !mem)
return ret;
/*
* FUSE reuses pages without going through the final
* put that would remove them from the LRU list, make
* sure that they get relinked properly.
*/
__mem_cgroup_commit_charge_lrucare(page, mem,
MEM_CGROUP_CHARGE_TYPE_CACHE);
return ret;
}
/* shmem */ /* shmem */
if (PageSwapCache(page)) { if (PageSwapCache(page)) {
struct mem_cgroup *mem;
ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
if (!ret) if (!ret)
__mem_cgroup_commit_charge_swapin(page, mem, __mem_cgroup_commit_charge_swapin(page, mem,
...@@ -2532,17 +2570,13 @@ static void ...@@ -2532,17 +2570,13 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
enum charge_type ctype) enum charge_type ctype)
{ {
struct page_cgroup *pc;
if (mem_cgroup_disabled()) if (mem_cgroup_disabled())
return; return;
if (!ptr) if (!ptr)
return; return;
cgroup_exclude_rmdir(&ptr->css); cgroup_exclude_rmdir(&ptr->css);
pc = lookup_page_cgroup(page);
mem_cgroup_lru_del_before_commit_swapcache(page); __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
__mem_cgroup_commit_charge(ptr, page, 1, pc, ctype);
mem_cgroup_lru_add_after_commit_swapcache(page);
/* /*
* Now swap is on-memory. This means this page may be * Now swap is on-memory. This means this page may be
* counted both as mem and swap....double count. * counted both as mem and swap....double count.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment