Commit 3812c8c8 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcg: do not trap chargers with full callstack on OOM

The memcg OOM handling is incredibly fragile and can deadlock.  When a
task fails to charge memory, it invokes the OOM killer and loops right
there in the charge code until it succeeds.  Meanwhile, any other task
that enters the charge path at this point goes to a waitqueue right
then and there and sleeps until the OOM situation is resolved.  The
problem is that these tasks may hold filesystem locks and the mmap_sem,
locks that the selected OOM victim may need in order to exit.

For example, in one reported case, the task invoking the OOM killer was
about to charge a page cache page during a write(), which holds the
i_mutex.  The OOM killer selected a task that was just entering truncate()
and trying to acquire the i_mutex:

OOM invoking task:
  mem_cgroup_handle_oom+0x241/0x3b0
  mem_cgroup_cache_charge+0xbe/0xe0
  add_to_page_cache_locked+0x4c/0x140
  add_to_page_cache_lru+0x22/0x50
  grab_cache_page_write_begin+0x8b/0xe0
  ext3_write_begin+0x88/0x270
  generic_file_buffered_write+0x116/0x290
  __generic_file_aio_write+0x27c/0x480
  generic_file_aio_write+0x76/0xf0           # takes ->i_mutex
  do_sync_write+0xea/0x130
  vfs_write+0xf3/0x1f0
  sys_write+0x51/0x90
  system_call_fastpath+0x18/0x1d

OOM kill victim:
  do_truncate+0x58/0xa0              # takes i_mutex
  do_last+0x250/0xa30
  path_openat+0xd7/0x440
  do_filp_open+0x49/0xa0
  do_sys_open+0x106/0x240
  sys_open+0x20/0x30
  system_call_fastpath+0x18/0x1d

The OOM-invoking task will retry the charge indefinitely, while the
OOM-killed task is blocked on the i_mutex and can never exit to release
its memory.

A similar scenario can happen when the kernel OOM killer for a memcg is
disabled and a userspace task is in charge of resolving OOM situations.
In this case, ALL tasks that enter the OOM path will be made to sleep on
the OOM waitqueue and wait for userspace to free resources or increase
the group's limit.  But a userspace OOM handler is prone to deadlock
itself on the locks held by the waiting tasks.  For example, one of the
sleeping tasks may be stuck in a brk() call with the mmap_sem held for
writing, but the userspace handler, in order to pick an optimal victim,
may need to read files from /proc/<pid>, which tries to acquire the
same mmap_sem for reading and deadlocks.

This patch changes the way tasks behave after detecting a memcg OOM and
makes sure nobody loops or sleeps with locks held:

1. When OOMing in a user fault, invoke the OOM killer and restart the
   fault instead of looping on the charge attempt.  This way, the OOM
   victim cannot get stuck on locks the looping task may hold.

2. When OOMing in a user fault but somebody else is handling it
   (either the kernel OOM killer or a userspace handler), don't go to
   sleep in the charge context.  Instead, remember the OOMing memcg in
   the task struct and then fully unwind the page fault stack with
   -ENOMEM.  pagefault_out_of_memory() will then call back into the
   memcg code to check if the -ENOMEM came from the memcg, and then
   either put the task to sleep on the memcg's OOM waitqueue or just
   restart the fault.  The OOM victim can no longer get stuck on any
   lock a sleeping task may hold.
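
In condensed form, the resulting flow looks roughly like the sketch below.
This is a simplification of mem_cgroup_oom() and mem_cgroup_oom_synchronize()
from the diff, with the oom_lock handling, the wakeup bookkeeping and most
error handling left out; see the full hunks further down for the real code:

  /* charge path: record the OOM context instead of sleeping */
  static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  {
          if (!current->memcg_oom.may_oom)        /* only set for user faults */
                  return;

          current->memcg_oom.in_memcg_oom = 1;

          if (mem_cgroup_oom_trylock(memcg) && !memcg->oom_kill_disable) {
                  /* we are the OOM handler: kill now, restart the fault later */
                  mem_cgroup_out_of_memory(memcg, mask, order);
          } else {
                  /* somebody else handles it: remember whom to wait for */
                  css_get(&memcg->css);
                  current->memcg_oom.wait_on_memcg = memcg;
          }
          /* either way the charge fails and the fault returns VM_FAULT_OOM */
  }

  /* called from pagefault_out_of_memory(), after the fault stack is unwound */
  bool mem_cgroup_oom_synchronize(void)
  {
          struct mem_cgroup *memcg = current->memcg_oom.wait_on_memcg;

          if (!current->memcg_oom.in_memcg_oom)
                  return false;           /* global OOM, not handled here */

          if (memcg) {
                  /* ...sleep on the memcg's OOM waitqueue until woken, as the
                   * full mem_cgroup_oom_synchronize() below does... */
                  css_put(&memcg->css);
                  current->memcg_oom.wait_on_memcg = NULL;
          }
          current->memcg_oom.in_memcg_oom = 0;
          return true;                    /* the fault is simply restarted */
  }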

Debugged by Michal Hocko.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: azurIt <azurit@pobox.sk>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fb2a6fc5
@@ -157,6 +157,10 @@ extern void mem_cgroup_replace_page_cache(struct page *oldpage,
  *
  * Toggle whether a failed memcg charge should invoke the OOM killer
  * or just return -ENOMEM. Returns the previous toggle state.
+ *
+ * NOTE: Any path that enables the OOM killer before charging must
+ *       call mem_cgroup_oom_synchronize() afterward to finalize the
+ *       OOM handling and clean up.
  */
 static inline bool mem_cgroup_toggle_oom(bool new)
 {
@@ -182,6 +186,13 @@ static inline void mem_cgroup_disable_oom(void)
         WARN_ON(old == false);
 }
 
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+        return p->memcg_oom.in_memcg_oom;
+}
+
+bool mem_cgroup_oom_synchronize(void);
+
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
 #endif
@@ -427,6 +438,16 @@ static inline void mem_cgroup_disable_oom(void)
 {
 }
 
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+        return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(void)
+{
+        return false;
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
                                             enum mem_cgroup_page_stat_item idx)
 {
...
@@ -1395,6 +1395,10 @@ struct task_struct {
         unsigned int memcg_kmem_skip_account;
         struct memcg_oom_info {
                 unsigned int may_oom:1;
+                unsigned int in_memcg_oom:1;
+                unsigned int oom_locked:1;
+                int wakeups;
+                struct mem_cgroup *wait_on_memcg;
         } memcg_oom;
 #endif
 #ifdef CONFIG_UPROBES
...
@@ -255,6 +255,7 @@ struct mem_cgroup {
 
         bool            oom_lock;
         atomic_t        under_oom;
+        atomic_t        oom_wakeups;
 
         int     swappiness;
         /* OOM-Killer disable */
@@ -2020,6 +2021,7 @@ static int memcg_oom_wake_function(wait_queue_t *wait,
 
 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
 {
+        atomic_inc(&memcg->oom_wakeups);
         /* for filtering, pass "memcg" as argument. */
         __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
 }
@@ -2031,19 +2033,17 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 }
 
 /*
- * try to call OOM killer. returns false if we should exit memory-reclaim loop.
+ * try to call OOM killer
  */
-static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
-                                  int order)
+static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-        struct oom_wait_info owait;
         bool locked;
+        int wakeups;
 
-        owait.memcg = memcg;
-        owait.wait.flags = 0;
-        owait.wait.func = memcg_oom_wake_function;
-        owait.wait.private = current;
-        INIT_LIST_HEAD(&owait.wait.task_list);
+        if (!current->memcg_oom.may_oom)
+                return;
+
+        current->memcg_oom.in_memcg_oom = 1;
 
         /*
          * As with any blocking lock, a contender needs to start
@@ -2051,12 +2051,8 @@ static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
          * otherwise it can miss the wakeup from the unlock and sleep
          * indefinitely. This is just open-coded because our locking
          * is so particular to memcg hierarchies.
-         *
-         * Even if signal_pending(), we can't quit charge() loop without
-         * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
-         * under OOM is always welcomed, use TASK_KILLABLE here.
          */
-        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
+        wakeups = atomic_read(&memcg->oom_wakeups);
         mem_cgroup_mark_under_oom(memcg);
 
         locked = mem_cgroup_oom_trylock(memcg);
@@ -2066,15 +2062,95 @@ static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
 
         if (locked && !memcg->oom_kill_disable) {
                 mem_cgroup_unmark_under_oom(memcg);
-                finish_wait(&memcg_oom_waitq, &owait.wait);
                 mem_cgroup_out_of_memory(memcg, mask, order);
+                mem_cgroup_oom_unlock(memcg);
+                /*
+                 * There is no guarantee that an OOM-lock contender
+                 * sees the wakeups triggered by the OOM kill
+                 * uncharges. Wake any sleepers explicitly.
+                 */
+                memcg_oom_recover(memcg);
         } else {
-                schedule();
-                mem_cgroup_unmark_under_oom(memcg);
-                finish_wait(&memcg_oom_waitq, &owait.wait);
+                /*
+                 * A system call can just return -ENOMEM, but if this
+                 * is a page fault and somebody else is handling the
+                 * OOM already, we need to sleep on the OOM waitqueue
+                 * for this memcg until the situation is resolved.
+                 * Which can take some time because it might be
+                 * handled by a userspace task.
+                 *
+                 * However, this is the charge context, which means
+                 * that we may sit on a large call stack and hold
+                 * various filesystem locks, the mmap_sem etc. and we
+                 * don't want the OOM handler to deadlock on them
+                 * while we sit here and wait. Store the current OOM
+                 * context in the task_struct, then return -ENOMEM.
+                 * At the end of the page fault handler, with the
+                 * stack unwound, pagefault_out_of_memory() will check
+                 * back with us by calling
+                 * mem_cgroup_oom_synchronize(), possibly putting the
+                 * task to sleep.
+                 */
+                current->memcg_oom.oom_locked = locked;
+                current->memcg_oom.wakeups = wakeups;
+                css_get(&memcg->css);
+                current->memcg_oom.wait_on_memcg = memcg;
         }
+}
+
+/**
+ * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ *
+ * This has to be called at the end of a page fault if the memcg
+ * OOM handler was enabled and the fault is returning %VM_FAULT_OOM.
+ *
+ * Memcg supports userspace OOM handling, so failed allocations must
+ * sleep on a waitqueue until the userspace task resolves the
+ * situation. Sleeping directly in the charge context with all kinds
+ * of locks held is not a good idea, instead we remember an OOM state
+ * in the task and mem_cgroup_oom_synchronize() has to be called at
+ * the end of the page fault to put the task to sleep and clean up the
+ * OOM state.
+ *
+ * Returns %true if an ongoing memcg OOM situation was detected and
+ * finalized, %false otherwise.
+ */
+bool mem_cgroup_oom_synchronize(void)
+{
+        struct oom_wait_info owait;
+        struct mem_cgroup *memcg;
+
+        /* OOM is global, do not handle */
+        if (!current->memcg_oom.in_memcg_oom)
+                return false;
+
+        /*
+         * We invoked the OOM killer but there is a chance that a kill
+         * did not free up any charges. Everybody else might already
+         * be sleeping, so restart the fault and keep the rampage
+         * going until some charges are released.
+         */
+        memcg = current->memcg_oom.wait_on_memcg;
+        if (!memcg)
+                goto out;
+
+        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
+                goto out_memcg;
+
+        owait.memcg = memcg;
+        owait.wait.flags = 0;
+        owait.wait.func = memcg_oom_wake_function;
+        owait.wait.private = current;
+        INIT_LIST_HEAD(&owait.wait.task_list);
 
-        if (locked) {
+        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
+        /* Only sleep if we didn't miss any wakeups since OOM */
+        if (atomic_read(&memcg->oom_wakeups) == current->memcg_oom.wakeups)
+                schedule();
+        finish_wait(&memcg_oom_waitq, &owait.wait);
+out_memcg:
+        mem_cgroup_unmark_under_oom(memcg);
+        if (current->memcg_oom.oom_locked) {
                 mem_cgroup_oom_unlock(memcg);
                 /*
                  * There is no guarantee that an OOM-lock contender
@@ -2083,11 +2159,10 @@ static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
                  */
                 memcg_oom_recover(memcg);
         }
+        css_put(&memcg->css);
 
-        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
-                return false;
-        /* Give chance to dying process */
-        schedule_timeout_uninterruptible(1);
-
+        current->memcg_oom.wait_on_memcg = NULL;
+out:
+        current->memcg_oom.in_memcg_oom = 0;
         return true;
 }
@@ -2400,12 +2475,11 @@ enum {
         CHARGE_RETRY,           /* need to retry but retry is not bad */
         CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
         CHARGE_WOULDBLOCK,      /* GFP_WAIT wasn't set and no enough res. */
-        CHARGE_OOM_DIE,         /* the current is killed because of OOM */
 };
 
 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                 unsigned int nr_pages, unsigned int min_pages,
-                                bool oom_check)
+                                bool invoke_oom)
 {
         unsigned long csize = nr_pages * PAGE_SIZE;
         struct mem_cgroup *mem_over_limit;
@@ -2462,14 +2536,10 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
         if (mem_cgroup_wait_acct_move(mem_over_limit))
                 return CHARGE_RETRY;
 
-        /* If we don't need to call oom-killer at el, return immediately */
-        if (!oom_check || !current->memcg_oom.may_oom)
-                return CHARGE_NOMEM;
-        /* check OOM */
-        if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
-                return CHARGE_OOM_DIE;
+        if (invoke_oom)
+                mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
 
-        return CHARGE_RETRY;
+        return CHARGE_NOMEM;
 }
 
 /*
@@ -2572,7 +2642,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
         }
 
         do {
-                bool oom_check;
+                bool invoke_oom = oom && !nr_oom_retries;
 
                 /* If killed, bypass charge */
                 if (fatal_signal_pending(current)) {
@@ -2580,14 +2650,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                         goto bypass;
                 }
 
-                oom_check = false;
-                if (oom && !nr_oom_retries) {
-                        oom_check = true;
-                        nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
-                }
-
-                ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
-                                           oom_check);
+                ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
+                                           nr_pages, invoke_oom);
                 switch (ret) {
                 case CHARGE_OK:
                         break;
@@ -2600,16 +2664,12 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                         css_put(&memcg->css);
                         goto nomem;
                 case CHARGE_NOMEM: /* OOM routine works */
-                        if (!oom) {
+                        if (!oom || invoke_oom) {
                                 css_put(&memcg->css);
                                 goto nomem;
                         }
-                        /* If oom, we never return -ENOMEM */
                         nr_oom_retries--;
                         break;
-                case CHARGE_OOM_DIE: /* Killed by OOM Killer */
-                        css_put(&memcg->css);
-                        goto bypass;
                 }
         } while (ret != CHARGE_OK);
...
@@ -3867,6 +3867,9 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         if (flags & FAULT_FLAG_USER)
                 mem_cgroup_disable_oom();
 
+        if (WARN_ON(task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)))
+                mem_cgroup_oom_synchronize();
+
         return ret;
 }
...
@@ -678,9 +678,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
  */
 void pagefault_out_of_memory(void)
 {
-        struct zonelist *zonelist = node_zonelist(first_online_node,
-                                                  GFP_KERNEL);
+        struct zonelist *zonelist;
 
+        if (mem_cgroup_oom_synchronize())
+                return;
+
+        zonelist = node_zonelist(first_online_node, GFP_KERNEL);
         if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
                 out_of_memory(NULL, 0, 0, NULL, false);
                 clear_zonelist_oom(zonelist, GFP_KERNEL);
...