Commit 6c18ba7a authored by Michal Hocko, committed by Linus Torvalds

mm: help __GFP_NOFAIL allocations which do not trigger OOM killer

Now that __GFP_NOFAIL doesn't override decisions to skip the OOM killer
we are left with requests which have to loop inside the allocator
without invoking the OOM killer (e.g.  GFP_NOFS|__GFP_NOFAIL used by fs
code), and so they might, in very unlikely situations, loop forever -
e.g.  other parallel requests could starve them.
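
For illustration, such a request typically looks like the following
(a hypothetical call site, not one added by this patch):

	/*
	 * GFP_NOFS forbids recursing into fs reclaim and __GFP_NOFAIL
	 * makes the allocator loop rather than return NULL, so this
	 * call may spin in the allocator indefinitely without ever
	 * being allowed to invoke the OOM killer.
	 */
	void *p = kmalloc(64, GFP_NOFS | __GFP_NOFAIL);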

This patch tries to limit the likelihood of such a lockup by giving
these __GFP_NOFAIL requests a chance to move on by letting them consume
a small part of the memory reserves.  We use ALLOC_HARDER, which should
be enough to prevent starvation by regular allocation requests, yet
shouldn't consume enough of the reserves to disrupt high priority
requests (ALLOC_HIGH).
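
For reference, the watermark check in __zone_watermark_ok() treats
these flags roughly as follows (an abridged sketch of this era's
mm/page_alloc.c, not part of this patch):

	long min = mark;

	/* ALLOC_HIGH may dip halfway into the reserve below the watermark */
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	/* ALLOC_HARDER shaves off roughly another quarter on top of that */
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;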

While we are at it, let's introduce a helper, __alloc_pages_cpuset_fallback,
which enforces cpusets but falls back to ignoring them if the first
attempt fails.  __GFP_NOFAIL requests can be considered important
enough to allow this cpuset runaway so that the system can move on.  It
is highly unlikely that any of them will be GFP_USER anyway.
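
For context, GFP_USER differs from the kernel-internal masks by
carrying __GFP_HARDWALL, the flag that requests strict cpuset
enforcement (definitions from this era's include/linux/gfp.h, shown
here for reference only):

	#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
	#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
	#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)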

Link: http://lkml.kernel.org/r/20161220134904.21023-4-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 06ad276a
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3055,6 +3055,26 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 	warn_alloc_show_mem(gfp_mask, nm);
 }
 
+static inline struct page *
+__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
+			      unsigned int alloc_flags,
+			      const struct alloc_context *ac)
+{
+	struct page *page;
+
+	page = get_page_from_freelist(gfp_mask, order,
+			alloc_flags|ALLOC_CPUSET, ac);
+	/*
+	 * fallback to ignore cpuset restriction if our nodes
+	 * are depleted
+	 */
+	if (!page)
+		page = get_page_from_freelist(gfp_mask, order,
+				alloc_flags, ac);
+
+	return page;
+}
+
 static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	const struct alloc_context *ac, unsigned long *did_some_progress)
@@ -3119,17 +3139,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
 		*did_some_progress = 1;
 
-		if (gfp_mask & __GFP_NOFAIL) {
-			page = get_page_from_freelist(gfp_mask, order,
-					ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
-			/*
-			 * fallback to ignore cpuset restriction if our nodes
-			 * are depleted
-			 */
-			if (!page)
-				page = get_page_from_freelist(gfp_mask, order,
-					ALLOC_NO_WATERMARKS, ac);
-		}
+		/*
+		 * Help non-failing allocations by giving them access to memory
+		 * reserves
+		 */
+		if (gfp_mask & __GFP_NOFAIL)
+			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
+					ALLOC_NO_WATERMARKS, ac);
 	}
 out:
 	mutex_unlock(&oom_lock);
@@ -3785,6 +3801,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 */
 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
 
+		/*
+		 * Help non-failing allocations by giving them access to memory
+		 * reserves but do not use ALLOC_NO_WATERMARKS because this
+		 * could deplete whole memory reserves which would just make
+		 * the situation worse
+		 */
+		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
+		if (page)
+			goto got_pg;
+
 		cond_resched();
 		goto retry;
 	}