Commit b9921ecd authored by Qiang Huang, committed by Linus Torvalds

mm: add a helper function to check may oom condition

Use a helper function to check whether we need to deal with the OOM condition.
Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9c2606b7
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@ static inline void oom_killer_enable(void)
 	oom_killer_disabled = false;
 }
 
+static inline bool oom_gfp_allowed(gfp_t gfp_mask)
+{
+	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 /* sysctls */
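For reference, here is a minimal standalone sketch of the predicate the new helper centralizes: an allocation may fall back to the OOM killer only if it is allowed to enter the filesystem (__GFP_FS set) and has not opted out of retrying (__GFP_NORETRY clear). This is a userspace demo, not kernel code; the flag values below are placeholders chosen for illustration only (the real values live in include/linux/gfp.h).

/* Standalone sketch of oom_gfp_allowed() semantics; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_FS	0x01u	/* placeholder value, for illustration only */
#define __GFP_NORETRY	0x02u	/* placeholder value, for illustration only */

/* Same logic as the helper added to include/linux/oom.h above. */
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}

int main(void)
{
	/* FS-capable allocation without NORETRY: OOM killer may be used. */
	printf("FS only      -> %d\n", oom_gfp_allowed(__GFP_FS));
	/* No __GFP_FS (e.g. NOFS-style contexts): never OOM. */
	printf("no FS        -> %d\n", oom_gfp_allowed(0));
	/* __GFP_NORETRY callers opt out of OOM even when FS is allowed. */
	printf("FS | NORETRY -> %d\n",
	       oom_gfp_allowed(__GFP_FS | __GFP_NORETRY));
	return 0;
}

Centralizing the test in one helper keeps the memcg kmem-charge path and the page allocator slow path from drifting apart on when the OOM killer may be invoked, as the two hunks below show.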
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2984,21 +2984,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
 	int ret = 0;
-	bool may_oom;
 
 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
 	if (ret)
 		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
 	_memcg = memcg;
 	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, oom_gfp_allowed(gfp));
 
 	if (ret == -EINTR) {
 		/*
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2593,7 +2593,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */