Commit 08ab9b10 authored by David Rientjes, committed by Linus Torvalds

mm, oom: force oom kill on sysrq+f

The oom killer chooses not to kill a thread if:

 - an eligible thread has already been oom killed and has yet to exit,
   and

 - an eligible thread is exiting but has yet to free all its memory and
   is not the thread currently attempting to allocate memory.

SysRq+F manually invokes the global oom killer to kill a memory-hogging
task.  This is normally done as a last resort to free memory when no
progress is being made or to test the oom killer itself.

For both uses, we always want to kill a thread and never defer.  This
patch causes SysRq+F to always kill an eligible thread and can be used to
force a kill even if another oom-killed thread has failed to exit.
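
The effect of force_kill on the selection logic can be sketched with a toy userspace model (plain C, not kernel code; the classify() helper and the OOM_* names below are invented purely for illustration): with force_kill set, neither of the two deferral conditions above blocks victim selection.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two deferral conditions described above. */
enum oom_decision { OOM_DEFER, OOM_PICK_CURRENT, OOM_SCORE_NORMALLY };

static enum oom_decision classify(bool memdie, bool exiting,
				  bool is_current, bool force_kill)
{
	/* Condition 1: a task was already oom killed (TIF_MEMDIE) and has
	 * not exited yet; normally selection defers, SysRq+F does not. */
	if (memdie && !force_kill)
		return OOM_DEFER;

	/* Condition 2: a task is exiting but still holds memory; normally
	 * the killer waits for it unless it is the allocating task itself,
	 * and SysRq+F skips the wait as well. */
	if (exiting) {
		if (is_current)
			return OOM_PICK_CURRENT;
		if (!force_kill)
			return OOM_DEFER;
	}
	return OOM_SCORE_NORMALLY;
}

int main(void)
{
	/* With force_kill, neither condition blocks victim selection. */
	printf("%d %d %d\n",
	       classify(true, false, false, false),	/* defers */
	       classify(true, false, false, true),	/* proceeds */
	       classify(false, true, false, true));	/* proceeds */
	return 0;
}
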
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b7643757
drivers/tty/sysrq.c
@@ -346,7 +346,7 @@ static struct sysrq_key_op sysrq_term_op = {
 static void moom_callback(struct work_struct *ignored)
 {
-	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
+	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL, true);
 }
 static DECLARE_WORK(moom_work, moom_callback);
include/linux/oom.h
@@ -49,7 +49,7 @@ extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		int order, nodemask_t *mask);
+		int order, nodemask_t *mask, bool force_kill);
 extern int register_oom_notifier(struct notifier_block *nb);
 extern int unregister_oom_notifier(struct notifier_block *nb);
mm/oom_kill.c
@@ -310,7 +310,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
 		unsigned long totalpages, struct mem_cgroup *memcg,
-		const nodemask_t *nodemask)
+		const nodemask_t *nodemask, bool force_kill)
 {
 	struct task_struct *g, *p;
 	struct task_struct *chosen = NULL;
@@ -336,6 +336,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
 				__thaw_task(p);
+			if (!force_kill)
 				return ERR_PTR(-1UL);
 		}
 		if (!p->mm)
@@ -354,7 +355,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 			if (p == current) {
 				chosen = p;
 				*ppoints = 1000;
-			} else {
+			} else if (!force_kill) {
 				/*
 				 * If this task is not being ptraced on exit,
 				 * then wait for it to finish before killing
@@ -572,7 +573,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
 	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
-	p = select_bad_process(&points, limit, memcg, NULL);
+	p = select_bad_process(&points, limit, memcg, NULL, false);
 	if (p && PTR_ERR(p) != -1UL)
 		oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
 				 "Memory cgroup out of memory");
@@ -687,6 +688,7 @@ static void clear_system_oom(void)
  * @gfp_mask: memory allocation flags
  * @order: amount of memory being requested as a power of 2
  * @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
  *
  * If we run out of memory, we have the choice between either
  * killing a random task (bad), letting the system crash (worse)
@@ -694,7 +696,7 @@ static void clear_system_oom(void)
  * don't have to be perfect here, we just have to be good.
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		int order, nodemask_t *nodemask)
+		int order, nodemask_t *nodemask, bool force_kill)
 {
 	const nodemask_t *mpol_mask;
 	struct task_struct *p;
@@ -738,7 +740,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		goto out;
 	}
-	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
+	p = select_bad_process(&points, totalpages, NULL, mpol_mask,
+			       force_kill);
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
 		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
@@ -770,7 +773,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 void pagefault_out_of_memory(void)
 {
 	if (try_set_system_oom()) {
-		out_of_memory(NULL, 0, 0, NULL);
+		out_of_memory(NULL, 0, 0, NULL, false);
 		clear_system_oom();
 	}
 	if (!test_thread_flag(TIF_MEMDIE))
mm/page_alloc.c
@@ -1968,7 +1968,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 		goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	out_of_memory(zonelist, gfp_mask, order, nodemask);
+	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 out:
 	clear_zonelist_oom(zonelist, gfp_mask);
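
For reference, the forced kill is triggered from the keyboard with Alt+SysRq+F or by writing 'f' to /proc/sysrq-trigger, which ends up in moom_callback() above. A minimal userspace sketch of the latter, assuming root and a kernel built with CONFIG_MAGIC_SYSRQ:

/* Minimal sketch: invoke the forced OOM kill path by writing 'f'
 * to /proc/sysrq-trigger. Requires root and CONFIG_MAGIC_SYSRQ. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sysrq-trigger");
		return 1;
	}
	if (write(fd, "f", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}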