Commit f44666b0 authored by Tetsuo Handa's avatar Tetsuo Handa Committed by Linus Torvalds

mm,oom: speed up select_bad_process() loop

Since commit 3a5dda7a ("oom: prevent unnecessary oom kills or kernel
panics"), select_bad_process() is using for_each_process_thread().

Since oom_unkillable_task() scans all threads in the caller's thread
group and oom_task_origin() scans signal_struct of the caller's thread
group, we don't need to call oom_unkillable_task() and oom_task_origin()
on each thread.  Also, since !mm test will be done later at
oom_badness(), we don't need to do !mm test on each thread.  Therefore,
we only need to do TIF_MEMDIE test on each thread.

Although the original code was correct it was quite inefficient because
each thread group was scanned num_threads times which can be a lot
especially with processes with many threads.  Even though the OOM is
extremely cold path it is always good to be as effective as possible
when we are inside rcu_read_lock() - aka unpreemptible context.

If we track number of TIF_MEMDIE threads inside signal_struct, we don't
need to do TIF_MEMDIE test on each thread.  This will allow
select_bad_process() to use for_each_process().

This patch adds a counter to signal_struct for tracking how many
TIF_MEMDIE threads are in a given thread group, and check it at
oom_scan_process_thread() so that select_bad_process() can use
for_each_process() rather than for_each_process_thread().

[mhocko@suse.com: do not blow the signal_struct size]
  Link: http://lkml.kernel.org/r/20160520075035.GF19172@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/201605182230.IDC73435.MVSOHLFOQFOJtF@I-love.SAKURA.ne.jpSigned-off-by: default avatarTetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: default avatarMichal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 98748bd7
...@@ -669,6 +669,7 @@ struct signal_struct { ...@@ -669,6 +669,7 @@ struct signal_struct {
atomic_t sigcnt; atomic_t sigcnt;
atomic_t live; atomic_t live;
int nr_threads; int nr_threads;
atomic_t oom_victims; /* # of TIF_MEMDIE threads in this thread group */
struct list_head thread_head; struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */ wait_queue_head_t wait_chldexit; /* for wait4() */
......
...@@ -283,12 +283,8 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, ...@@ -283,12 +283,8 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
* This task already has access to memory reserves and is being killed. * This task already has access to memory reserves and is being killed.
* Don't allow any other task to have access to the reserves. * Don't allow any other task to have access to the reserves.
*/ */
if (test_tsk_thread_flag(task, TIF_MEMDIE)) { if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims))
if (!is_sysrq_oom(oc)) return OOM_SCAN_ABORT;
return OOM_SCAN_ABORT;
}
if (!task->mm)
return OOM_SCAN_CONTINUE;
/* /*
* If task is allocating a lot of memory and has been marked to be * If task is allocating a lot of memory and has been marked to be
...@@ -307,12 +303,12 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, ...@@ -307,12 +303,12 @@ enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
static struct task_struct *select_bad_process(struct oom_control *oc, static struct task_struct *select_bad_process(struct oom_control *oc,
unsigned int *ppoints, unsigned long totalpages) unsigned int *ppoints, unsigned long totalpages)
{ {
struct task_struct *g, *p; struct task_struct *p;
struct task_struct *chosen = NULL; struct task_struct *chosen = NULL;
unsigned long chosen_points = 0; unsigned long chosen_points = 0;
rcu_read_lock(); rcu_read_lock();
for_each_process_thread(g, p) { for_each_process(p) {
unsigned int points; unsigned int points;
switch (oom_scan_process_thread(oc, p, totalpages)) { switch (oom_scan_process_thread(oc, p, totalpages)) {
...@@ -331,9 +327,6 @@ static struct task_struct *select_bad_process(struct oom_control *oc, ...@@ -331,9 +327,6 @@ static struct task_struct *select_bad_process(struct oom_control *oc,
points = oom_badness(p, NULL, oc->nodemask, totalpages); points = oom_badness(p, NULL, oc->nodemask, totalpages);
if (!points || points < chosen_points) if (!points || points < chosen_points)
continue; continue;
/* Prefer thread group leaders for display purposes */
if (points == chosen_points && thread_group_leader(chosen))
continue;
chosen = p; chosen = p;
chosen_points = points; chosen_points = points;
...@@ -673,6 +666,7 @@ void mark_oom_victim(struct task_struct *tsk) ...@@ -673,6 +666,7 @@ void mark_oom_victim(struct task_struct *tsk)
/* OOM killer might race with memcg OOM */ /* OOM killer might race with memcg OOM */
if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
return; return;
atomic_inc(&tsk->signal->oom_victims);
/* /*
* Make sure that the task is woken up from uninterruptible sleep * Make sure that the task is woken up from uninterruptible sleep
* if it is frozen because OOM killer wouldn't be able to free * if it is frozen because OOM killer wouldn't be able to free
...@@ -690,6 +684,7 @@ void exit_oom_victim(struct task_struct *tsk) ...@@ -690,6 +684,7 @@ void exit_oom_victim(struct task_struct *tsk)
{ {
if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE)) if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
return; return;
atomic_dec(&tsk->signal->oom_victims);
if (!atomic_dec_return(&oom_victims)) if (!atomic_dec_return(&oom_victims))
wake_up_all(&oom_victims_wait); wake_up_all(&oom_victims_wait);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment