Commit 74614de1 authored by Tony Luck's avatar Tony Luck Committed by Linus Torvalds

mm/memory-failure.c: don't let collect_procs() skip over processes for MF_ACTION_REQUIRED

When Linux sees an "action optional" machine check (where h/w has reported
an error that is not in the current execution path) we generally do not
want to signal a process, since most processes do not have a SIGBUS
handler - we'd just prematurely terminate the process for a problem that
it might never actually see.

task_early_kill() decides whether to consider a process - and it checks
whether this specific process has been marked for early signals with
"prctl", or if the system administrator has requested early signals for
all processes using /proc/sys/vm/memory_failure_early_kill.

But for the MF_ACTION_REQUIRED case we must not defer.  The error is in the
execution path of the current thread so we must send the SIGBUS
immediately.

Fix by passing a flag argument through collect_procs*() to
task_early_kill() so it knows whether we can defer or must take action.
Signed-off-by: default avatarTony Luck <tony.luck@intel.com>
Signed-off-by: default avatarNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Chen Gong <gong.chen@linux.jf.intel.com>
Cc: <stable@vger.kernel.org>	[3.2+]
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent a70ffcac
...@@ -380,10 +380,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, ...@@ -380,10 +380,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
} }
} }
static int task_early_kill(struct task_struct *tsk) static int task_early_kill(struct task_struct *tsk, int force_early)
{ {
if (!tsk->mm) if (!tsk->mm)
return 0; return 0;
if (force_early)
return 1;
if (tsk->flags & PF_MCE_PROCESS) if (tsk->flags & PF_MCE_PROCESS)
return !!(tsk->flags & PF_MCE_EARLY); return !!(tsk->flags & PF_MCE_EARLY);
return sysctl_memory_failure_early_kill; return sysctl_memory_failure_early_kill;
...@@ -393,7 +395,7 @@ static int task_early_kill(struct task_struct *tsk) ...@@ -393,7 +395,7 @@ static int task_early_kill(struct task_struct *tsk)
* Collect processes when the error hit an anonymous page. * Collect processes when the error hit an anonymous page.
*/ */
static void collect_procs_anon(struct page *page, struct list_head *to_kill, static void collect_procs_anon(struct page *page, struct list_head *to_kill,
struct to_kill **tkc) struct to_kill **tkc, int force_early)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct task_struct *tsk; struct task_struct *tsk;
...@@ -409,7 +411,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, ...@@ -409,7 +411,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
for_each_process (tsk) { for_each_process (tsk) {
struct anon_vma_chain *vmac; struct anon_vma_chain *vmac;
if (!task_early_kill(tsk)) if (!task_early_kill(tsk, force_early))
continue; continue;
anon_vma_interval_tree_foreach(vmac, &av->rb_root, anon_vma_interval_tree_foreach(vmac, &av->rb_root,
pgoff, pgoff) { pgoff, pgoff) {
...@@ -428,7 +430,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, ...@@ -428,7 +430,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
* Collect processes when the error hit a file mapped page. * Collect processes when the error hit a file mapped page.
*/ */
static void collect_procs_file(struct page *page, struct list_head *to_kill, static void collect_procs_file(struct page *page, struct list_head *to_kill,
struct to_kill **tkc) struct to_kill **tkc, int force_early)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct task_struct *tsk; struct task_struct *tsk;
...@@ -439,7 +441,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, ...@@ -439,7 +441,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
for_each_process(tsk) { for_each_process(tsk) {
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
if (!task_early_kill(tsk)) if (!task_early_kill(tsk, force_early))
continue; continue;
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
...@@ -465,7 +467,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, ...@@ -465,7 +467,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
* First preallocate one tokill structure outside the spin locks, * First preallocate one tokill structure outside the spin locks,
* so that we can kill at least one process reasonably reliable. * so that we can kill at least one process reasonably reliable.
*/ */
static void collect_procs(struct page *page, struct list_head *tokill) static void collect_procs(struct page *page, struct list_head *tokill,
int force_early)
{ {
struct to_kill *tk; struct to_kill *tk;
...@@ -476,9 +479,9 @@ static void collect_procs(struct page *page, struct list_head *tokill) ...@@ -476,9 +479,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
if (!tk) if (!tk)
return; return;
if (PageAnon(page)) if (PageAnon(page))
collect_procs_anon(page, tokill, &tk); collect_procs_anon(page, tokill, &tk, force_early);
else else
collect_procs_file(page, tokill, &tk); collect_procs_file(page, tokill, &tk, force_early);
kfree(tk); kfree(tk);
} }
...@@ -963,7 +966,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -963,7 +966,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* there's nothing that can be done. * there's nothing that can be done.
*/ */
if (kill) if (kill)
collect_procs(ppage, &tokill); collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
ret = try_to_unmap(ppage, ttu); ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS) if (ret != SWAP_SUCCESS)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment