Commit 996ff7a0 authored by Jane Chu, committed by Linus Torvalds

mm/memory-failure.c clean up around tk pre-allocation

add_to_kill() expects the first 'tk' to be pre-allocated and only makes
subsequent allocations on an as-needed basis, which makes the code a bit
difficult to read.

Move all allocation inside add_to_kill() and drop the **tk argument.
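For illustration only, here is a minimal, self-contained user-space C sketch of the calling convention this change moves to (toy types and list handling, not the actual mm/memory-failure.c code): the helper allocates its own entry and simply bails out on failure, so callers no longer have to thread a pre-allocated **tk through every collect_procs_* helper.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's struct to_kill (illustrative only). */
struct to_kill {
	struct to_kill *next;
	unsigned long addr;
};

/*
 * New scheme: add_to_kill() does its own allocation and just returns
 * (after logging) if memory is unavailable; no **tkc parameter needed.
 */
static void add_to_kill(unsigned long addr, struct to_kill **to_kill_list)
{
	struct to_kill *tk = malloc(sizeof(*tk));

	if (!tk) {
		fprintf(stderr, "out of memory, dropping kill entry\n");
		return;
	}
	tk->addr = addr;
	tk->next = *to_kill_list;
	*to_kill_list = tk;
}

int main(void)
{
	struct to_kill *list = NULL;

	/* Callers simply call the helper; no pre-allocated tk to manage. */
	add_to_kill(0x1000, &list);
	add_to_kill(0x2000, &list);

	while (list) {
		struct to_kill *next = list->next;

		printf("would signal task mapping 0x%lx\n", list->addr);
		free(list);
		list = next;
	}
	return 0;
}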

Link: http://lkml.kernel.org/r/1565112345-28754-2-git-send-email-jane.chu@oracle.com
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2e53c4e1
mm/memory-failure.c
@@ -303,25 +303,19 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
- * TBD would GFP_NOIO be enough?
  */
 static void add_to_kill(struct task_struct *tsk, struct page *p,
 		       struct vm_area_struct *vma,
-		       struct list_head *to_kill,
-		       struct to_kill **tkc)
+		       struct list_head *to_kill)
 {
 	struct to_kill *tk;
 
-	if (*tkc) {
-		tk = *tkc;
-		*tkc = NULL;
-	} else {
-		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
-		if (!tk) {
-			pr_err("Memory failure: Out of memory while machine check handling\n");
-			return;
-		}
+	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
+	if (!tk) {
+		pr_err("Memory failure: Out of memory while machine check handling\n");
+		return;
 	}
+
 	tk->addr = page_address_in_vma(p, vma);
 	if (is_zone_device_page(p))
 		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
@@ -345,6 +339,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 		kfree(tk);
 		return;
 	}
+
 	get_task_struct(tsk);
 	tk->tsk = tsk;
 	list_add_tail(&tk->nd, to_kill);
@@ -436,7 +431,7 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-				struct to_kill **tkc, int force_early)
+				int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -461,7 +456,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 			if (!page_mapped_in_vma(page, vma))
 				continue;
 			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, vma, to_kill, tkc);
+				add_to_kill(t, page, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -472,7 +467,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
-				struct to_kill **tkc, int force_early)
+				int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -496,7 +491,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			 * to be informed of all such data corruptions.
 			 */
 			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, vma, to_kill, tkc);
+				add_to_kill(t, page, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -505,26 +500,17 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 /*
  * Collect the processes who have the corrupted page mapped to kill.
- * This is done in two steps for locking reasons.
- * First preallocate one tokill structure outside the spin locks,
- * so that we can kill at least one process reasonably reliable.
  */
 static void collect_procs(struct page *page, struct list_head *tokill,
 				int force_early)
 {
-	struct to_kill *tk;
-
 	if (!page->mapping)
 		return;
 
-	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
-	if (!tk)
-		return;
 	if (PageAnon(page))
-		collect_procs_anon(page, tokill, &tk, force_early);
+		collect_procs_anon(page, tokill, force_early);
 	else
-		collect_procs_file(page, tokill, &tk, force_early);
-	kfree(tk);
+		collect_procs_file(page, tokill, force_early);
 }
 
 static const char *action_name[] = {