Commit 31a78f23 authored by Balbir Singh, committed by Linus Torvalds

mm owner: fix race between swapoff and exit

There's a race between mm->owner assignment and swapoff, more easily
seen when task slab poisoning is turned on.  The condition occurs when
try_to_unuse() runs in parallel with an exiting task.  A similar race
can occur with callers of get_task_mm(), such as /proc/<pid>/<mmstats>
or ptrace or page migration.

CPU0                                    CPU1
                                        try_to_unuse
                                        looks at mm = task0->mm
                                        increments mm->mm_users
task 0 exits
mm->owner needs to be updated, but no
new owner is found (mm_users > 1, but
no other task has task->mm = task0->mm)
mm_update_next_owner() leaves
                                        mmput(mm) decrements mm->mm_users
task0 freed
                                        dereferencing mm->owner fails
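
For reference, the reader side of this race is the usual pattern for pinning
another task's mm, as in get_task_mm() and its callers such as try_to_unuse().
A minimal sketch, with an illustrative function name that is not part of the
patch:

	/*
	 * Sketch of the CPU1 side: pin another task's mm.  Once this
	 * returns, the task may exit at any time; mm_users > 1 keeps the
	 * mm itself alive, but nothing keeps mm->owner pointing at a
	 * live task.
	 */
	static struct mm_struct *pin_task_mm(struct task_struct *task)
	{
		struct mm_struct *mm;

		task_lock(task);
		mm = task->mm;
		if (mm)
			atomic_inc(&mm->mm_users);	/* paired with mmput() */
		task_unlock(task);
		return mm;
	}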

The fix is to notify the subsystem via the mm_owner_changed() callback
when no new owner is found, passing NULL as the new task.
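
For a subsystem, the new contract looks roughly like the sketch below,
assuming this kernel's three-argument mm_owner_changed hook; the subsystem
and its callback name are hypothetical:

	/*
	 * Hypothetical subsystem hook: 'new' may now be NULL when the
	 * last owner exited and no other task shares the mm.
	 */
	static void example_mm_owner_changed(struct cgroup_subsys *ss,
					     struct cgroup *old,
					     struct cgroup *new)
	{
		if (!new) {
			/* mm lost its owner: drop per-cgroup state tied to 'old' */
			return;
		}
		/* normal handoff of accounting from 'old' to 'new' */
	}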

Jiri Slaby:
mm->owner was being set to NULL before calling cgroup_mm_owner_callbacks();
it must be set afterwards, so that NULL is not passed as the old owner,
which would cause an oops.

Daisuke Nishimura:
mm_update_next_owner() may now set mm->owner to NULL, so mem_cgroup_from_task()
and its callers must account for this situation to avoid an oops.
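
Distilled into a single hypothetical helper (the actual changes below
open-code this pattern in mem_cgroup_charge_common() and
mem_cgroup_shrink_usage()):

	/*
	 * Look up and pin the mem_cgroup owning an mm, tolerating a NULL
	 * owner.  Illustrative helper, not part of the patch.
	 */
	static struct mem_cgroup *try_get_mm_memcg(struct mm_struct *mm)
	{
		struct mem_cgroup *mem;

		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			/* owner cleared by mm_update_next_owner() */
			rcu_read_unlock();
			return NULL;
		}
		css_get(&mem->css);	/* pin before leaving the RCU section */
		rcu_read_unlock();
		return mem;
	}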

Hugh Dickins:
Testing these patches showed a lockdep warning and a hang below exec_mmap().
exit_mm() releases mmap_sem with up_read() before calling
mm_update_next_owner(), so exec_mmap() now needs to do the same.  And with
that repositioning, there's no longer any point in mm_need_new_owner()
allowing for a NULL mm.
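
The likely mechanism behind that lockdep report: mm_update_next_owner() now
takes mmap_sem for write (see the kernel/exit.c hunk below), so calling it
while still holding the same rwsem for read would hang on itself.
Illustrative only, not patch code:

	static void broken_caller(struct mm_struct *old_mm)
	{
		down_read(&old_mm->mmap_sem);
		/* mm_update_next_owner() does down_write(&old_mm->mmap_sem);
		 * write-acquiring the rwsem we already read-hold hangs here. */
		mm_update_next_owner(old_mm);
		up_read(&old_mm->mmap_sem);
	}
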
Reported-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bf5cb664
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -752,11 +752,11 @@ static int exec_mmap(struct mm_struct *mm)
 	tsk->active_mm = mm;
 	activate_mm(active_mm, mm);
 	task_unlock(tsk);
-	mm_update_next_owner(old_mm);
 	arch_pick_mmap_layout(mm);
 	if (old_mm) {
 		up_read(&old_mm->mmap_sem);
 		BUG_ON(active_mm != old_mm);
+		mm_update_next_owner(old_mm);
 		mmput(old_mm);
 		return 0;
 	}
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
-	struct cgroup *oldcgrp, *newcgrp;
+	struct cgroup *oldcgrp, *newcgrp = NULL;
 
 	if (need_mm_owner_callback) {
 		int i;
 		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
 			struct cgroup_subsys *ss = subsys[i];
 			oldcgrp = task_cgroup(old, ss->subsys_id);
-			newcgrp = task_cgroup(new, ss->subsys_id);
+			if (new)
+				newcgrp = task_cgroup(new, ss->subsys_id);
 			if (oldcgrp == newcgrp)
 				continue;
 			if (ss->mm_owner_changed)
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
 	 * If there are other users of the mm and the owner (us) is exiting
 	 * we need to find a new owner to take on the responsibility.
 	 */
-	if (!mm)
-		return 0;
 	if (atomic_read(&mm->mm_users) <= 1)
 		return 0;
 	if (mm->owner != p)
@@ -627,6 +625,16 @@ void mm_update_next_owner(struct mm_struct *mm)
 	} while_each_thread(g, c);
 
 	read_unlock(&tasklist_lock);
+	/*
+	 * We found no owner yet mm_users > 1: this implies that we are
+	 * most likely racing with swapoff (try_to_unuse()) or /proc or
+	 * ptrace or page migration (get_task_mm()). Mark owner as NULL,
+	 * so that subsystems can understand the callback and take action.
+	 */
+	down_write(&mm->mmap_sem);
+	cgroup_mm_owner_callbacks(mm->owner, NULL);
+	mm->owner = NULL;
+	up_write(&mm->mmap_sem);
 	return;
 
 assign_new_owner:
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+	/*
+	 * mm_update_next_owner() may clear mm->owner to NULL
+	 * if it races with swapoff, page migration, etc.
+	 * So this can be called with p == NULL.
+	 */
+	if (unlikely(!p))
+		return NULL;
+
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem)) {
+			rcu_read_unlock();
+			kmem_cache_free(page_cgroup_cache, pc);
+			return 0;
+		}
 		/*
 		 * For every charge from the cgroup, increment reference count
 		 */
@@ -801,6 +814,10 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!mem)) {
+		rcu_read_unlock();
+		return 0;
+	}
 	css_get(&mem->css);
 	rcu_read_unlock();
 