Commit c3f3ce04 authored by Andrea Arcangeli, committed by Linus Torvalds

userfaultfd: use RCU to free the task struct when fork fails

The task structure is freed while get_mem_cgroup_from_mm() holds
rcu_read_lock() and dereferences mm->owner.

  get_mem_cgroup_from_mm()                failing fork()
  ----                                    ---
  task = mm->owner
                                          mm->owner = NULL;
                                          free(task)
  if (task) *task; /* use after free */

The fix consists in freeing the task with RCU also in the fork failure
case, exactly like it always happens for the regular exit(2) path.  That
is enough to make the rcu_read_lock hold in get_mem_cgroup_from_mm()
(left side above) effective to avoid a use after free when dereferencing
the task structure.

An alternate possible fix would be to defer the delivery of the
userfaultfd contexts to the monitor until after fork() is guaranteed to
succeed.  Such a change would require more changes because it would
create a strict ordering dependency where the uffd methods would need to
be called beyond the last potentially failing branch in order to be
safe.  This solution as opposed only adds the dependency to common code
to set mm->owner to NULL and to free the task struct that was pointed by
mm->owner with RCU, if fork ends up failing.  The userfaultfd methods
can still be called anywhere during the fork runtime and the monitor
will keep discarding orphaned "mm" coming from failed forks in userland.

This race condition cannot trigger if the kernel was built with
CONFIG_MEMCG=n.

[aarcange@redhat.com: improve changelog, reduce #ifdefs per Michal]
  Link: http://lkml.kernel.org/r/20190429035752.4508-1-aarcange@redhat.com
Link: http://lkml.kernel.org/r/20190325225636.11635-2-aarcange@redhat.com
Fixes: 893e26e6 ("userfaultfd: non-cooperative: Add fork() event")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Tested-by: zhong jiang <zhongjiang@huawei.com>
Reported-by: syzbot+cbb52e396df3e565ab02@syzkaller.appspotmail.com
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: zhong jiang <zhongjiang@huawei.com>
Cc: syzbot+cbb52e396df3e565ab02@syzkaller.appspotmail.com
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent acb2ec3d
...@@ -955,6 +955,15 @@ static void mm_init_aio(struct mm_struct *mm) ...@@ -955,6 +955,15 @@ static void mm_init_aio(struct mm_struct *mm)
#endif #endif
} }
/*
 * Clear mm->owner on the fork error path if it still points at the
 * failing task @p, so that lockless readers of mm->owner under
 * rcu_read_lock() (e.g. get_mem_cgroup_from_mm()) observe NULL rather
 * than a task_struct that is about to be freed.
 *
 * WRITE_ONCE() publishes the NULL atomically for those lockless
 * readers. mm->owner only exists when CONFIG_MEMCG is enabled, hence
 * the #ifdef; with CONFIG_MEMCG=n this compiles to a no-op.
 */
static __always_inline void mm_clear_owner(struct mm_struct *mm,
struct task_struct *p)
{
#ifdef CONFIG_MEMCG
if (mm->owner == p)
WRITE_ONCE(mm->owner, NULL);
#endif
}
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{ {
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
...@@ -1343,6 +1352,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk, ...@@ -1343,6 +1352,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk,
free_pt: free_pt:
/* don't put binfmt in mmput, we haven't got module yet */ /* don't put binfmt in mmput, we haven't got module yet */
mm->binfmt = NULL; mm->binfmt = NULL;
mm_init_owner(mm, NULL);
mmput(mm); mmput(mm);
fail_nomem: fail_nomem:
...@@ -1726,6 +1736,21 @@ static int pidfd_create(struct pid *pid) ...@@ -1726,6 +1736,21 @@ static int pidfd_create(struct pid *pid)
return fd; return fd;
} }
/*
 * RCU callback: actually release a task_struct whose freeing was
 * deferred by delayed_free_task(). Runs after a grace period, so no
 * rcu_read_lock() holder can still be dereferencing mm->owner.
 */
static void __delayed_free_task(struct rcu_head *rhp)
{
	struct task_struct *task;

	task = container_of(rhp, struct task_struct, rcu);
	free_task(task);
}
/*
 * Free a task_struct on the fork failure path.
 *
 * With CONFIG_MEMCG the dying task may still be reachable through
 * mm->owner by lockless RCU readers, so the actual free must wait for
 * a grace period (via __delayed_free_task). Without CONFIG_MEMCG no
 * such readers exist and the task can be freed immediately.
 */
static __always_inline void delayed_free_task(struct task_struct *tsk)
{
	if (!IS_ENABLED(CONFIG_MEMCG)) {
		free_task(tsk);
		return;
	}

	call_rcu(&tsk->rcu, __delayed_free_task);
}
/* /*
* This creates a new process as a copy of the old one, * This creates a new process as a copy of the old one,
* but does not actually start it yet. * but does not actually start it yet.
...@@ -2233,8 +2258,10 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -2233,8 +2258,10 @@ static __latent_entropy struct task_struct *copy_process(
bad_fork_cleanup_namespaces: bad_fork_cleanup_namespaces:
exit_task_namespaces(p); exit_task_namespaces(p);
bad_fork_cleanup_mm: bad_fork_cleanup_mm:
if (p->mm) if (p->mm) {
mm_clear_owner(p->mm, p);
mmput(p->mm); mmput(p->mm);
}
bad_fork_cleanup_signal: bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD)) if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal); free_signal_struct(p->signal);
...@@ -2265,7 +2292,7 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -2265,7 +2292,7 @@ static __latent_entropy struct task_struct *copy_process(
bad_fork_free: bad_fork_free:
p->state = TASK_DEAD; p->state = TASK_DEAD;
put_task_stack(p); put_task_stack(p);
free_task(p); delayed_free_task(p);
fork_out: fork_out:
spin_lock_irq(&current->sighand->siglock); spin_lock_irq(&current->sighand->siglock);
hlist_del_init(&delayed.node); hlist_del_init(&delayed.node);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment