Commit df2cc96e authored by Mike Rapoport, committed by Linus Torvalds

userfaultfd: prevent non-cooperative events vs mcopy_atomic races

If a process monitored with userfaultfd changes its memory mappings or
forks() at the same time as the uffd monitor fills the process memory with
UFFDIO_COPY, the actual creation of page table entries and copying of
the data in mcopy_atomic may happen either before or after the memory
mapping modifications, and there is no way for the uffd monitor to
maintain a consistent view of the process memory layout.

For instance, let's consider fork() running in parallel with
userfaultfd_copy():

process                          |  uffd monitor
---------------------------------+------------------------------
fork()                           |  userfaultfd_copy()
...                              |  ...
    dup_mmap()                   |      down_read(mmap_sem)
    down_write(mmap_sem)         |      /* create PTEs, copy data */
        dup_uffd()               |      up_read(mmap_sem)
        copy_page_range()        |
        up_write(mmap_sem)       |
        dup_uffd_complete()      |
            /* notify monitor */ |

If the userfaultfd_copy() takes the mmap_sem first, the new page(s) will
be present by the time copy_page_range() is called and they will appear
in the child's memory mappings.  However, if the fork() is the first to
take the mmap_sem, the new pages won't be mapped in the child's address
space.

If the pages are not present and the child tries to access them, the
monitor will get a page fault notification and everything is fine.
However, if the pages *are present*, the child can access them without
uffd noticing.  And if we copy them into the child, it'll see the wrong
data.  Since we are talking about a background copy, we'd need to decide
whether the pages should be copied or not regardless of #PF notifications.

Since the userfaultfd monitor has no way to determine the order, let's
disallow userfaultfd_copy in parallel with the non-cooperative events.
In such a case we return -EAGAIN and the uffd monitor can understand
that userfaultfd_copy() clashed with a non-cooperative event and take
an appropriate action.
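
For illustration only (not part of this patch), a monitor might react to
the new -EAGAIN result roughly as in the sketch below.  The helper
drain_uffd_events() and the wrapper uffd_copy_page() are hypothetical
names; how the pending events are actually handled is up to the monitor:

/*
 * Hedged sketch of monitor-side handling of -EAGAIN from UFFDIO_COPY.
 * drain_uffd_events() stands in for "read and process the pending
 * fork/remap/remove events" and is an assumption, not a real API.
 */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

extern void drain_uffd_events(int uffd);	/* hypothetical helper */

static int uffd_copy_page(int uffd, unsigned long dst, unsigned long src,
			  unsigned long len)
{
	struct uffdio_copy copy;

	for (;;) {
		memset(&copy, 0, sizeof(copy));
		copy.dst = dst;
		copy.src = src;
		copy.len = len;

		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
			return 0;

		/*
		 * UFFDIO_COPY now fails with EAGAIN when it races with a
		 * non-cooperative event; consume the event(s) and retry.
		 */
		if (errno == EAGAIN) {
			drain_uffd_events(uffd);
			continue;
		}
		return -1;	/* any other error is fatal here */
	}
}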

Link: http://lkml.kernel.org/r/1527061324-19949-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Pavel Emelyanov <xemul@virtuozzo.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrei Vagin <avagin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent be09102b
@@ -62,6 +62,8 @@ struct userfaultfd_ctx {
 	enum userfaultfd_state state;
 	/* released */
 	bool released;
+	/* memory mappings are changing because of non-cooperative event */
+	bool mmap_changing;
 	/* mm with one ore more vmas attached to this userfaultfd_ctx */
 	struct mm_struct *mm;
 };
@@ -641,6 +643,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 	 * already released.
 	 */
 out:
+	WRITE_ONCE(ctx->mmap_changing, false);
 	userfaultfd_ctx_put(ctx);
 }
 
@@ -686,10 +689,12 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
 		ctx->state = UFFD_STATE_RUNNING;
 		ctx->features = octx->features;
 		ctx->released = false;
+		ctx->mmap_changing = false;
 		ctx->mm = vma->vm_mm;
 		mmgrab(ctx->mm);
 
 		userfaultfd_ctx_get(octx);
+		WRITE_ONCE(octx->mmap_changing, true);
 		fctx->orig = octx;
 		fctx->new = ctx;
 		list_add_tail(&fctx->list, fcs);
@@ -732,6 +737,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
 	if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
 		vm_ctx->ctx = ctx;
 		userfaultfd_ctx_get(ctx);
+		WRITE_ONCE(ctx->mmap_changing, true);
 	}
 }
 
@@ -772,6 +778,7 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
 		return true;
 
 	userfaultfd_ctx_get(ctx);
+	WRITE_ONCE(ctx->mmap_changing, true);
 	up_read(&mm->mmap_sem);
 
 	msg_init(&ewq.msg);
@@ -815,6 +822,7 @@ int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 			return -ENOMEM;
 
 		userfaultfd_ctx_get(ctx);
+		WRITE_ONCE(ctx->mmap_changing, true);
 		unmap_ctx->ctx = ctx;
 		unmap_ctx->start = start;
 		unmap_ctx->end = end;
@@ -1653,6 +1661,10 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
 
 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
 
+	ret = -EAGAIN;
+	if (READ_ONCE(ctx->mmap_changing))
+		goto out;
+
 	ret = -EFAULT;
 	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
 			   /* don't copy "copy" last field */
@@ -1674,7 +1686,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
 		goto out;
 	if (mmget_not_zero(ctx->mm)) {
 		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
-				   uffdio_copy.len);
+				   uffdio_copy.len, &ctx->mmap_changing);
 		mmput(ctx->mm);
 	} else {
 		return -ESRCH;
@@ -1705,6 +1717,10 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
 
 	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
 
+	ret = -EAGAIN;
+	if (READ_ONCE(ctx->mmap_changing))
+		goto out;
+
 	ret = -EFAULT;
 	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
 			   /* don't copy "zeropage" last field */
@@ -1721,7 +1737,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
 
 	if (mmget_not_zero(ctx->mm)) {
 		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
-				     uffdio_zeropage.range.len);
+				     uffdio_zeropage.range.len,
+				     &ctx->mmap_changing);
 		mmput(ctx->mm);
 	} else {
 		return -ESRCH;
@@ -1900,6 +1917,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
 	ctx->features = 0;
 	ctx->state = UFFD_STATE_WAIT_API;
 	ctx->released = false;
+	ctx->mmap_changing = false;
 	ctx->mm = current->mm;
 	/* prevent the mm struct to be freed */
 	mmgrab(ctx->mm);
...
@@ -31,10 +31,12 @@
 extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
 
 extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
-			    unsigned long src_start, unsigned long len);
+			    unsigned long src_start, unsigned long len,
+			    bool *mmap_changing);
 extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
 			      unsigned long dst_start,
-			      unsigned long len);
+			      unsigned long len,
+			      bool *mmap_changing);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
...
@@ -404,7 +404,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 					      unsigned long dst_start,
 					      unsigned long src_start,
 					      unsigned long len,
-					      bool zeropage)
+					      bool zeropage,
+					      bool *mmap_changing)
 {
 	struct vm_area_struct *dst_vma;
 	ssize_t err;
@@ -430,6 +431,15 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 retry:
 	down_read(&dst_mm->mmap_sem);
 
+	/*
+	 * If memory mappings are changing because of non-cooperative
+	 * operation (e.g. mremap) running in parallel, bail out and
+	 * request the user to retry later
+	 */
+	err = -EAGAIN;
+	if (mmap_changing && READ_ONCE(*mmap_changing))
+		goto out_unlock;
+
 	/*
 	 * Make sure the vma is not shared, that the dst range is
 	 * both valid and fully within a single existing vma.
@@ -563,13 +573,15 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 }
 
 ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
-		     unsigned long src_start, unsigned long len)
+		     unsigned long src_start, unsigned long len,
+		     bool *mmap_changing)
 {
-	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
+	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
+			      mmap_changing);
 }
 
 ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
-		       unsigned long len)
+		       unsigned long len, bool *mmap_changing)
 {
-	return __mcopy_atomic(dst_mm, start, 0, len, true);
+	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
 }