Commit 230ca982 authored by Mike Rapoport, committed by Linus Torvalds

userfaultfd: non-cooperative: add madvise() event for MADV_FREE request

MADV_FREE is identical to MADV_DONTNEED from the point of view of the uffd
monitor: the monitor has to stop handling #PF events in the range being
freed.  We reuse the userfaultfd_remove callback, along with the logic
required to re-get and re-validate the VMA, which may change or disappear
because userfaultfd_remove releases mmap_sem.
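
For context, this is what the monitor side of the protocol looks like: once
UFFD_FEATURE_EVENT_REMOVE has been negotiated, madvise(MADV_DONTNEED) and,
with this patch, madvise(MADV_FREE) on a registered range are delivered to
the monitor as a UFFD_EVENT_REMOVE message carrying the start and end of the
range.  Below is a minimal monitor-side sketch (not part of this patch);
drop_range() is a hypothetical bookkeeping helper and the zero-page fault
handling is only illustrative.

/*
 * Monitor-side sketch (userspace, not part of this patch).  Assumes the
 * uffd was created with userfaultfd(2), UFFD_FEATURE_EVENT_REMOVE was
 * negotiated via UFFDIO_API and the watched area is registered.
 * drop_range() is a hypothetical bookkeeping helper.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void drop_range(unsigned long start, unsigned long end)
{
	/* forget queued pages for [start, end); stop serving it */
}

static void monitor_loop(int uffd, unsigned long page_size)
{
	struct uffd_msg msg;

	while (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
		switch (msg.event) {
		case UFFD_EVENT_PAGEFAULT: {
			/* resolve the fault, here simply with a zero page */
			struct uffdio_zeropage zp = {
				.range.start = msg.arg.pagefault.address
					       & ~(page_size - 1),
				.range.len = page_size,
			};
			ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
			break;
		}
		case UFFD_EVENT_REMOVE:
			/*
			 * MADV_DONTNEED or (after this patch) MADV_FREE hit
			 * [start, end): stop handling #PF events there and
			 * drop any per-range state.
			 */
			drop_range(msg.arg.remove.start, msg.arg.remove.end);
			break;
		default:
			break;
		}
	}
}

The property this patch relies on is that the monitor treats both madvise()
flavours identically, which is why the kernel side can route MADV_FREE
through the same userfaultfd_remove() notification as MADV_DONTNEED.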

Link: http://lkml.kernel.org/r/1497876311-18615-1-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 76b6f9b7
mm/madvise.c

@@ -451,9 +451,6 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 
-	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
-		return -EINVAL;
-
 	/* MADV_FREE works for only anon vma at the moment */
 	if (!vma_is_anonymous(vma))
 		return -EINVAL;
@@ -477,14 +474,6 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	return 0;
 }
 
-static long madvise_free(struct vm_area_struct *vma,
-			 struct vm_area_struct **prev,
-			 unsigned long start, unsigned long end)
-{
-	*prev = vma;
-	return madvise_free_single_vma(vma, start, end);
-}
-
 /*
  * Application no longer needs these pages. If the pages are dirty,
  * it's OK to just throw them away. The app will be more careful about
@@ -504,9 +493,17 @@ static long madvise_free(struct vm_area_struct *vma,
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed(struct vm_area_struct *vma,
-			     struct vm_area_struct **prev,
-			     unsigned long start, unsigned long end)
+static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+					unsigned long start, unsigned long end)
+{
+	zap_page_range(vma, start, end - start);
+	return 0;
+}
+
+static long madvise_dontneed_free(struct vm_area_struct *vma,
+				  struct vm_area_struct **prev,
+				  unsigned long start, unsigned long end,
+				  int behavior)
 {
 	*prev = vma;
 	if (!can_madv_dontneed_vma(vma))
@@ -526,7 +523,8 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 			 * is also < vma->vm_end. If start <
 			 * vma->vm_start it means an hole materialized
 			 * in the user address space within the
-			 * virtual range passed to MADV_DONTNEED.
+			 * virtual range passed to MADV_DONTNEED
+			 * or MADV_FREE.
 			 */
 			return -ENOMEM;
 		}
@@ -537,7 +535,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 			 * Don't fail if end > vma->vm_end. If the old
 			 * vma was splitted while the mmap_sem was
 			 * released the effect of the concurrent
-			 * operation may not cause MADV_DONTNEED to
+			 * operation may not cause madvise() to
 			 * have an undefined result. There may be an
 			 * adjacent next vma that we'll walk
 			 * next. userfaultfd_remove() will generate an
@@ -549,8 +547,13 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 		}
 		VM_WARN_ON(start >= end);
 	}
-	zap_page_range(vma, start, end - start);
-	return 0;
+
+	if (behavior == MADV_DONTNEED)
+		return madvise_dontneed_single_vma(vma, start, end);
+	else if (behavior == MADV_FREE)
+		return madvise_free_single_vma(vma, start, end);
+	else
+		return -EINVAL;
 }
 
 /*
@@ -656,9 +659,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	case MADV_WILLNEED:
 		return madvise_willneed(vma, prev, start, end);
 	case MADV_FREE:
-		return madvise_free(vma, prev, start, end);
 	case MADV_DONTNEED:
-		return madvise_dontneed(vma, prev, start, end);
+		return madvise_dontneed_free(vma, prev, start, end, behavior);
 	default:
 		return madvise_behavior(vma, prev, start, end, behavior);
 	}
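
For completeness, the new path can be exercised from userspace as sketched
below; main(), the mapping size and the number of pages freed are
illustrative, and a monitor such as the loop sketched above must be
consuming events from the descriptor.

/*
 * Setup/trigger sketch (userspace, illustrative only): create the uffd,
 * opt in to remove events, register an anonymous area, then free part of
 * it.  With this patch the MADV_FREE below is reported to the monitor as
 * UFFD_EVENT_REMOVE, exactly like MADV_DONTNEED.
 */
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_size = sysconf(_SC_PAGESIZE);
	size_t len = 64 * page_size;

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_EVENT_REMOVE,
	};
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0) {
		perror("userfaultfd");
		return 1;
	}

	char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct uffdio_register reg = {
		.range.start = (unsigned long)area,
		.range.len = len,
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	if (area == MAP_FAILED || ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) {
		perror("UFFDIO_REGISTER");
		return 1;
	}

	/* ... hand uffd to a monitor thread that reads events ... */

	/* Delivered to the monitor as UFFD_EVENT_REMOVE. */
	if (madvise(area, 16 * page_size, MADV_FREE) < 0)
		perror("madvise(MADV_FREE)");

	return 0;
}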