Commit 408e82b7 authored by Hugh Dickins, committed by Linus Torvalds

mm: munlock use follow_page

Hiroaki Wakabayashi points out that when mlock() has been interrupted
by SIGKILL, the subsequent munlock() takes unnecessarily long because
its use of __get_user_pages() insists on faulting in all the pages
which mlock() never reached.

It's worse than slowness if mlock() is terminated by Out Of Memory kill:
the munlock_vma_pages_all() in exit_mmap() insists on faulting in all the
pages which mlock() could not find memory for; so innocent bystanders are
killed too, and perhaps the system hangs.

__get_user_pages() does a lot that's silly for munlock(): so remove the
munlock option from __mlock_vma_pages_range(), and use a simple loop of
follow_page()s in munlock_vma_pages_range() instead; ignoring absent
pages, and not marking present pages as accessed or dirty.
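
For reference, the shape of the replacement loop (it appears in full in the
diff below) is sketched here with explanatory comments; the annotations are
mine, not part of the patch:

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                /*
                 * follow_page() only consults the page tables: pages which
                 * the interrupted mlock() never faulted in come back NULL
                 * and are simply skipped, rather than being faulted in the
                 * way __get_user_pages() would do.  FOLL_GET takes a
                 * reference so the page cannot be migrated or freed while
                 * we handle it.
                 */
                struct page *page = follow_page(vma, addr, FOLL_GET);
                if (page) {
                        /*
                         * Lock the page so the page->mapping test is stable:
                         * a page already truncated from its file has mapping
                         * cleared and needs no munlocking.
                         */
                        lock_page(page);
                        if (page->mapping)
                                munlock_vma_page(page);
                        unlock_page(page);
                        put_page(page);         /* drop the FOLL_GET reference */
                }
                cond_resched();         /* keep latency down over long ranges */
        }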

(Change munlock() to only go so far as mlock() reached?  That does not
work out, given the convention that mlock() claims complete success even
when it has to give up early - in part so that an underlying file can be
extended later, and those pages locked which earlier would give SIGBUS.)

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: <stable@kernel.org>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Hiroaki Wakabayashi <primulaelatior@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5d3bc270
@@ -139,49 +139,36 @@ static void munlock_vma_page(struct page *page)
 }

 /**
- * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
  * @vma:   target vma
  * @start: start address
  * @end:   end address
- * @mlock: 0 indicate munlock, otherwise mlock.
  *
- * If @mlock == 0, unlock an mlocked range;
- * else mlock the range of pages.  This takes care of making the pages present ,
- * too.
+ * This takes care of making the pages present too.
  *
  * return 0 on success, negative error code on error.
  *
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
 static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                   unsigned long start, unsigned long end,
-                                   int mlock)
+                                    unsigned long start, unsigned long end)
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long addr = start;
         struct page *pages[16]; /* 16 gives a reasonable batch */
         int nr_pages = (end - start) / PAGE_SIZE;
         int ret = 0;
-        int gup_flags = 0;
+        int gup_flags;

         VM_BUG_ON(start & ~PAGE_MASK);
         VM_BUG_ON(end & ~PAGE_MASK);
         VM_BUG_ON(start < vma->vm_start);
         VM_BUG_ON(end > vma->vm_end);
-        VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
-                  (atomic_read(&mm->mm_users) != 0));
-
-        /*
-         * mlock:   don't page populate if vma has PROT_NONE permission.
-         * munlock: always do munlock although the vma has PROT_NONE
-         *          permission, or SIGKILL is pending.
-         */
-        if (!mlock)
-                gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
-                             GUP_FLAGS_IGNORE_SIGKILL;
+        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

+        gup_flags = 0;
         if (vma->vm_flags & VM_WRITE)
-                gup_flags |= GUP_FLAGS_WRITE;
+                gup_flags = GUP_FLAGS_WRITE;

         while (nr_pages > 0) {
                 int i;
@@ -201,19 +188,10 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                  * This can happen for, e.g., VM_NONLINEAR regions before
                  * a page has been allocated and mapped at a given offset,
                  * or for addresses that map beyond end of a file.
-                 * We'll mlock the the pages if/when they get faulted in.
+                 * We'll mlock the pages if/when they get faulted in.
                  */
                 if (ret < 0)
                         break;
-                if (ret == 0) {
-                        /*
-                         * We know the vma is there, so the only time
-                         * we cannot get a single page should be an
-                         * error (ret < 0) case.
-                         */
-                        WARN_ON(1);
-                        break;
-                }

                 lru_add_drain();        /* push cached pages to LRU */

@@ -224,28 +202,22 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                         /*
                          * Because we lock page here and migration is blocked
                          * by the elevated reference, we need only check for
-                         * page truncation (file-cache only).
+                         * file-cache page truncation.  This page->mapping
+                         * check also neatly skips over the ZERO_PAGE(),
+                         * though if that's common we'd prefer not to lock it.
                          */
-                        if (page->mapping) {
-                                if (mlock)
-                                        mlock_vma_page(page);
-                                else
-                                        munlock_vma_page(page);
-                        }
+                        if (page->mapping)
+                                mlock_vma_page(page);
                         unlock_page(page);
                         put_page(page);         /* ref from get_user_pages() */
-
-                        /*
-                         * here we assume that get_user_pages() has given us
-                         * a list of virtually contiguous pages.
-                         */
-                        addr += PAGE_SIZE;      /* for next get_user_pages() */
-                        nr_pages--;
                 }
+
+                addr += ret * PAGE_SIZE;
+                nr_pages -= ret;
                 ret = 0;
         }

-        return ret;     /* count entire vma as locked_vm */
+        return ret;     /* 0 or negative error code */
 }

 /*
@@ -289,7 +261,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
                     is_vm_hugetlb_page(vma) ||
                     vma == get_gate_vma(current))) {

-                __mlock_vma_pages_range(vma, start, end, 1);
+                __mlock_vma_pages_range(vma, start, end);

                 /* Hide errors from mmap() and other callers */
                 return 0;
@@ -310,7 +282,6 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
         return nr_pages;                /* error or pages NOT mlocked */
 }

-
 /*
  * munlock_vma_pages_range() - munlock all pages in the vma range.'
  * @vma - vma containing range to be munlock()ed.
@@ -332,8 +303,22 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 void munlock_vma_pages_range(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
 {
+        unsigned long addr;
+
+        lru_add_drain();
         vma->vm_flags &= ~VM_LOCKED;
-        __mlock_vma_pages_range(vma, start, end, 0);
+
+        for (addr = start; addr < end; addr += PAGE_SIZE) {
+                struct page *page = follow_page(vma, addr, FOLL_GET);
+                if (page) {
+                        lock_page(page);
+                        if (page->mapping)
+                                munlock_vma_page(page);
+                        unlock_page(page);
+                        put_page(page);
+                }
+                cond_resched();
+        }
 }

 /*
@@ -400,18 +385,14 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
          * It's okay if try_to_unmap_one unmaps a page just after we
          * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
          */
-        vma->vm_flags = newflags;

         if (lock) {
-                ret = __mlock_vma_pages_range(vma, start, end, 1);
-
-                if (ret > 0) {
-                        mm->locked_vm -= ret;
-                        ret = 0;
-                } else
-                        ret = __mlock_posix_error_return(ret); /* translate if needed */
+                vma->vm_flags = newflags;
+                ret = __mlock_vma_pages_range(vma, start, end);
+                if (ret < 0)
+                        ret = __mlock_posix_error_return(ret);
         } else {
-                __mlock_vma_pages_range(vma, start, end, 0);
+                munlock_vma_pages_range(vma, start, end);
         }

 out: