Commit 1d069b7d authored by Hugh Dickins, committed by Linus Torvalds

huge pagecache: extend mremap pmd rmap lockout to files

Whatever huge pagecache implementation we go with, file rmap locking
must be added to anon rmap locking, when mremap's move_page_tables()
finds a pmd_trans_huge pmd entry: a simple change, let's do it now.

Factor out take_rmap_locks() and drop_rmap_locks() to handle the locking
for both move_ptes() and move_page_tables(), and delete the
VM_BUG_ON_VMA which rejected vm_file and required anon_vma.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bf8616d5
mm/mremap.c

@@ -70,6 +70,22 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static void take_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (vma->anon_vma)
+		anon_vma_lock_write(vma->anon_vma);
+}
+
+static void drop_rmap_locks(struct vm_area_struct *vma)
+{
+	if (vma->anon_vma)
+		anon_vma_unlock_write(vma->anon_vma);
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+}
+
 static pte_t move_soft_dirty_pte(pte_t pte)
 {
 	/*
@@ -90,8 +106,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
-	struct address_space *mapping = NULL;
-	struct anon_vma *anon_vma = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
@@ -114,16 +128,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * serialize access to individual ptes, but only rmap traversal
 	 * order guarantees that we won't miss both the old and new ptes).
 	 */
-	if (need_rmap_locks) {
-		if (vma->vm_file) {
-			mapping = vma->vm_file->f_mapping;
-			i_mmap_lock_write(mapping);
-		}
-		if (vma->anon_vma) {
-			anon_vma = vma->anon_vma;
-			anon_vma_lock_write(anon_vma);
-		}
-	}
+	if (need_rmap_locks)
+		take_rmap_locks(vma);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
@@ -151,10 +157,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
-	if (anon_vma)
-		anon_vma_unlock_write(anon_vma);
-	if (mapping)
-		i_mmap_unlock_write(mapping);
+	if (need_rmap_locks)
+		drop_rmap_locks(vma);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
@@ -193,15 +197,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		if (pmd_trans_huge(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE) {
 				bool moved;
-				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
-					      vma);
 				/* See comment in move_ptes() */
 				if (need_rmap_locks)
-					anon_vma_lock_write(vma->anon_vma);
+					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
 						    old_end, old_pmd, new_pmd);
 				if (need_rmap_locks)
-					anon_vma_unlock_write(vma->anon_vma);
+					drop_rmap_locks(vma);
 				if (moved) {
 					need_flush = true;
 					continue;
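
For context, a minimal userspace sketch (illustration only, not part of the patch) of the operation that reaches this code: an mremap(2) move of a file-backed mapping. It assumes /dev/shm is tmpfs; the path, file name, and sizes are hypothetical, and whether the kernel actually relocates the mapping rather than growing it in place depends on the surrounding address space.

	/*
	 * Hypothetical illustration: mremap() of a file-backed (tmpfs)
	 * mapping.  When such a vma carries a pmd_trans_huge entry --
	 * as a huge pagecache implementation would create -- a move
	 * walks move_page_tables() with vm_file set, the case this
	 * patch adds file rmap locking for.
	 */
	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4 << 20;	/* hypothetical size: covers 2MB pmds */
		int fd = open("/dev/shm/hugefile", O_RDWR | O_CREAT, 0600);

		if (fd < 0 || ftruncate(fd, len) < 0)
			return 1;

		void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, 0);
		if (old == MAP_FAILED)
			return 1;
		memset(old, 0, len);	/* fault in: gives the kernel page tables to move */

		/*
		 * MREMAP_MAYMOVE lets the kernel relocate the mapping when
		 * it cannot grow in place; only a real relocation runs
		 * move_page_tables() on this vma.
		 */
		void *new = mremap(old, len, 2 * len, MREMAP_MAYMOVE);
		if (new == MAP_FAILED)
			return 1;
		printf("mapping %s: %p -> %p\n",
		       new == old ? "grew in place" : "moved", old, new);
		return 0;
	}

Only when the mapping really moves does move_page_tables() run, and only a huge pagecache implementation would put a pmd_trans_huge entry in a vma with vm_file set: exactly the combination the old VM_BUG_ON_VMA rejected.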