Commit 12352d3c authored by Konstantin Khlebnikov, committed by Linus Torvalds

mm: replace vma_lock_anon_vma with anon_vma_lock_read/write

The sequence vma_lock_anon_vma() - vma_unlock_anon_vma() isn't safe if
an anon_vma appears between the lock and the unlock.  We have to check
anon_vma first or call anon_vma_prepare() to be sure that it's there.
There are only a few users of these legacy helpers, so let's get rid
of them.
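
For illustration only (this block is not part of the patch): the bodies
below are the legacy helpers removed by the rmap.h hunk further down, and
the commented interleaving sketches the race described above - each helper
re-reads vma->anon_vma, so the lock can be skipped while the unlock is not.

/* Legacy helpers removed below: each one re-reads vma->anon_vma and is a
 * silent no-op while the pointer is still NULL. */
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                down_write(&anon_vma->root->rwsem);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                up_write(&anon_vma->root->rwsem);
}

/* Racy pattern the helpers invite:
 *
 *      vma_lock_anon_vma(vma);    vma->anon_vma == NULL -> lock skipped
 *        ...                      an anon_vma gets installed meanwhile
 *      vma_unlock_anon_vma(vma);  vma->anon_vma != NULL -> up_write() on
 *                                 a rwsem that was never down_write()'d
 */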

This patch fixes an anon_vma lock imbalance in validate_mm().  A write
lock isn't required there; a read lock is enough.
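
Condensed from the validate_mm() hunk below (comments added here for
illustration): the anon_vma pointer is read once, so the lock and unlock
always operate on the same object, and only a read lock is taken.

        struct anon_vma *anon_vma = vma->anon_vma;      /* read the pointer once */
        struct anon_vma_chain *avc;

        if (anon_vma) {
                anon_vma_lock_read(anon_vma);           /* read lock: we only verify */
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
                anon_vma_unlock_read(anon_vma);         /* same anon_vma as the lock */
        }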

It also reorders expand_downwards()/expand_upwards(): security_mmap_addr()
and the wrap-around check don't have to be done under the anon_vma lock.
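
Condensed from the expand_upwards()/expand_downwards() hunks below
(comments added here for illustration): the cheap checks run before the
lock, so their failure paths are plain returns, and anon_vma_prepare()
guarantees the subsequent lock is never a no-op.

        /* Guard against wrapping around to address 0 - no lock held yet,
         * so failure is a plain return. */
        if (address < PAGE_ALIGN(address+4))
                address = PAGE_ALIGN(address+4);
        else
                return -ENOMEM;

        /* We must make sure the anon_vma is allocated ... */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;

        /* ... so taking its lock cannot be silently skipped. */
        anon_vma_lock_write(vma->anon_vma);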

Link: https://lkml.kernel.org/r/CACT4Y+Y908EjM2z=706dv4rV6dWtxTLK9nFg9_7DhRMLppBo2g@mail.gmail.com
Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c95a5180
@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
                 __put_anon_vma(anon_vma);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-        struct anon_vma *anon_vma = vma->anon_vma;
-        if (anon_vma)
-                down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-        struct anon_vma *anon_vma = vma->anon_vma;
-        if (anon_vma)
-                up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
         down_write(&anon_vma->root->rwsem);

@@ -459,12 +459,16 @@ static void validate_mm(struct mm_struct *mm)
         struct vm_area_struct *vma = mm->mmap;
 
         while (vma) {
+                struct anon_vma *anon_vma = vma->anon_vma;
                 struct anon_vma_chain *avc;
 
-                vma_lock_anon_vma(vma);
-                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-                        anon_vma_interval_tree_verify(avc);
-                vma_unlock_anon_vma(vma);
+                if (anon_vma) {
+                        anon_vma_lock_read(anon_vma);
+                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                                anon_vma_interval_tree_verify(avc);
+                        anon_vma_unlock_read(anon_vma);
+                }
+
                 highest_address = vma->vm_end;
                 vma = vma->vm_next;
                 i++;

@@ -2145,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
         struct mm_struct *mm = vma->vm_mm;
-        int error;
+        int error = 0;
 
         if (!(vma->vm_flags & VM_GROWSUP))
                 return -EFAULT;
 
-        /*
-         * We must make sure the anon_vma is allocated
-         * so that the anon_vma locking is not a noop.
-         */
+        /* Guard against wrapping around to address 0. */
+        if (address < PAGE_ALIGN(address+4))
+                address = PAGE_ALIGN(address+4);
+        else
+                return -ENOMEM;
+
+        /* We must make sure the anon_vma is allocated. */
         if (unlikely(anon_vma_prepare(vma)))
                 return -ENOMEM;
-        vma_lock_anon_vma(vma);
 
         /*
          * vma->vm_start/vm_end cannot change under us because the caller
          * is required to hold the mmap_sem in read mode.  We need the
          * anon_vma lock to serialize against concurrent expand_stacks.
-         * Also guard against wrapping around to address 0.
          */
-        if (address < PAGE_ALIGN(address+4))
-                address = PAGE_ALIGN(address+4);
-        else {
-                vma_unlock_anon_vma(vma);
-                return -ENOMEM;
-        }
-        error = 0;
+        anon_vma_lock_write(vma->anon_vma);
 
         /* Somebody else might have raced and expanded it already */
         if (address > vma->vm_end) {

@@ -2188,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                  * updates, but we only hold a shared mmap_sem
                                  * lock here, so we need to protect against
                                  * concurrent vma expansions.
-                                 * vma_lock_anon_vma() doesn't help here, as
+                                 * anon_vma_lock_write() doesn't help here, as
                                  * we don't guarantee that all growable vmas
                                  * in a mm share the same root anon vma.
                                  * So, we reuse mm->page_table_lock to guard

@@ -2211,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                         }
                 }
         }
-        vma_unlock_anon_vma(vma);
+        anon_vma_unlock_write(vma->anon_vma);
         khugepaged_enter_vma_merge(vma, vma->vm_flags);
         validate_mm(mm);
         return error;

@@ -2227,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma,
         struct mm_struct *mm = vma->vm_mm;
         int error;
 
-        /*
-         * We must make sure the anon_vma is allocated
-         * so that the anon_vma locking is not a noop.
-         */
-        if (unlikely(anon_vma_prepare(vma)))
-                return -ENOMEM;
-
         address &= PAGE_MASK;
         error = security_mmap_addr(address);
         if (error)
                 return error;
 
-        vma_lock_anon_vma(vma);
+        /* We must make sure the anon_vma is allocated. */
+        if (unlikely(anon_vma_prepare(vma)))
+                return -ENOMEM;
 
         /*
          * vma->vm_start/vm_end cannot change under us because the caller
          * is required to hold the mmap_sem in read mode.  We need the
          * anon_vma lock to serialize against concurrent expand_stacks.
          */
+        anon_vma_lock_write(vma->anon_vma);
 
         /* Somebody else might have raced and expanded it already */
         if (address < vma->vm_start) {

@@ -2263,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma,
                                  * updates, but we only hold a shared mmap_sem
                                  * lock here, so we need to protect against
                                  * concurrent vma expansions.
-                                 * vma_lock_anon_vma() doesn't help here, as
+                                 * anon_vma_lock_write() doesn't help here, as
                                  * we don't guarantee that all growable vmas
                                  * in a mm share the same root anon vma.
                                  * So, we reuse mm->page_table_lock to guard

@@ -2284,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma,
                         }
                 }
         }
-        vma_unlock_anon_vma(vma);
+        anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
         validate_mm(mm);
         return error;