Commit bb4a340e authored by Rik van Riel, committed by Linus Torvalds

mm: rename anon_vma_lock to vma_lock_anon_vma

Rename anon_vma_lock to vma_lock_anon_vma.  This matches the naming style
used in page_lock_anon_vma and will come in really handy further down in
this patch series.
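
For illustration only (not part of this patch): a minimal sketch of how a caller reads after the rename, following the page_lock_anon_vma naming pattern. update_vma_under_anon_lock() is a hypothetical function invented for this sketch; the real lock/unlock pairs appear in vma_link(), expand_upwards() and expand_downwards() in the diff below.

	#include <linux/mm_types.h>
	#include <linux/rmap.h>

	/* Hypothetical caller, shown only to illustrate the renamed helpers. */
	static void update_vma_under_anon_lock(struct vm_area_struct *vma)
	{
		vma_lock_anon_vma(vma);		/* was: anon_vma_lock(vma) */
		/* ... adjust vma->vm_start/vm_end or the anon_vma chain ... */
		vma_unlock_anon_vma(vma);	/* was: anon_vma_unlock(vma) */
	}
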
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 597781f3
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -99,14 +99,14 @@ static inline struct anon_vma *page_anon_vma(struct page *page)
 	return page_rmapping(page);
 }
 
-static inline void anon_vma_lock(struct vm_area_struct *vma)
+static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
 		spin_lock(&anon_vma->lock);
 }
 
-static inline void anon_vma_unlock(struct vm_area_struct *vma)
+static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -452,12 +452,12 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 		spin_lock(&mapping->i_mmap_lock);
 		vma->vm_truncate_count = mapping->truncate_count;
 	}
-	anon_vma_lock(vma);
+	vma_lock_anon_vma(vma);
 
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	__vma_link_file(vma);
 
-	anon_vma_unlock(vma);
+	vma_unlock_anon_vma(vma);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
 
@@ -1710,7 +1710,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	 */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	anon_vma_lock(vma);
+	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
@@ -1721,7 +1721,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	if (address < PAGE_ALIGN(address+4))
 		address = PAGE_ALIGN(address+4);
 	else {
-		anon_vma_unlock(vma);
+		vma_unlock_anon_vma(vma);
 		return -ENOMEM;
 	}
 	error = 0;
@@ -1739,7 +1739,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			perf_event_mmap(vma);
 		}
 	}
-	anon_vma_unlock(vma);
+	vma_unlock_anon_vma(vma);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1764,7 +1764,7 @@ static int expand_downwards(struct vm_area_struct *vma,
 	if (error)
 		return error;
 
-	anon_vma_lock(vma);
+	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
@@ -1786,7 +1786,7 @@ static int expand_downwards(struct vm_area_struct *vma,
 			perf_event_mmap(vma);
 		}
 	}
-	anon_vma_unlock(vma);
+	vma_unlock_anon_vma(vma);
 	return error;
 }