Commit 2f4612af authored by Davidlohr Bueso's avatar Davidlohr Bueso Committed by Linus Torvalds

mm,hugetlb: make unmap_ref_private() return void

This function always returns 1, thus no need to check return value in
hugetlb_cow().  By doing so, we can get rid of the unnecessary WARN_ON
call.  While this logic perhaps existed as a way of identifying future
unmap_ref_private() mishandling, reality is it serves no apparent
purpose.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent eb39d618
@@ -2754,7 +2754,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * from other VMAs and let the children be SIGKILLed if they are faulting the
  * same region.
  */
-static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 			      struct page *page, unsigned long address)
 {
 	struct hstate *h = hstate_vma(vma);
@@ -2794,8 +2794,6 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 					     address + huge_page_size(h), page);
 	}
 	mutex_unlock(&mapping->i_mmap_mutex);
-
-	return 1;
 }
 
 /*
@@ -2857,7 +2855,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		if (outside_reserve) {
 			BUG_ON(huge_pte_none(pte));
-			if (unmap_ref_private(mm, vma, old_page, address)) {
+			unmap_ref_private(mm, vma, old_page, address);
 			BUG_ON(huge_pte_none(pte));
 			spin_lock(ptl);
 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
@@ -2870,8 +2868,6 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			 */
 			return 0;
 		}
-		WARN_ON_ONCE(1);
-	}
 
 	/* Caller expects lock to be held */
 	spin_lock(ptl);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment