Commit 6e8bb019 authored by Al Viro

VM: make unmap_vmas() return void

same story - nobody uses it and it's been pointless since
"mm: Remove i_mmap_lock lockbreak" went in.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 853f5e26
...@@ -895,7 +895,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, ...@@ -895,7 +895,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size); unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address, void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *); unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather *tlb, void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *start_vma, unsigned long start_addr, struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted, unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *); struct zap_details *);
......
...@@ -1316,8 +1316,6 @@ static void unmap_page_range(struct mmu_gather *tlb, ...@@ -1316,8 +1316,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
* @nr_accounted: Place number of unmapped pages in vm-accountable vma's here * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
* @details: details of nonlinear truncation or shared cache invalidation * @details: details of nonlinear truncation or shared cache invalidation
* *
* Returns the end address of the unmapping (restart addr if interrupted).
*
* Unmap all pages in the vma list. * Unmap all pages in the vma list.
* *
* Only addresses between `start' and `end' will be unmapped. * Only addresses between `start' and `end' will be unmapped.
...@@ -1329,7 +1327,7 @@ static void unmap_page_range(struct mmu_gather *tlb, ...@@ -1329,7 +1327,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
* ensure that any thus-far unmapped pages are flushed before unmap_vmas() * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules. * drops the lock and schedules.
*/ */
unsigned long unmap_vmas(struct mmu_gather *tlb, void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr, struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted, unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *details) struct zap_details *details)
...@@ -1372,11 +1370,9 @@ unsigned long unmap_vmas(struct mmu_gather *tlb, ...@@ -1372,11 +1370,9 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
} else } else
unmap_page_range(tlb, vma, start, end, details); unmap_page_range(tlb, vma, start, end, details);
} }
start = end;
} }
mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
return start; /* which is now the end (or restart) address */
} }
/** /**
......
...@@ -2224,7 +2224,6 @@ void exit_mmap(struct mm_struct *mm) ...@@ -2224,7 +2224,6 @@ void exit_mmap(struct mm_struct *mm)
struct mmu_gather tlb; struct mmu_gather tlb;
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long nr_accounted = 0; unsigned long nr_accounted = 0;
unsigned long end;
/* mm's last user has gone, and its about to be pulled down */ /* mm's last user has gone, and its about to be pulled down */
mmu_notifier_release(mm); mmu_notifier_release(mm);
...@@ -2249,7 +2248,7 @@ void exit_mmap(struct mm_struct *mm) ...@@ -2249,7 +2248,7 @@ void exit_mmap(struct mm_struct *mm)
tlb_gather_mmu(&tlb, mm, 1); tlb_gather_mmu(&tlb, mm, 1);
/* update_hiwater_rss(mm) here? but nobody should be looking */ /* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */ /* Use -1 here to ensure all VMAs in the mm are unmapped */
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted); vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment