Commit ecf1385d authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: drop unused argument of zap_page_range()

There's no users of zap_page_range() who wants non-NULL 'details'.
Let's drop it.

Link: http://lkml.kernel.org/r/20170118122429.43661-3-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3e8715fd
...@@ -687,7 +687,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) ...@@ -687,7 +687,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
/* Find vma in the parent mm */ /* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr); vma = find_vma(gmap->mm, vmaddr);
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size, NULL); zap_page_range(vma, vmaddr, size);
} }
up_read(&gmap->mm->mmap_sem); up_read(&gmap->mm->mmap_sem);
} }
......
...@@ -796,7 +796,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm, ...@@ -796,7 +796,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
return -EINVAL; return -EINVAL;
len = min(vma->vm_end, end) - addr; len = min(vma->vm_end, end) - addr;
zap_page_range(vma, addr, len, NULL); zap_page_range(vma, addr, len);
trace_mpx_unmap_zap(addr, addr+len); trace_mpx_unmap_zap(addr, addr+len);
vma = vma->vm_next; vma = vma->vm_next;
......
...@@ -657,7 +657,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, ...@@ -657,7 +657,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
if (vma) if (vma)
zap_page_range(vma, (uintptr_t)page_addr + zap_page_range(vma, (uintptr_t)page_addr +
proc->user_buffer_offset, PAGE_SIZE, NULL); proc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed: err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed: err_map_kernel_failed:
......
...@@ -865,8 +865,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer, ...@@ -865,8 +865,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
list_for_each_entry(vma_list, &buffer->vmas, list) { list_for_each_entry(vma_list, &buffer->vmas, list) {
struct vm_area_struct *vma = vma_list->vma; struct vm_area_struct *vma = vma_list->vma;
zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
NULL);
} }
mutex_unlock(&buffer->lock); mutex_unlock(&buffer->lock);
} }
......
...@@ -1185,7 +1185,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, ...@@ -1185,7 +1185,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size); unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address, void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *); unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
......
...@@ -478,7 +478,7 @@ static long madvise_dontneed(struct vm_area_struct *vma, ...@@ -478,7 +478,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
return -EINVAL; return -EINVAL;
madvise_userfault_dontneed(vma, prev, start, end); madvise_userfault_dontneed(vma, prev, start, end);
zap_page_range(vma, start, end - start, NULL); zap_page_range(vma, start, end - start);
return 0; return 0;
} }
......
...@@ -1370,12 +1370,11 @@ void unmap_vmas(struct mmu_gather *tlb, ...@@ -1370,12 +1370,11 @@ void unmap_vmas(struct mmu_gather *tlb,
* @vma: vm_area_struct holding the applicable pages * @vma: vm_area_struct holding the applicable pages
* @start: starting address of pages to zap * @start: starting address of pages to zap
* @size: number of bytes to zap * @size: number of bytes to zap
* @details: details of shared cache invalidation
* *
* Caller must protect the VMA list * Caller must protect the VMA list
*/ */
void zap_page_range(struct vm_area_struct *vma, unsigned long start, void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long size, struct zap_details *details) unsigned long size)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb; struct mmu_gather tlb;
...@@ -1386,7 +1385,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, ...@@ -1386,7 +1385,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
update_hiwater_rss(mm); update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end); mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next) for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
unmap_single_vma(&tlb, vma, start, end, details); unmap_single_vma(&tlb, vma, start, end, NULL);
mmu_notifier_invalidate_range_end(mm, start, end); mmu_notifier_invalidate_range_end(mm, start, end);
tlb_finish_mmu(&tlb, start, end); tlb_finish_mmu(&tlb, start, end);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment