Commit 569f48b8 authored by Hillf Danton, committed by Linus Torvalds

mm: hugetlb: fix __unmap_hugepage_range()

First, after flushing the TLB there is no need to scan the PTEs from start again.
Second, the address is advanced one step before bailing out of the loop, so the retry resumes at the next huge page instead of reprocessing the entry that was already unmapped.
Signed-off-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e4bd6a02
@@ -2638,8 +2638,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	address = start;
 again:
-	for (address = start; address < end; address += sz) {
+	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
@@ -2686,6 +2687,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		page_remove_rmap(page);
 		force_flush = !__tlb_remove_page(tlb, page);
 		if (force_flush) {
+			address += sz;
 			spin_unlock(ptl);
 			break;
 		}
...
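For context, a simplified sketch of the resulting control flow in __unmap_hugepage_range() after this patch. Most of the per-page unmapping work is elided, and the retry after tlb_flush_mmu() is paraphrased from the surrounding kernel code of this era rather than appearing in the hunks above; it is shown only to illustrate why address is saved across the flush instead of being reset to start.

	address = start;
again:
	for (; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		/* ... take the PTE lock, clear the PTE, handle dirty state ... */
		page_remove_rmap(page);
		force_flush = !__tlb_remove_page(tlb, page);
		if (force_flush) {
			/* step past the entry just unmapped before bailing out */
			address += sz;
			spin_unlock(ptl);
			break;
		}
		spin_unlock(ptl);
	}
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
		/* resume at the saved address instead of rescanning from start */
		if (address < end)
			goto again;
	}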