Commit 51c6f666 authored by Robin Holt, committed by Linus Torvalds

[PATCH] mm: ZAP_BLOCK causes redundant work

The address-based work estimate for unmapping (for lockbreak) is, and always
was, horribly inefficient for sparse mappings.  The problem is most simply
explained with an example:

If we find a pgd is clear, we still have to call into unmap_page_range
PGDIR_SIZE / ZAP_BLOCK_SIZE times, each time checking the clear pgd, in
order to progress the working address to the next pgd.
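
For a rough sense of scale (illustrative numbers, not taken from the patch):
assuming 4K pages and the small CONFIG_PREEMPT ZAP_BLOCK_SIZE of 8 pages
(32KB), a clear pgd covering, say, 1GB of address space still costs

	1GB / 32KB = 32768

trips through unmap_page_range, each of which merely re-checks the same
empty pgd.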

The fundamental way to solve the problem is to keep track of the end
address we've processed and pass it back to the higher layers.
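
As a toy illustration of that pattern (plain userspace C, not the kernel
code; walk_range, ENTRY_SIZE and the budget of 16 are made-up names and
numbers), each level returns the address it stopped at, and the caller
resumes from there after the lockbreak:

    /* Each level reports how far it actually got, so the caller never
     * re-scans entries it has already examined. */
    #include <stdio.h>

    #define ENTRY_SIZE 4096UL           /* assumed granularity for the toy */

    static unsigned long walk_range(unsigned long addr, unsigned long end,
                                    long *work)
    {
        do {
            (*work)--;                  /* cost of examining one entry */
        } while ((addr += ENTRY_SIZE) != end && *work > 0);
        return addr;                    /* tell the caller where we stopped */
    }

    int main(void)
    {
        unsigned long start = 0, end = 64 * ENTRY_SIZE;
        long work = 16;

        while (start != end) {
            start = walk_range(start, end, &work);
            if (work <= 0) {            /* budget spent: lockbreak point */
                printf("would reschedule, resume at %#lx\n", start);
                work = 16;              /* refill, as unmap_vmas does */
            }
        }
        return 0;
    }

The kernel version does the same thing at every page-table level:
zap_pte_range, zap_pmd_range, zap_pud_range and unmap_page_range all return
the address they reached, as the diff below shows.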

From: Nick Piggin <npiggin@suse.de>

  Modification to completely get away from the address-based work
  estimate and instead use an abstract count, with a very small cost for
  empty entries as opposed to present pages.

  On 2.6.14-git2, ppc64, and CONFIG_PREEMPT=y, mapping and unmapping of
  1TB of virtual address space takes 1.69s; with the following patch
  applied, this operation can be done 1000 times in less than 0.01s.
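
A simplified model of that accounting (userspace C; drain, TOY_PAGE_SIZE and
TOY_ZAP_BLOCK are invented for the example, and the 8-page budget is only an
assumption about the CONFIG_PREEMPT case): a present page is charged a full
PAGE_SIZE of work, an empty entry a single unit, so sparse regions barely
dent the budget while dense ones still yield regularly:

    #include <stdio.h>
    #include <stdbool.h>

    #define TOY_PAGE_SIZE   4096L
    #define TOY_ZAP_BLOCK   (8 * TOY_PAGE_SIZE)   /* assumed per-pass budget */

    /* Drain the budget over npages entries, all present or all empty. */
    static long drain(long zap_work, long npages, bool present)
    {
        while (npages-- > 0 && zap_work > 0)
            zap_work -= present ? TOY_PAGE_SIZE : 1;
        return zap_work;
    }

    int main(void)
    {
        /* Dense mapping: the budget is exhausted after only 8 pages. */
        printf("dense:  %ld left\n", drain(TOY_ZAP_BLOCK, 1000, true));
        /* Sparse mapping: 1000 empty entries cost almost nothing. */
        printf("sparse: %ld left\n", drain(TOY_ZAP_BLOCK, 1000, false));
        return 0;
    }

In the diff below this shows up as the (*zap_work)-- taken for pte_none()
ptes and for clear pmd/pud/pgd entries, versus the (*zap_work) -= PAGE_SIZE
taken for present ptes.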

From: Andrew Morton <akpm@osdl.org>

With CONFIG_HUGETLB_PAGE=n:

mm/memory.c: In function `unmap_vmas':
mm/memory.c:779: warning: division by zero

Due to

			zap_work -= (end - start) /
					(HPAGE_SIZE / PAGE_SIZE);

So make the dummy HPAGE_SIZE non-zero: define it as PAGE_SIZE (and
HPAGE_MASK as PAGE_MASK) so the division is well-defined.  The hugetlb
branch can never be taken when CONFIG_HUGETLB_PAGE=n; it merely has to
compile.

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 885036d3
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -102,8 +102,8 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_fault(mm, vma, addr, write)    ({ BUG(); 0; })
 
 #ifndef HPAGE_MASK
-#define HPAGE_MASK  0           /* Keep the compiler happy */
-#define HPAGE_SIZE  0
+#define HPAGE_MASK  PAGE_MASK   /* Keep the compiler happy */
+#define HPAGE_SIZE  PAGE_SIZE
 #endif
 
 #endif /* !CONFIG_HUGETLB_PAGE */
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -549,10 +549,10 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
     return 0;
 }
 
-static void zap_pte_range(struct mmu_gather *tlb,
+static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr, unsigned long end,
-                struct zap_details *details)
+                long *zap_work, struct zap_details *details)
 {
     struct mm_struct *mm = tlb->mm;
     pte_t *pte;
@@ -563,10 +563,15 @@ static void zap_pte_range(struct mmu_gather *tlb,
     pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
     do {
         pte_t ptent = *pte;
-        if (pte_none(ptent))
+        if (pte_none(ptent)) {
+            (*zap_work)--;
             continue;
+        }
+
         if (pte_present(ptent)) {
             struct page *page = NULL;
+
+            (*zap_work) -= PAGE_SIZE;
             if (!(vma->vm_flags & VM_RESERVED)) {
                 unsigned long pfn = pte_pfn(ptent);
                 if (unlikely(!pfn_valid(pfn)))
@@ -624,16 +629,18 @@ static void zap_pte_range(struct mmu_gather *tlb,
         if (!pte_file(ptent))
             free_swap_and_cache(pte_to_swp_entry(ptent));
         pte_clear_full(mm, addr, pte, tlb->fullmm);
-    } while (pte++, addr += PAGE_SIZE, addr != end);
+    } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
     add_mm_rss(mm, file_rss, anon_rss);
     pte_unmap_unlock(pte - 1, ptl);
+
+    return addr;
 }
 
-static inline void zap_pmd_range(struct mmu_gather *tlb,
+static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                 struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr, unsigned long end,
-                struct zap_details *details)
+                long *zap_work, struct zap_details *details)
 {
     pmd_t *pmd;
     unsigned long next;
@@ -641,16 +648,21 @@ static inline void zap_pmd_range(struct mmu_gather *tlb,
     pmd = pmd_offset(pud, addr);
     do {
         next = pmd_addr_end(addr, end);
-        if (pmd_none_or_clear_bad(pmd))
+        if (pmd_none_or_clear_bad(pmd)) {
+            (*zap_work)--;
             continue;
-        zap_pte_range(tlb, vma, pmd, addr, next, details);
-    } while (pmd++, addr = next, addr != end);
+        }
+        next = zap_pte_range(tlb, vma, pmd, addr, next,
+                        zap_work, details);
+    } while (pmd++, addr = next, (addr != end && *zap_work > 0));
+
+    return addr;
 }
 
-static inline void zap_pud_range(struct mmu_gather *tlb,
+static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                 struct vm_area_struct *vma, pgd_t *pgd,
                 unsigned long addr, unsigned long end,
-                struct zap_details *details)
+                long *zap_work, struct zap_details *details)
 {
     pud_t *pud;
     unsigned long next;
@@ -658,15 +670,21 @@ static inline void zap_pud_range(struct mmu_gather *tlb,
     pud = pud_offset(pgd, addr);
     do {
         next = pud_addr_end(addr, end);
-        if (pud_none_or_clear_bad(pud))
+        if (pud_none_or_clear_bad(pud)) {
+            (*zap_work)--;
             continue;
-        zap_pmd_range(tlb, vma, pud, addr, next, details);
-    } while (pud++, addr = next, addr != end);
+        }
+        next = zap_pmd_range(tlb, vma, pud, addr, next,
+                        zap_work, details);
+    } while (pud++, addr = next, (addr != end && *zap_work > 0));
+
+    return addr;
 }
 
-static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+static unsigned long unmap_page_range(struct mmu_gather *tlb,
+                struct vm_area_struct *vma,
                 unsigned long addr, unsigned long end,
-                struct zap_details *details)
+                long *zap_work, struct zap_details *details)
 {
     pgd_t *pgd;
     unsigned long next;
@@ -679,11 +697,16 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
     pgd = pgd_offset(vma->vm_mm, addr);
     do {
         next = pgd_addr_end(addr, end);
-        if (pgd_none_or_clear_bad(pgd))
+        if (pgd_none_or_clear_bad(pgd)) {
+            (*zap_work)--;
             continue;
-        zap_pud_range(tlb, vma, pgd, addr, next, details);
-    } while (pgd++, addr = next, addr != end);
+        }
+        next = zap_pud_range(tlb, vma, pgd, addr, next,
+                        zap_work, details);
+    } while (pgd++, addr = next, (addr != end && *zap_work > 0));
     tlb_end_vma(tlb, vma);
+
+    return addr;
 }
 
 #ifdef CONFIG_PREEMPT
@@ -724,7 +747,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
         unsigned long end_addr, unsigned long *nr_accounted,
         struct zap_details *details)
 {
-    unsigned long zap_bytes = ZAP_BLOCK_SIZE;
+    long zap_work = ZAP_BLOCK_SIZE;
     unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
     int tlb_start_valid = 0;
     unsigned long start = start_addr;
@@ -745,27 +768,25 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
             *nr_accounted += (end - start) >> PAGE_SHIFT;
 
         while (start != end) {
-            unsigned long block;
-
             if (!tlb_start_valid) {
                 tlb_start = start;
                 tlb_start_valid = 1;
             }
 
-            if (is_vm_hugetlb_page(vma)) {
-                block = end - start;
+            if (unlikely(is_vm_hugetlb_page(vma))) {
                 unmap_hugepage_range(vma, start, end);
-            } else {
-                block = min(zap_bytes, end - start);
-                unmap_page_range(*tlbp, vma, start,
-                        start + block, details);
+                zap_work -= (end - start) /
+                        (HPAGE_SIZE / PAGE_SIZE);
+                start = end;
+            } else
+                start = unmap_page_range(*tlbp, vma,
+                        start, end, &zap_work, details);
+
+            if (zap_work > 0) {
+                BUG_ON(start != end);
+                break;
             }
 
-            start += block;
-            zap_bytes -= block;
-            if ((long)zap_bytes > 0)
-                continue;
-
             tlb_finish_mmu(*tlbp, tlb_start, start);
 
             if (need_resched() ||
@@ -779,7 +800,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 
             *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
             tlb_start_valid = 0;
-            zap_bytes = ZAP_BLOCK_SIZE;
+            zap_work = ZAP_BLOCK_SIZE;
         }
     }
 out: