Commit 62dc69da authored by Nick Piggin, committed by Linus Torvalds

[PATCH] replace clear_page_tables with clear_page_range

Rename clear_page_tables to clear_page_range. clear_page_range takes byte
ranges and aggressively frees page table pages. This may be useful for
controlling page table memory consumption on 4-level architectures (and even
3-level ones).
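
As a concrete picture of the interface change, here is the exit_mmap()
conversion taken from the mm/mmap.c hunk near the end of this patch: the old
call took a starting pgd index and a count of pgd entries, the new one takes a
byte range, and the walk frees any page table page that the range fully covers.

	/* old: pgd index + number of pgd entries */
	clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);

	/* new: byte range [start, end); page table pages fully covered
	 * by the range are freed as the walk proceeds */
	clear_page_range(tlb, FIRST_USER_PGD_NR * PGDIR_SIZE,
			(TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK);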

Possible downsides are:
- flush_tlb_pgtables gets called more often (only a problem for sparc64
  AFAIKS).

- the opportunistic "expand to fill the PGDIR_SIZE hole" logic, which ensured
  something actually got done under the old system, is still in place. This
  could sometimes make unmapping small regions more inefficient; there are
  other solutions to look at if that turns out to be the case (see the sketch
  after this list).
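
To make that expansion concrete, here is a minimal stand-alone sketch of the
rounding that free_pgtables() performs, assuming a hypothetical 2-level layout
with a 4 MB PGDIR_SIZE (the real constants are per-architecture, and the
clipping against neighbouring vmas is left out; see the mmap.c hunk below):

	#include <stdio.h>

	/* Hypothetical constants for a 2-level, 4 MB-per-pgd-entry layout. */
	#define PGDIR_SHIFT	22
	#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
	#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

	int main(void)
	{
		/* a small unmap: a single 4 KB page */
		unsigned long start = 0x00201000UL, end = 0x00202000UL;

		/* free_pgtables() rounds the range out to the surrounding
		 * PGDIR_SIZE hole before handing it to clear_page_range() */
		unsigned long first = start & PGDIR_MASK;	/* 0x00000000 */
		unsigned long last  = end + PGDIR_SIZE - 1;	/* 0x00601fff */

		printf("unmap [%#lx, %#lx) -> consider [%#lx, %#lx]\n",
		       start, end, first, last);
		return 0;
	}

With the byte-range interface this expanded range is now walked and freed
aggressively, which is where the small-unmap overhead mentioned above can come
from.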
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d537e007
@@ -252,6 +252,6 @@ void pgd_free(pgd_t *pgd)
 	if (PTRS_PER_PMD > 1)
 		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
 			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
+	/* in the non-PAE case, clear_page_range() clears user pgd entries */
 	kmem_cache_free(pgd_cache, pgd);
 }
@@ -187,7 +187,6 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 {
 	unsigned long first = start & HUGETLB_PGDIR_MASK;
 	unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
-	unsigned long start_index, end_index;
 	struct mm_struct *mm = tlb->mm;
 
 	if (!prev) {
@@ -212,23 +211,13 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 			last = next->vm_start;
 		}
 		if (prev->vm_end > first)
-			first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
+			first = prev->vm_end;
 		break;
 	}
 no_mmaps:
 	if (last < first)	/* for arches with discontiguous pgd indices */
 		return;
-	/*
-	 * If the PGD bits are not consecutive in the virtual address, the
-	 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-	 */
-	start_index = pgd_index(htlbpage_to_page(first));
-	end_index = pgd_index(htlbpage_to_page(last));
-	if (end_index > start_index) {
-		clear_page_tables(tlb, start_index, end_index - start_index);
-	}
+	clear_page_range(tlb, first, last);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
@@ -566,7 +566,7 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
-void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr);
+void clear_page_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
@@ -100,58 +100,76 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
+static inline void clear_pmd_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long start, unsigned long end)
 {
 	struct page *page;
 
-	if (pmd_none(*dir))
+	if (pmd_none(*pmd))
 		return;
-	if (unlikely(pmd_bad(*dir))) {
-		pmd_ERROR(*dir);
-		pmd_clear(dir);
+	if (unlikely(pmd_bad(*pmd))) {
+		pmd_ERROR(*pmd);
+		pmd_clear(pmd);
 		return;
 	}
-	page = pmd_page(*dir);
-	pmd_clear(dir);
-	dec_page_state(nr_page_table_pages);
-	tlb->mm->nr_ptes--;
-	pte_free_tlb(tlb, page);
+	if (!(start & ~PMD_MASK) && !(end & ~PMD_MASK)) {
+		page = pmd_page(*pmd);
+		pmd_clear(pmd);
+		dec_page_state(nr_page_table_pages);
+		tlb->mm->nr_ptes--;
+		pte_free_tlb(tlb, page);
+	}
 }
 
-static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
+static inline void clear_pgd_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long start, unsigned long end)
 {
-	int j;
-	pmd_t * pmd;
+	unsigned long addr = start, next;
+	pmd_t *pmd, *__pmd;
 
-	if (pgd_none(*dir))
+	if (pgd_none(*pgd))
 		return;
-	if (unlikely(pgd_bad(*dir))) {
-		pgd_ERROR(*dir);
-		pgd_clear(dir);
+	if (unlikely(pgd_bad(*pgd))) {
+		pgd_ERROR(*pgd);
+		pgd_clear(pgd);
 		return;
 	}
-	pmd = pmd_offset(dir, 0);
-	pgd_clear(dir);
-	for (j = 0; j < PTRS_PER_PMD ; j++)
-		free_one_pmd(tlb, pmd+j);
-	pmd_free_tlb(tlb, pmd);
+
+	pmd = __pmd = pmd_offset(pgd, start);
+	do {
+		next = (addr + PMD_SIZE) & PMD_MASK;
+		if (next > end || next <= addr)
+			next = end;
+
+		clear_pmd_range(tlb, pmd, addr, next);
+		pmd++;
+		addr = next;
+	} while (addr && (addr <= end - 1));
+
+	if (!(start & ~PGDIR_MASK) && !(end & ~PGDIR_MASK)) {
+		pgd_clear(pgd);
+		pmd_free_tlb(tlb, __pmd);
+	}
 }
 
 /*
- * This function clears all user-level page tables of a process - this
- * is needed by execve(), so that old pages aren't in the way.
+ * This function clears user-level page tables of a process.
  *
  * Must be called with pagetable lock held.
  */
-void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
+void clear_page_range(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	pgd_t * page_dir = tlb->mm->pgd;
+	unsigned long addr = start, next;
+	unsigned long i, nr = pgd_index(end + PGDIR_SIZE-1) - pgd_index(start);
+	pgd_t * pgd = pgd_offset(tlb->mm, start);
 
-	page_dir += first;
-	do {
-		free_one_pgd(tlb, page_dir);
-		page_dir++;
-	} while (--nr);
+	for (i = 0; i < nr; i++) {
+		next = (addr + PGDIR_SIZE) & PGDIR_MASK;
+		if (next > end || next <= addr)
+			next = end;
+
+		clear_pgd_range(tlb, pgd, addr, next);
+		pgd++;
+		addr = next;
+	}
 }
 
 pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
@@ -1474,7 +1474,6 @@ static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 {
 	unsigned long first = start & PGDIR_MASK;
 	unsigned long last = end + PGDIR_SIZE - 1;
-	unsigned long start_index, end_index;
 	struct mm_struct *mm = tlb->mm;
 
 	if (!prev) {
@@ -1499,23 +1498,18 @@ static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 			last = next->vm_start;
 		}
 		if (prev->vm_end > first)
-			first = prev->vm_end + PGDIR_SIZE - 1;
+			first = prev->vm_end;
 		break;
 	}
 no_mmaps:
 	if (last < first)	/* for arches with discontiguous pgd indices */
 		return;
-	/*
-	 * If the PGD bits are not consecutive in the virtual address, the
-	 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-	 */
-	start_index = pgd_index(first);
-	if (start_index < FIRST_USER_PGD_NR)
-		start_index = FIRST_USER_PGD_NR;
-	end_index = pgd_index(last);
-	if (end_index > start_index) {
-		clear_page_tables(tlb, start_index, end_index - start_index);
-		flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
-	}
+	if (first < FIRST_USER_PGD_NR * PGDIR_SIZE)
+		first = FIRST_USER_PGD_NR * PGDIR_SIZE;
+	/* No point trying to free anything if we're in the same pte page */
+	if ((first & PMD_MASK) < (last & PMD_MASK)) {
+		clear_page_range(tlb, first, last);
+		flush_tlb_pgtables(mm, first, last);
+	}
 }
@@ -1844,7 +1838,9 @@ void exit_mmap(struct mm_struct *mm)
 			~0UL, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	BUG_ON(mm->map_count);	/* This is just debugging */
-	clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
+	clear_page_range(tlb, FIRST_USER_PGD_NR * PGDIR_SIZE,
+			(TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK);
 	tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
 
 	vma = mm->mmap;