Commit 9736ce3a authored by Hugh Dickins's avatar Hugh Dickins Committed by Linus Torvalds

[PATCH] ptwalk: unmap_page_range

Convert unmap_page_range pagetable walkers to loops using p?d_addr_end.
Move blanking of irrelevant details up to unmap_page_range as Nick did.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b634975c
...@@ -454,29 +454,22 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, ...@@ -454,29 +454,22 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
return err; return err;
} }
static void zap_pte_range(struct mmu_gather *tlb, static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
pmd_t *pmd, unsigned long address, unsigned long addr, unsigned long end,
unsigned long size, struct zap_details *details) struct zap_details *details)
{ {
unsigned long offset; pte_t *pte;
pte_t *ptep;
if (pmd_none_or_clear_bad(pmd)) if (pmd_none_or_clear_bad(pmd))
return; return;
ptep = pte_offset_map(pmd, address); pte = pte_offset_map(pmd, addr);
offset = address & ~PMD_MASK; do {
if (offset + size > PMD_SIZE) pte_t ptent = *pte;
size = PMD_SIZE - offset; if (pte_none(ptent))
size &= PAGE_MASK;
if (details && !details->check_mapping && !details->nonlinear_vma)
details = NULL;
for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
pte_t pte = *ptep;
if (pte_none(pte))
continue; continue;
if (pte_present(pte)) { if (pte_present(ptent)) {
struct page *page = NULL; struct page *page = NULL;
unsigned long pfn = pte_pfn(pte); unsigned long pfn = pte_pfn(ptent);
if (pfn_valid(pfn)) { if (pfn_valid(pfn)) {
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
if (PageReserved(page)) if (PageReserved(page))
...@@ -500,20 +493,20 @@ static void zap_pte_range(struct mmu_gather *tlb, ...@@ -500,20 +493,20 @@ static void zap_pte_range(struct mmu_gather *tlb,
page->index > details->last_index)) page->index > details->last_index))
continue; continue;
} }
pte = ptep_get_and_clear(tlb->mm, address+offset, ptep); ptent = ptep_get_and_clear(tlb->mm, addr, pte);
tlb_remove_tlb_entry(tlb, ptep, address+offset); tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page)) if (unlikely(!page))
continue; continue;
if (unlikely(details) && details->nonlinear_vma if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma, && linear_page_index(details->nonlinear_vma,
address+offset) != page->index) addr) != page->index)
set_pte_at(tlb->mm, address+offset, set_pte_at(tlb->mm, addr, pte,
ptep, pgoff_to_pte(page->index)); pgoff_to_pte(page->index));
if (pte_dirty(pte)) if (pte_dirty(ptent))
set_page_dirty(page); set_page_dirty(page);
if (PageAnon(page)) if (PageAnon(page))
tlb->mm->anon_rss--; tlb->mm->anon_rss--;
else if (pte_young(pte)) else if (pte_young(ptent))
mark_page_accessed(page); mark_page_accessed(page);
tlb->freed++; tlb->freed++;
page_remove_rmap(page); page_remove_rmap(page);
...@@ -526,68 +519,62 @@ static void zap_pte_range(struct mmu_gather *tlb, ...@@ -526,68 +519,62 @@ static void zap_pte_range(struct mmu_gather *tlb,
*/ */
if (unlikely(details)) if (unlikely(details))
continue; continue;
if (!pte_file(pte)) if (!pte_file(ptent))
free_swap_and_cache(pte_to_swp_entry(pte)); free_swap_and_cache(pte_to_swp_entry(ptent));
pte_clear(tlb->mm, address+offset, ptep); pte_clear(tlb->mm, addr, pte);
} } while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(ptep-1); pte_unmap(pte - 1);
} }
/*
 * Walk the pmd entries covering [addr, end) under one pud and zap the
 * pte ranges beneath each.  Uses the p?d_addr_end helpers so the loop
 * never crosses the pud boundary and terminates exactly at 'end'.
 */
static void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	/* Nothing mapped here (or a corrupt entry that got cleared). */
	if (pud_none_or_clear_bad(pud))
		return;
	pmd = pmd_offset(pud, addr);
	do {
		/* Clamp this step to the end of the current pmd span. */
		next = pmd_addr_end(addr, end);
		zap_pte_range(tlb, pmd, addr, next, details);
	} while (pmd++, addr = next, addr != end);
}
/*
 * Walk the pud entries covering [addr, end) under one pgd, descending
 * into zap_pmd_range for each populated span.  pud_addr_end keeps every
 * step within a single pud and stops precisely at 'end'.
 */
static void zap_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	/* Empty or bad pgd entry: nothing to tear down at this level. */
	if (pgd_none_or_clear_bad(pgd))
		return;
	pud = pud_offset(pgd, addr);
	do {
		/* Advance at most to the next pud boundary (or 'end'). */
		next = pud_addr_end(addr, end);
		zap_pmd_range(tlb, pud, addr, next, details);
	} while (pud++, addr = next, addr != end);
}
static void unmap_page_range(struct mmu_gather *tlb, static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct vm_area_struct *vma, unsigned long address, unsigned long addr, unsigned long end,
unsigned long end, struct zap_details *details) struct zap_details *details)
{ {
unsigned long next;
pgd_t *pgd; pgd_t *pgd;
int i; unsigned long next;
if (details && !details->check_mapping && !details->nonlinear_vma)
details = NULL;
BUG_ON(address >= end); BUG_ON(addr >= end);
pgd = pgd_offset(vma->vm_mm, address);
tlb_start_vma(tlb, vma); tlb_start_vma(tlb, vma);
for (i = pgd_index(address); i <= pgd_index(end-1); i++) { pgd = pgd_offset(vma->vm_mm, addr);
next = (address + PGDIR_SIZE) & PGDIR_MASK; do {
if (next <= address || next > end) next = pgd_addr_end(addr, end);
next = end; zap_pud_range(tlb, pgd, addr, next, details);
zap_pud_range(tlb, pgd, address, next, details); } while (pgd++, addr = next, addr != end);
address = next;
pgd++;
}
tlb_end_vma(tlb, vma); tlb_end_vma(tlb, vma);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment