Commit a5afe74c authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] ptwalk: copy_page_range

Convert copy_page_range pagetable walkers to loops using p?d_addr_end.
Merge copy_swap_pte into copy_one_pte, make a few minor tidyups.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9736ce3a
...@@ -260,20 +260,7 @@ pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned lon ...@@ -260,20 +260,7 @@ pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned lon
*/ */
static inline void static inline void
copy_swap_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t pte) copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
{
if (pte_file(pte))
return;
swap_duplicate(pte_to_swp_entry(pte));
if (list_empty(&dst_mm->mmlist)) {
spin_lock(&mmlist_lock);
list_add(&dst_mm->mmlist, &src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
}
static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags, pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
unsigned long addr) unsigned long addr)
{ {
...@@ -281,12 +268,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -281,12 +268,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct page *page; struct page *page;
unsigned long pfn; unsigned long pfn;
/* pte contains position in swap, so copy. */ /* pte contains position in swap or file, so copy. */
if (!pte_present(pte)) { if (unlikely(!pte_present(pte))) {
copy_swap_pte(dst_mm, src_mm, pte); if (!pte_file(pte)) {
swap_duplicate(pte_to_swp_entry(pte));
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
list_add(&dst_mm->mmlist, &src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
}
set_pte_at(dst_mm, addr, dst_pte, pte); set_pte_at(dst_mm, addr, dst_pte, pte);
return; return;
} }
pfn = pte_pfn(pte); pfn = pte_pfn(pte);
/* the pte points outside of valid memory, the /* the pte points outside of valid memory, the
* mapping is assumed to be good, meaningful * mapping is assumed to be good, meaningful
...@@ -326,25 +322,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -326,25 +322,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
page_dup_rmap(page); page_dup_rmap(page);
} }
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end) unsigned long addr, unsigned long end)
{ {
pte_t *src_pte, *dst_pte; pte_t *src_pte, *dst_pte;
pte_t *s, *d;
unsigned long vm_flags = vma->vm_flags; unsigned long vm_flags = vma->vm_flags;
again: again:
d = dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr); dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
if (!dst_pte) if (!dst_pte)
return -ENOMEM; return -ENOMEM;
src_pte = pte_offset_map_nested(src_pmd, addr);
spin_lock(&src_mm->page_table_lock); spin_lock(&src_mm->page_table_lock);
s = src_pte = pte_offset_map_nested(src_pmd, addr); do {
for (; addr < end; s++, d++) {
if (!pte_none(*s))
copy_one_pte(dst_mm, src_mm, d, s, vm_flags, addr);
addr += PAGE_SIZE;
/* /*
* We are holding two locks at this point - either of them * We are holding two locks at this point - either of them
* could generate latencies in another task on another CPU. * could generate latencies in another task on another CPU.
...@@ -353,105 +345,86 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -353,105 +345,86 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
need_lockbreak(&src_mm->page_table_lock) || need_lockbreak(&src_mm->page_table_lock) ||
need_lockbreak(&dst_mm->page_table_lock)) need_lockbreak(&dst_mm->page_table_lock))
break; break;
} if (pte_none(*src_pte))
pte_unmap_nested(src_pte); continue;
pte_unmap(dst_pte); copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vm_flags, addr);
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
spin_unlock(&src_mm->page_table_lock); spin_unlock(&src_mm->page_table_lock);
pte_unmap_nested(src_pte - 1);
pte_unmap(dst_pte - 1);
cond_resched_lock(&dst_mm->page_table_lock); cond_resched_lock(&dst_mm->page_table_lock);
if (addr < end) if (addr != end)
goto again; goto again;
return 0; return 0;
} }
static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
unsigned long addr, unsigned long end) unsigned long addr, unsigned long end)
{ {
pmd_t *src_pmd, *dst_pmd; pmd_t *src_pmd, *dst_pmd;
int err = 0;
unsigned long next; unsigned long next;
src_pmd = pmd_offset(src_pud, addr);
dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
if (!dst_pmd) if (!dst_pmd)
return -ENOMEM; return -ENOMEM;
src_pmd = pmd_offset(src_pud, addr);
for (; addr < end; addr = next, src_pmd++, dst_pmd++) { do {
next = (addr + PMD_SIZE) & PMD_MASK; next = pmd_addr_end(addr, end);
if (next > end || next <= addr)
next = end;
if (pmd_none_or_clear_bad(src_pmd)) if (pmd_none_or_clear_bad(src_pmd))
continue; continue;
err = copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
vma, addr, next); vma, addr, next))
if (err) return -ENOMEM;
break; } while (dst_pmd++, src_pmd++, addr = next, addr != end);
} return 0;
return err;
} }
static int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, static int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end) unsigned long addr, unsigned long end)
{ {
pud_t *src_pud, *dst_pud; pud_t *src_pud, *dst_pud;
int err = 0;
unsigned long next; unsigned long next;
src_pud = pud_offset(src_pgd, addr);
dst_pud = pud_alloc(dst_mm, dst_pgd, addr); dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
if (!dst_pud) if (!dst_pud)
return -ENOMEM; return -ENOMEM;
src_pud = pud_offset(src_pgd, addr);
for (; addr < end; addr = next, src_pud++, dst_pud++) { do {
next = (addr + PUD_SIZE) & PUD_MASK; next = pud_addr_end(addr, end);
if (next > end || next <= addr)
next = end;
if (pud_none_or_clear_bad(src_pud)) if (pud_none_or_clear_bad(src_pud))
continue; continue;
err = copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
vma, addr, next); vma, addr, next))
if (err) return -ENOMEM;
break; } while (dst_pud++, src_pud++, addr = next, addr != end);
} return 0;
return err;
} }
int copy_page_range(struct mm_struct *dst, struct mm_struct *src, int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct vm_area_struct *vma) struct vm_area_struct *vma)
{ {
pgd_t *src_pgd, *dst_pgd; pgd_t *src_pgd, *dst_pgd;
unsigned long addr, start, end, next; unsigned long next;
int err = 0; unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
if (is_vm_hugetlb_page(vma)) if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst, src, vma); return copy_hugetlb_page_range(dst_mm, src_mm, vma);
start = vma->vm_start;
src_pgd = pgd_offset(src, start);
dst_pgd = pgd_offset(dst, start);
end = vma->vm_end; dst_pgd = pgd_offset(dst_mm, addr);
addr = start; src_pgd = pgd_offset(src_mm, addr);
while (addr && (addr < end-1)) { do {
next = (addr + PGDIR_SIZE) & PGDIR_MASK; next = pgd_addr_end(addr, end);
if (next > end || next <= addr)
next = end;
if (pgd_none_or_clear_bad(src_pgd)) if (pgd_none_or_clear_bad(src_pgd))
goto next_pgd; continue;
err = copy_pud_range(dst, src, dst_pgd, src_pgd, if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
vma, addr, next); vma, addr, next))
if (err) return -ENOMEM;
break; } while (dst_pgd++, src_pgd++, addr = next, addr != end);
return 0;
next_pgd:
src_pgd++;
dst_pgd++;
addr = next;
}
return err;
} }
static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd, static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment