Commit 4d9ebed4 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] copy_page_range: minor cleanup

 - Don't evaluate pfn_to_page(pte_pfn(pte)) twice.

 - adjust whitespace

 - rename inner variable `ptepage' to `page'.  It's more logical, and
   reduces collisions with the shared pagetable patch (which has to rename it
   anyway, because it adds a `ptepage' which really is "the page which holds
   ptes").
parent 96688ef6
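
For context on the first bullet: on most ports of this era pte_page(x) is defined roughly as pfn_to_page(pte_pfn(x)), so the old sequence of pte_page() followed by pfn_to_page(pte_pfn()) performed the same pte-to-page translation twice. The stand-alone userspace sketch below uses hypothetical pte_t, mem_map and macro definitions (not the kernel's) purely to illustrate the redundancy the patch removes.

#include <stdio.h>

/* Illustrative stand-ins for the kernel types and macros; the real
 * definitions live in the architecture headers. On most ports pte_page(x)
 * is just pfn_to_page(pte_pfn(x)), which made the old sequence redundant. */
typedef unsigned long pte_t;
struct page { unsigned long flags; };
static struct page mem_map[16];

#define pte_pfn(x)	((x) >> 12)		/* page frame number from the pte */
#define pfn_to_page(n)	(&mem_map[(n)])		/* frame number -> struct page */
#define pte_page(x)	pfn_to_page(pte_pfn(x))	/* common wrapper definition */

int main(void)
{
	pte_t pte = 3UL << 12;			/* made-up pte pointing at frame 3 */

	/* Old copy_page_range: the pte is translated to a struct page twice. */
	struct page *ptepage = pte_page(pte);
	unsigned long pfn = pte_pfn(pte);
	ptepage = pfn_to_page(pfn);

	/* After the cleanup: compute the pfn once, translate it once. */
	struct page *page = pfn_to_page(pte_pfn(pte));

	printf("same page: %d\n", ptepage == page);	/* prints: same page: 1 */
	return 0;
}
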
@@ -209,11 +209,12 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 	pgd_t * src_pgd, * dst_pgd;
 	unsigned long address = vma->vm_start;
 	unsigned long end = vma->vm_end;
-	unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+	unsigned long cow;
 
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst, src, vma);
 
+	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 	src_pgd = pgd_offset(src, address)-1;
 	dst_pgd = pgd_offset(dst, address)-1;
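
The cow flag that this hunk initializes after the hugetlb check selects private mappings that might ever become writable, i.e. the ones that need copy-on-write; shared mappings fail the test. A minimal userspace sketch with stand-in flag values (the real VM_* constants are defined in the kernel's mm headers) illustrates the test:

#include <stdio.h>

/* Stand-in flag values for illustration; the real VM_SHARED/VM_MAYWRITE
 * constants come from the kernel headers. */
#define VM_SHARED	0x08UL
#define VM_MAYWRITE	0x20UL

/* A vma needs copy-on-write only if it is private (no VM_SHARED) and
 * could ever become writable (VM_MAYWRITE). */
static int needs_cow(unsigned long vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
	printf("private writable: %d\n", needs_cow(VM_MAYWRITE));		/* 1 */
	printf("shared writable:  %d\n", needs_cow(VM_SHARED | VM_MAYWRITE));	/* 0 */
	printf("private readonly: %d\n", needs_cow(0));				/* 0 */
	return 0;
}
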
@@ -250,7 +251,8 @@ skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		if (pmd_bad(*src_pmd)) {
 			pmd_ERROR(*src_pmd);
 			pmd_clear(src_pmd);
-skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
+skip_copy_pte_range:
+			address = (address + PMD_SIZE) & PMD_MASK;
 			if (address >= end)
 				goto out;
 			goto cont_copy_pmd_range;
@@ -263,7 +265,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
 		src_pte = pte_offset_map_nested(src_pmd, address);
 		do {
 			pte_t pte = *src_pte;
-			struct page *ptepage;
+			struct page *page;
 			unsigned long pfn;
 
 			/* copy_one_pte */
@@ -276,30 +278,37 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
 				set_pte(dst_pte, pte);
 				goto cont_copy_pte_range_noset;
 			}
-			ptepage = pte_page(pte);
 			pfn = pte_pfn(pte);
+			page = pfn_to_page(pfn);
 			if (!pfn_valid(pfn))
 				goto cont_copy_pte_range;
-			ptepage = pfn_to_page(pfn);
-			if (PageReserved(ptepage))
+			if (PageReserved(page))
 				goto cont_copy_pte_range;
 
-			/* If it's a COW mapping, write protect it both in the parent and the child */
+			/*
+			 * If it's a COW mapping, write protect it both
+			 * in the parent and the child
+			 */
 			if (cow) {
 				ptep_set_wrprotect(src_pte);
 				pte = *src_pte;
 			}
 
-			/* If it's a shared mapping, mark it clean in the child */
+			/*
+			 * If it's a shared mapping, mark it clean in
+			 * the child
+			 */
 			if (vma->vm_flags & VM_SHARED)
 				pte = pte_mkclean(pte);
 			pte = pte_mkold(pte);
-			get_page(ptepage);
+			get_page(page);
 			dst->rss++;
-cont_copy_pte_range: set_pte(dst_pte, pte);
-			page_add_rmap(ptepage, dst_pte);
-cont_copy_pte_range_noset: address += PAGE_SIZE;
+cont_copy_pte_range:
+			set_pte(dst_pte, pte);
+			page_add_rmap(page, dst_pte);
+cont_copy_pte_range_noset:
+			address += PAGE_SIZE;
 			if (address >= end) {
 				pte_unmap_nested(src_pte);
 				pte_unmap(dst_pte);
@@ -312,7 +321,8 @@ cont_copy_pte_range_noset: address += PAGE_SIZE;
 		pte_unmap(dst_pte-1);
 		spin_unlock(&src->page_table_lock);
-cont_copy_pmd_range: src_pmd++;
+cont_copy_pmd_range:
+		src_pmd++;
 		dst_pmd++;
 	} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
 }