Commit 64a43fe2 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] ptwalk: sync_page_range

Convert filemap_sync pagetable walkers to loops using p?d_addr_end; use
similar loop to split filemap_sync into chunks.  Merge filemap_sync_pte
into sync_pte_range, cut filemap_ off the longer names, vma arg first.

There is no error from filemap_sync, nor is any use made of the flags:
if it should do something else for MS_INVALIDATE, reinstate it when that
is implemented.  Remove the redundant flush_tlb_range from afterwards:
as its comment noted, each dirty pte has already been flushed.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5ba3dbb4
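
The p?d_addr_end loops this patch converts to can be modelled outside the kernel. The sketch below is a minimal user-space approximation, not part of the commit: PMD_SHIFT/PMD_SIZE/PMD_MASK are illustrative x86_64-style values and the pmd_addr_end() function mirrors the kernel macro's clamping behaviour. The loop splits an arbitrary, unaligned range into per-pmd sub-ranges the same way the new sync_pmd_range() hands sub-ranges to sync_pte_range().

#include <stdio.h>

/* Illustrative constants: a "pmd" here covers 2 MiB, as on x86_64 with 4 KiB pages. */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/*
 * User-space model of the kernel's pmd_addr_end(): the end of the range
 * covered by the current pmd entry, clamped to the caller's end (the
 * "- 1" comparison keeps the clamp correct even if end wraps to 0).
 */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* Made-up, unaligned range, walked the way sync_pmd_range() walks a vma. */
	unsigned long addr = 0x1ff000;
	unsigned long end  = 0x623000;
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		/* Analogue of one sync_pte_range() call over [addr, next). */
		printf("chunk [%#lx, %#lx)\n", addr, next);
	} while (addr = next, addr != end);

	return 0;
}

Each level of the new walkers repeats this pattern with its own *_addr_end helper, and sync_page_range() applies it at the pgd level under mm->page_table_lock, which is why the per-level mask-and-clamp arithmetic of the old code disappears from the diff below.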
@@ -21,155 +21,109 @@
  * Called with mm->page_table_lock held to protect against other
  * threads/the swapper from ripping pte's out from under us.
  */
-static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
-	unsigned long address, unsigned int flags)
-{
-	pte_t pte = *ptep;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
-
-	if (pte_present(pte) && pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!PageReserved(page) &&
-		    (ptep_clear_flush_dirty(vma, address, ptep) ||
-		     page_test_and_clear_dirty(page)))
-			set_page_dirty(page);
-	}
-	return 0;
-}
-
-static int filemap_sync_pte_range(pmd_t * pmd,
-	unsigned long address, unsigned long end,
-	struct vm_area_struct *vma, unsigned int flags)
+static void sync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+				unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
-	int error;
 
 	if (pmd_none_or_clear_bad(pmd))
-		return 0;
-	pte = pte_offset_map(pmd, address);
-	if ((address & PMD_MASK) != (end & PMD_MASK))
-		end = (address & PMD_MASK) + PMD_SIZE;
-	error = 0;
+		return;
+	pte = pte_offset_map(pmd, addr);
 	do {
-		error |= filemap_sync_pte(pte, vma, address, flags);
-		address += PAGE_SIZE;
-		pte++;
-	} while (address && (address < end));
-
-	pte_unmap(pte - 1);
-
-	return error;
+		unsigned long pfn;
+		struct page *page;
+
+		if (!pte_present(*pte))
+			continue;
+		pfn = pte_pfn(*pte);
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageReserved(page))
+			continue;
+
+		if (ptep_clear_flush_dirty(vma, addr, pte) ||
+		    page_test_and_clear_dirty(page))
+			set_page_dirty(page);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap(pte - 1);
 }
 
-static inline int filemap_sync_pmd_range(pud_t * pud,
-	unsigned long address, unsigned long end,
-	struct vm_area_struct *vma, unsigned int flags)
+static inline void sync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+				unsigned long addr, unsigned long end)
 {
-	pmd_t * pmd;
-	int error;
+	pmd_t *pmd;
+	unsigned long next;
 
 	if (pud_none_or_clear_bad(pud))
-		return 0;
-	pmd = pmd_offset(pud, address);
-	if ((address & PUD_MASK) != (end & PUD_MASK))
-		end = (address & PUD_MASK) + PUD_SIZE;
-	error = 0;
+		return;
+	pmd = pmd_offset(pud, addr);
 	do {
-		error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return error;
+		next = pmd_addr_end(addr, end);
+		sync_pte_range(vma, pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
 }
 
-static inline int filemap_sync_pud_range(pgd_t *pgd,
-	unsigned long address, unsigned long end,
-	struct vm_area_struct *vma, unsigned int flags)
+static inline void sync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+				unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
-	int error;
+	unsigned long next;
 
 	if (pgd_none_or_clear_bad(pgd))
-		return 0;
-	pud = pud_offset(pgd, address);
-	if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
-		end = (address & PGDIR_MASK) + PGDIR_SIZE;
-	error = 0;
+		return;
+	pud = pud_offset(pgd, addr);
 	do {
-		error |= filemap_sync_pmd_range(pud, address, end, vma, flags);
-		address = (address + PUD_SIZE) & PUD_MASK;
-		pud++;
-	} while (address && (address < end));
-	return error;
+		next = pud_addr_end(addr, end);
+		sync_pmd_range(vma, pud, addr, next);
+	} while (pud++, addr = next, addr != end);
 }
 
-static int __filemap_sync(struct vm_area_struct *vma, unsigned long address,
-	size_t size, unsigned int flags)
+static void sync_page_range(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
-	unsigned long end = address + size;
 	unsigned long next;
-	int i;
-	int error = 0;
-
-	/* Aquire the lock early; it may be possible to avoid dropping
-	 * and reaquiring it repeatedly.
-	 */
-	spin_lock(&vma->vm_mm->page_table_lock);
-
-	pgd = pgd_offset(vma->vm_mm, address);
-	flush_cache_range(vma, address, end);
 
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
 	 * to do anything more on an msync() */
 	if (is_vm_hugetlb_page(vma))
-		goto out;
-
-	if (address >= end)
-		BUG();
-
-	for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
-		next = (address + PGDIR_SIZE) & PGDIR_MASK;
-		if (next <= address || next > end)
-			next = end;
-		error |= filemap_sync_pud_range(pgd, address, next, vma, flags);
-		address = next;
-		pgd++;
-	}
-	/*
-	 * Why flush ? filemap_sync_pte already flushed the tlbs with the
-	 * dirty bits.
-	 */
-	flush_tlb_range(vma, end - size, end);
- out:
-	spin_unlock(&vma->vm_mm->page_table_lock);
-	return error;
+		return;
+
+	BUG_ON(addr >= end);
+	pgd = pgd_offset(mm, addr);
+	flush_cache_range(vma, addr, end);
+	spin_lock(&mm->page_table_lock);
+	do {
+		next = pgd_addr_end(addr, end);
+		sync_pud_range(vma, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	spin_unlock(&mm->page_table_lock);
 }
 
 #ifdef CONFIG_PREEMPT
-static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
-	size_t size, unsigned int flags)
+static void filemap_sync(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end)
 {
 	const size_t chunk = 64 * 1024;	/* bytes */
-	int error = 0;
-
-	while (size) {
-		size_t sz = min(size, chunk);
+	unsigned long next;
 
-		error |= __filemap_sync(vma, address, sz, flags);
+	do {
+		next = addr + chunk;
+		if (next > end || next < addr)
+			next = end;
+		sync_page_range(vma, addr, next);
 		cond_resched();
-		address += sz;
-		size -= sz;
-	}
-	return error;
+	} while (addr = next, addr != end);
 }
 #else
-static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
-	size_t size, unsigned int flags)
+static void filemap_sync(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end)
 {
-	return __filemap_sync(vma, address, size, flags);
+	sync_page_range(vma, addr, end);
 }
 #endif
 
@@ -184,19 +138,19 @@ static int filemap_sync(struct vm_area_struct *vma, unsigned long address,
  * So my _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
-static int msync_interval(struct vm_area_struct * vma,
-	unsigned long start, unsigned long end, int flags)
+static int msync_interval(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long end, int flags)
 {
 	int ret = 0;
-	struct file * file = vma->vm_file;
+	struct file *file = vma->vm_file;
 
 	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
 		return -EBUSY;
 
 	if (file && (vma->vm_flags & VM_SHARED)) {
-		ret = filemap_sync(vma, start, end-start, flags);
+		filemap_sync(vma, addr, end);
 
-		if (!ret && (flags & MS_SYNC)) {
+		if (flags & MS_SYNC) {
 			struct address_space *mapping = file->f_mapping;
 			int err;
 
@@ -221,7 +175,7 @@ static int msync_interval(struct vm_area_struct * vma,
 asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 {
 	unsigned long end;
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
 	int unmapped_error, error = -EINVAL;
 
 	if (flags & MS_SYNC)