Commit d08b3851 authored by Peter Zijlstra, committed by Linus Torvalds

[PATCH] mm: tracking shared dirty pages

Tracking of dirty pages in shared writeable mmap()s.

The idea is simple: write-protect clean shared writeable pages, catch the
write fault, make the page writeable and set it dirty.  On page write-back,
clean all the PTE dirty bits and write-protect them once again.
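
As a rough user-space illustration of that cycle (not part of the patch; the
file name and size below are arbitrary), the first store takes a write-protect
fault that makes the PTE writable and dirty, msync() triggers write-back which
cleans and write-protects the PTE again via page_mkclean(), and the second
store faults and re-dirties the page:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("scratch.dat", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        if (p == MAP_FAILED)
                return 1;

        p[0] = 'a';              /* write fault: PTE made writable + dirty, page accounted */
        msync(p, 4096, MS_SYNC); /* write-back: PTE cleaned and write-protected again */
        p[0] = 'b';              /* faults once more, page re-dirtied */

        munmap(p, 4096);
        close(fd);
        unlink("scratch.dat");
        return 0;
}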

The implementation is a tad harder, mainly because the default
backing_dev_info capabilities were too loosely maintained.  Hence it is not
enough to test the backing_dev_info for cap_account_dirty.

The current heuristic is as follows; a VMA is eligible when:
 - it is shared writeable
    (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)
 - it is not a 'special' mapping
    (vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) == 0
 - the backing_dev_info is cap_account_dirty
    mapping_cap_account_dirty(vma->vm_file->f_mapping)
 - f_op->mmap() didn't change the default page protection
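
Condensed into a single predicate, the heuristic reads roughly as the sketch
below (illustrative only; the helper name here is made up, while the real test
added by this patch is vma_wants_writenotify() in the mm.h hunk further down,
which additionally returns 1 early when the backer implements ->page_mkwrite):

/* Illustrative sketch only; see the real vma_wants_writenotify() below. */
static int wants_dirty_tracking(struct vm_area_struct *vma)
{
        unsigned int vm_flags = vma->vm_flags;

        return (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED) &&
               !(vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) &&
               vma->vm_file && vma->vm_file->f_mapping &&
               mapping_cap_account_dirty(vma->vm_file->f_mapping) &&
               pgprot_val(vma->vm_page_prot) ==
               pgprot_val(protection_map[vm_flags &
                          (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
}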

Pages from remap_pfn_range() are explicitly excluded because their COW
semantics are already horrid enough (see vm_normal_page() in do_wp_page()) and
because they don't have a backing store anyway.

mprotect() is taught about the new behaviour as well.  However, it overrides
the last condition.

Cleaning the pages on write-back is done with page_mkclean(), a new rmap call.
It can be called on any page, but is currently only implemented for mapped
pages; if the page is found to belong to a VMA that accounts dirty pages, it
will also write-protect the PTE.

Finally, in fs/buffer.c:try_to_free_buffers(), clear_page_dirty() is moved out
from under ->private_lock.  This seems to be safe, since ->private_lock is used
to serialize access to the buffers, not the page itself.  This is needed
because clear_page_dirty() will call into page_mkclean() and would thereby
violate the locking order.

[dhowells@redhat.com: Provide a page_mkclean() implementation for NOMMU]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 725d704e
@@ -2987,6 +2987,7 @@ int try_to_free_buffers(struct page *page)
 
         spin_lock(&mapping->private_lock);
         ret = drop_buffers(page, &buffers_to_free);
+        spin_unlock(&mapping->private_lock);
         if (ret) {
                 /*
                  * If the filesystem writes its buffers by hand (eg ext3)
@@ -2998,7 +2999,6 @@ int try_to_free_buffers(struct page *page)
                  */
                 clear_page_dirty(page);
         }
-        spin_unlock(&mapping->private_lock);
 out:
         if (buffers_to_free) {
                 struct buffer_head *bh = buffers_to_free;
@@ -15,6 +15,7 @@
 #include <linux/fs.h>
 #include <linux/mutex.h>
 #include <linux/debug_locks.h>
+#include <linux/backing-dev.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -810,6 +811,39 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+static inline int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+        unsigned int vm_flags = vma->vm_flags;
+
+        /* If it was private or non-writable, the write bit is already clear */
+        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+                return 0;
+
+        /* The backer wishes to know when pages are first written to? */
+        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+                return 1;
+
+        /* The open routine did something to the protections already? */
+        if (pgprot_val(vma->vm_page_prot) !=
+            pgprot_val(protection_map[vm_flags &
+                    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+                return 0;
+
+        /* Specialty mapping? */
+        if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+                return 0;
+
+        /* Can the mapping track the dirty pages? */
+        return vma->vm_file && vma->vm_file->f_mapping &&
+                mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
 
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *, struct mm_struct *,
  */
 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
 #else   /* !CONFIG_MMU */
 
 #define anon_vma_init()         do {} while (0)
@@ -112,6 +120,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 #define page_referenced(page,l) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
+static inline int page_mkclean(struct page *page)
+{
+        return 0;
+}
+
+
 #endif  /* CONFIG_MMU */
 
 /*
@@ -1458,14 +1458,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
         struct page *old_page, *new_page;
         pte_t entry;
-        int reuse, ret = VM_FAULT_MINOR;
+        int reuse = 0, ret = VM_FAULT_MINOR;
+        struct page *dirty_page = NULL;
 
         old_page = vm_normal_page(vma, address, orig_pte);
         if (!old_page)
                 goto gotten;
 
-        if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
-                                (VM_SHARED|VM_WRITE))) {
+        /*
+         * Only catch write-faults on shared writable pages, read-only
+         * shared pages can get COWed by get_user_pages(.write=1, .force=1).
+         */
+        if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                                        (VM_WRITE|VM_SHARED))) {
                 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                         /*
                          * Notify the address space that the page is about to
@@ -1494,13 +1499,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         if (!pte_same(*page_table, orig_pte))
                                 goto unlock;
                 }
-
+                dirty_page = old_page;
+                get_page(dirty_page);
                 reuse = 1;
         } else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
                 reuse = can_share_swap_page(old_page);
                 unlock_page(old_page);
-        } else {
-                reuse = 0;
         }
 
         if (reuse) {
@@ -1566,6 +1570,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 page_cache_release(old_page);
 unlock:
         pte_unmap_unlock(page_table, ptl);
+        if (dirty_page) {
+                set_page_dirty(dirty_page);
+                put_page(dirty_page);
+        }
         return ret;
 oom:
         if (old_page)
@@ -2098,6 +2106,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         unsigned int sequence = 0;
         int ret = VM_FAULT_MINOR;
         int anon = 0;
+        struct page *dirty_page = NULL;
 
         pte_unmap(page_table);
         BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2192,6 +2201,10 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 } else {
                         inc_mm_counter(mm, file_rss);
                         page_add_file_rmap(new_page);
+                        if (write_access) {
+                                dirty_page = new_page;
+                                get_page(dirty_page);
+                        }
                 }
         } else {
                 /* One of our sibling threads was faster, back out. */
@@ -2204,6 +2217,10 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         lazy_mmu_prot_update(entry);
 unlock:
         pte_unmap_unlock(page_table, ptl);
+        if (dirty_page) {
+                set_page_dirty(dirty_page);
+                put_page(dirty_page);
+        }
         return ret;
 oom:
         page_cache_release(new_page);
@@ -1105,12 +1105,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                         goto free_vma;
         }
 
-        /* Don't make the VMA automatically writable if it's shared, but the
-         * backer wishes to know when pages are first written to */
-        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-                vma->vm_page_prot =
-                        protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
         /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
          * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
          * that memory reservation must be checked; but that reservation
@@ -1128,6 +1122,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         pgoff = vma->vm_pgoff;
         vm_flags = vma->vm_flags;
 
+        if (vma_wants_writenotify(vma))
+                vma->vm_page_prot =
+                        protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
         if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
                         vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
                 file = vma->vm_file;
@@ -123,8 +123,6 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         unsigned long oldflags = vma->vm_flags;
         long nrpages = (end - start) >> PAGE_SHIFT;
         unsigned long charged = 0;
-        unsigned int mask;
-        pgprot_t newprot;
         pgoff_t pgoff;
         int error;
 
@@ -176,24 +174,21 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         }
 
 success:
-        /* Don't make the VMA automatically writable if it's shared, but the
-         * backer wishes to know when pages are first written to */
-        mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
-        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
-                mask &= ~VM_SHARED;
-
-        newprot = protection_map[newflags & mask];
-
         /*
          * vm_flags and vm_page_prot are protected by the mmap_sem
          * held in write mode.
          */
         vma->vm_flags = newflags;
-        vma->vm_page_prot = newprot;
+        vma->vm_page_prot = protection_map[newflags &
+                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+        if (vma_wants_writenotify(vma))
+                vma->vm_page_prot = protection_map[newflags &
+                        (VM_READ|VM_WRITE|VM_EXEC)];
+
         if (is_vm_hugetlb_page(vma))
-                hugetlb_change_protection(vma, start, end, newprot);
+                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
         else
-                change_protection(vma, start, end, newprot);
+                change_protection(vma, start, end, vma->vm_page_prot);
         vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
         vm_stat_account(mm, newflags, vma->vm_file, nrpages);
         return 0;
@@ -23,6 +23,7 @@
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
+#include <linux/rmap.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
@@ -712,9 +713,15 @@ int test_clear_page_dirty(struct page *page)
                         radix_tree_tag_clear(&mapping->page_tree,
                                                 page_index(page),
                                                 PAGECACHE_TAG_DIRTY);
-                        if (mapping_cap_account_dirty(mapping))
-                                __dec_zone_page_state(page, NR_FILE_DIRTY);
                         write_unlock_irqrestore(&mapping->tree_lock, flags);
+                        /*
+                         * We can continue to use `mapping' here because the
+                         * page is locked, which pins the address_space
+                         */
+                        if (mapping_cap_account_dirty(mapping)) {
+                                page_mkclean(page);
+                                dec_zone_page_state(page, NR_FILE_DIRTY);
+                        }
                         return 1;
                 }
                 write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -744,8 +751,10 @@ int clear_page_dirty_for_io(struct page *page)
 
         if (mapping) {
                 if (TestClearPageDirty(page)) {
-                        if (mapping_cap_account_dirty(mapping))
+                        if (mapping_cap_account_dirty(mapping)) {
+                                page_mkclean(page);
                                 dec_zone_page_state(page, NR_FILE_DIRTY);
+                        }
                         return 1;
                 }
                 return 0;
@@ -434,6 +434,71 @@ int page_referenced(struct page *page, int is_locked)
         return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        unsigned long address;
+        pte_t *pte, entry;
+        spinlock_t *ptl;
+        int ret = 0;
+
+        address = vma_address(page, vma);
+        if (address == -EFAULT)
+                goto out;
+
+        pte = page_check_address(page, mm, address, &ptl);
+        if (!pte)
+                goto out;
+
+        if (!pte_dirty(*pte) && !pte_write(*pte))
+                goto unlock;
+
+        entry = ptep_get_and_clear(mm, address, pte);
+        entry = pte_mkclean(entry);
+        entry = pte_wrprotect(entry);
+        ptep_establish(vma, address, pte, entry);
+        lazy_mmu_prot_update(entry);
+        ret = 1;
+
+unlock:
+        pte_unmap_unlock(pte, ptl);
+out:
+        return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+        struct vm_area_struct *vma;
+        struct prio_tree_iter iter;
+        int ret = 0;
+
+        BUG_ON(PageAnon(page));
+
+        spin_lock(&mapping->i_mmap_lock);
+        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+                if (vma->vm_flags & VM_SHARED)
+                        ret += page_mkclean_one(page, vma);
+        }
+        spin_unlock(&mapping->i_mmap_lock);
+        return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+        int ret = 0;
+
+        BUG_ON(!PageLocked(page));
+
+        if (page_mapped(page)) {
+                struct address_space *mapping = page_mapping(page);
+                if (mapping)
+                        ret = page_mkclean_file(mapping, page);
+        }
+
+        return ret;
+}
+
 /**
  * page_set_anon_rmap - setup new anonymous rmap
  * @page:       the page to add the mapping to