Commit 7afadfdc authored by Zach Brown, committed by Linus Torvalds

[PATCH] invalidate range of pages after direct IO write

Presently we invalidate all of a file's pages when writing to any part of
that file with direct-IO.

After a direct IO write, only invalidate the pages that the write intersected.
invalidate_inode_pages2_range(mapping, pgoff_t start, pgoff_t end) is added and
called from generic_file_direct_IO().
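
For illustration, the write's byte range maps onto an inclusive range of page
offsets exactly as generic_file_direct_IO() computes it in the diff below.  A
minimal userspace sketch of that arithmetic (the 4K page size, the
PAGE_CACHE_SHIFT value of 12, and the example offset/length are assumptions for
illustration, not values taken from the patch):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                     /* assumption: 4K pages */

typedef unsigned long pgoff_t;

int main(void)
{
        unsigned long long offset = 10 * 1024;  /* byte offset of the write */
        unsigned long long len = 20 * 1024;     /* bytes written */

        /* inclusive page-offset range that the write intersected */
        pgoff_t start = offset >> PAGE_CACHE_SHIFT;
        pgoff_t end = (offset + len - 1) >> PAGE_CACHE_SHIFT;

        printf("invalidate pages %lu..%lu\n", start, end);     /* 2..7 here */
        return 0;
}

Only the pages in that range need to be dropped from the page cache; cached
pages outside it are left alone.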

While we're in there, invalidate_inode_pages2() was calling
unmap_mapping_range() with the wrong convention in the single page case.
It was providing the byte offset of the final page rather than the length
of the hole being unmapped.  This is also fixed.
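
To make the convention concrete: unmap_mapping_range() takes the byte offset
where the hole begins and the byte length of the hole, not an end offset.
Below is a small userspace sketch of the two calls; unmap_range_stub() is a
hypothetical print-only stand-in for the real kernel function, and the
page-size constants assume 4K pages:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                     /* assumption: 4K pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

/*
 * Hypothetical stand-in: the real unmap_mapping_range() unmaps the byte
 * range [holebegin, holebegin + holelen) from all pagetables.
 */
static void unmap_range_stub(unsigned long holebegin, unsigned long holelen)
{
        printf("unmap %lu bytes starting at byte %lu\n", holelen, holebegin);
}

int main(void)
{
        unsigned long index = 3;        /* page->index of the page to zap */

        /* old, buggy convention: a byte offset (+1) where a length belongs */
        unmap_range_stub(index << PAGE_CACHE_SHIFT,
                         (index << PAGE_CACHE_SHIFT) + 1);

        /* fixed convention: the hole is exactly one page long */
        unmap_range_stub(index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
        return 0;
}

With index 3, the first call asks to unmap 12289 bytes instead of the intended
single 4096-byte page.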

This was lightly tested with a 10k op fsx run with O_DIRECT on a 16MB file
in ext3 on a junky old IDE drive.  Totaling vmstat columns of blocks read
and written during the runs shows that read traffic drops significantly.
The run time seems to have gone down a little.

Two runs before the patch gave the following user/real/sys times and total
blocks in and out:

0m28.029s 0m20.093s 0m3.166s 16673 125107 
0m27.949s 0m20.068s 0m3.227s 18426 126094

and after the patch:

0m26.775s 0m19.996s 0m3.060s 3505 124982
0m26.856s 0m19.935s 0m3.052s 3505 125279

akpm:

- Don't look up more pages than we're going to use

- Don't test page->index until we've locked the page

- Check for the cursor wrapping at the end of the mapping.
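
The bound and the wrap check from the first and last notes, as a standalone
sketch (PAGEVEC_SIZE and the cursor values here are illustrative; the min()
expression and the wrap test mirror the lookup loop in
invalidate_inode_pages2_range() below):

#include <stdio.h>

#define PAGEVEC_SIZE 14                 /* illustrative; exact value is not the point */

typedef unsigned long pgoff_t;

static pgoff_t min_pgoff(pgoff_t a, pgoff_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        pgoff_t next = 5, end = 9;      /* made-up cursor and last page of the range */

        /* look up no more pages than remain in [next, end], one pagevec at most */
        pgoff_t nr = min_pgoff(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1;
        printf("look up %lu pages starting at page %lu\n", nr, next);

        /* advancing the cursor past the last possible index wraps it to zero */
        next = (pgoff_t)-1;             /* index of the final page in a mapping */
        next = next + 1;
        if (next == 0)
                printf("cursor wrapped; stop the walk\n");
        return 0;
}

Without the wrap check the cursor would restart at zero and the walk could loop
forever.
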
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 545a604c
@@ -1353,6 +1353,8 @@ static inline void invalidate_remote_inode(struct inode *inode)
 	invalidate_inode_pages(inode->i_mapping);
 }
 extern int invalidate_inode_pages2(struct address_space *mapping);
+extern int invalidate_inode_pages2_range(struct address_space *mapping,
+		pgoff_t start, pgoff_t end);
 extern int write_inode_now(struct inode *, int);
 extern int filemap_fdatawrite(struct address_space *);
 extern int filemap_flush(struct address_space *);
@@ -2283,7 +2283,10 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	retval = mapping->a_ops->direct_IO(rw, iocb, iov,
 						offset, nr_segs);
 	if (rw == WRITE && mapping->nrpages) {
-		int err = invalidate_inode_pages2(mapping);
+		pgoff_t end = (offset + iov_length(iov, nr_segs) - 1)
+					>> PAGE_CACHE_SHIFT;
+		int err = invalidate_inode_pages2_range(mapping,
+					offset >> PAGE_CACHE_SHIFT, end);
 		if (err)
 			retval = err;
 	}
@@ -241,54 +241,62 @@ unsigned long invalidate_inode_pages(struct address_space *mapping)
 EXPORT_SYMBOL(invalidate_inode_pages);
 
 /**
- * invalidate_inode_pages2 - remove all pages from an address_space
+ * invalidate_inode_pages2_range - remove range of pages from an address_space
  * @mapping - the address_space
+ * @start: the page offset 'from' which to invalidate
+ * @end: the page offset 'to' which to invalidate (inclusive)
  *
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
  * Returns -EIO if any pages could not be invalidated.
  */
-int invalidate_inode_pages2(struct address_space *mapping)
+int invalidate_inode_pages2_range(struct address_space *mapping,
+				  pgoff_t start, pgoff_t end)
 {
 	struct pagevec pvec;
-	pgoff_t next = 0;
+	pgoff_t next;
 	int i;
 	int ret = 0;
-	int did_full_unmap = 0;
+	int did_range_unmap = 0;
+	int wrapped = 0;
 
 	pagevec_init(&pvec, 0);
-	while (!ret && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+	next = start;
+	while (next <= end && !ret && !wrapped &&
+		pagevec_lookup(&pvec, mapping, next,
+			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			int was_dirty;
 
 			lock_page(page);
-			if (page->mapping != mapping) {	/* truncate race? */
+			if (page->mapping != mapping || page->index > end) {
 				unlock_page(page);
 				continue;
 			}
 			wait_on_page_writeback(page);
 			next = page->index + 1;
+			if (next == 0)
+				wrapped = 1;
 			while (page_mapped(page)) {
-				if (!did_full_unmap) {
+				if (!did_range_unmap) {
 					/*
 					 * Zap the rest of the file in one hit.
-					 * FIXME: invalidate_inode_pages2()
-					 * should take start/end offsets.
					 */
 					unmap_mapping_range(mapping,
-						page->index << PAGE_CACHE_SHIFT,
-						-1, 0);
-					did_full_unmap = 1;
+					   page->index << PAGE_CACHE_SHIFT,
+					   (end - page->index + 1)
+							<< PAGE_CACHE_SHIFT,
+					    0);
+					did_range_unmap = 1;
 				} else {
 					/*
 					 * Just zap this page
 					 */
 					unmap_mapping_range(mapping,
 					  page->index << PAGE_CACHE_SHIFT,
-					  (page->index << PAGE_CACHE_SHIFT)+1,
-					  0);
+					  PAGE_CACHE_SIZE, 0);
 				}
 			}
 			was_dirty = test_clear_page_dirty(page);
@@ -304,4 +312,19 @@ int invalidate_inode_pages2(struct address_space *mapping)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
+
+/**
+ * invalidate_inode_pages2 - remove all pages from an address_space
+ * @mapping - the address_space
+ *
+ * Any pages which are found to be mapped into pagetables are unmapped prior to
+ * invalidation.
+ *
+ * Returns -EIO if any pages could not be invalidated.
+ */
+int invalidate_inode_pages2(struct address_space *mapping)
+{
+	return invalidate_inode_pages2_range(mapping, 0, -1);
+}
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2);