Commit d8983910 authored by Fengguang Wu, committed by Linus Torvalds

readahead: pass real splice size

Pass real splice size to page_cache_readahead_ondemand().

The splice code works in chunks of 16 pages internally.  The readahead code
should be told of the overall splice size, instead of the internal chunk size.
Otherwise bad things may happen.  Imagine some 17-page random splice reads.
The code before this patch will result in two readahead calls: readahead(16);
readahead(1); That leads to one 16-page I/O and one 32-page I/O: one extra I/O
and 31 readahead miss pages.
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 431a4820
...@@ -265,7 +265,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, ...@@ -265,7 +265,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
unsigned int flags) unsigned int flags)
{ {
struct address_space *mapping = in->f_mapping; struct address_space *mapping = in->f_mapping;
unsigned int loff, nr_pages; unsigned int loff, nr_pages, req_pages;
struct page *pages[PIPE_BUFFERS]; struct page *pages[PIPE_BUFFERS];
struct partial_page partial[PIPE_BUFFERS]; struct partial_page partial[PIPE_BUFFERS];
struct page *page; struct page *page;
...@@ -281,10 +281,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, ...@@ -281,10 +281,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
index = *ppos >> PAGE_CACHE_SHIFT; index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK; loff = *ppos & ~PAGE_CACHE_MASK;
nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);
if (nr_pages > PIPE_BUFFERS)
nr_pages = PIPE_BUFFERS;
/* /*
* Lookup the (hopefully) full range of pages we need. * Lookup the (hopefully) full range of pages we need.
...@@ -298,7 +296,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, ...@@ -298,7 +296,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
*/ */
if (spd.nr_pages < nr_pages) if (spd.nr_pages < nr_pages)
page_cache_readahead_ondemand(mapping, &in->f_ra, in, page_cache_readahead_ondemand(mapping, &in->f_ra, in,
NULL, index, nr_pages - spd.nr_pages); NULL, index, req_pages - spd.nr_pages);
error = 0; error = 0;
while (spd.nr_pages < nr_pages) { while (spd.nr_pages < nr_pages) {
...@@ -355,7 +353,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, ...@@ -355,7 +353,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
if (PageReadahead(page)) if (PageReadahead(page))
page_cache_readahead_ondemand(mapping, &in->f_ra, in, page_cache_readahead_ondemand(mapping, &in->f_ra, in,
page, index, nr_pages - page_nr); page, index, req_pages - page_nr);
/* /*
* If the page isn't uptodate, we may need to start io on it * If the page isn't uptodate, we may need to start io on it
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment