Commit 3ea89ee8 authored by Fengguang Wu, committed by Linus Torvalds

readahead: convert filemap invocations

Convert filemap reads to use on-demand readahead.

The new call scheme is to
- call readahead on non-cached page
- call readahead on look-ahead page
- update prev_index when finished with the read request
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Steven Pratt <slpratt@austin.ibm.com>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 122a21d1
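
To make the three call sites concrete before the diff, here is a small self-contained userspace sketch of the call scheme (not kernel code): readahead is issued on a cache miss, issued again when the look-ahead page (the PG_readahead-marked page) is hit, and prev_index is updated once at the end of the request. All names here (toy_page, toy_ra_state, toy_readahead_ondemand, the fixed RA_WINDOW size) are illustrative stand-ins; in the kernel the work is done by page_cache_readahead_ondemand() and the PageReadahead() test shown in the diff below.

/*
 * Toy userspace model of the on-demand readahead call scheme.
 * Everything here is an illustrative stand-in, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_PAGES	64
#define RA_WINDOW	8	/* fixed toy readahead window */

struct toy_page {
	bool cached;		/* page is present in the "page cache" */
	bool readahead;		/* stand-in for the PG_readahead mark */
};

struct toy_ra_state {
	long prev_index;	/* last page touched by the previous request */
};

static struct toy_page cache[CACHE_PAGES];

/*
 * Populate `count` pages starting at `index` and mark the page in the
 * middle of the window as the look-ahead page, so hitting it later
 * triggers the next readahead before the reader runs out of pages.
 */
static void toy_readahead_ondemand(struct toy_ra_state *ra,
				   long index, long count)
{
	long last = index + count;

	if (last > CACHE_PAGES)
		last = CACHE_PAGES;
	for (long i = index; i < last; i++)
		cache[i].cached = true;
	if (index + count / 2 < CACHE_PAGES)
		cache[index + count / 2].readahead = true;
	printf("  readahead: pages %ld-%ld\n", index, last - 1);
	(void)ra;		/* a real policy would also consult ra */
}

static struct toy_page *toy_find_page(long index)
{
	return cache[index].cached ? &cache[index] : NULL;
}

/*
 * The read loop follows the new call scheme:
 *  1. call readahead on a non-cached page (cache miss),
 *  2. call readahead on a look-ahead page,
 *  3. update prev_index when finished with the read request.
 */
static void toy_read(struct toy_ra_state *ra, long index, long nr_pages)
{
	long last_index = index + nr_pages;

	while (index < last_index) {
		struct toy_page *page = toy_find_page(index);

		if (!page) {				/* call site 1 */
			toy_readahead_ondemand(ra, index, RA_WINDOW);
			page = toy_find_page(index);
			if (!page)
				break;		/* out of toy cache space */
		}
		if (page->readahead) {			/* call site 2 */
			page->readahead = false;
			toy_readahead_ondemand(ra, index, RA_WINDOW);
		}
		printf("read page %ld\n", index);
		index++;
	}
	ra->prev_index = index - 1;			/* call site 3 */
}

int main(void)
{
	struct toy_ra_state ra = { .prev_index = -1 };

	toy_read(&ra, 0, 20);
	printf("prev_index = %ld\n", ra.prev_index);
	return 0;
}
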
@@ -891,16 +891,21 @@ void do_generic_mapping_read(struct address_space *mapping,
 		unsigned long nr, ret;
 
 		cond_resched();
-		if (index == next_index)
-			next_index = page_cache_readahead(mapping, &ra, filp,
-					index, last_index - index);
-
 find_page:
 		page = find_get_page(mapping, index);
-		if (unlikely(page == NULL)) {
-			handle_ra_miss(mapping, &ra, index);
-			goto no_cached_page;
+		if (!page) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
+			page = find_get_page(mapping, index);
+			if (unlikely(page == NULL))
+				goto no_cached_page;
 		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
+		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
 page_ok:
@@ -1051,6 +1056,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 
 out:
 	*_ra = ra;
+	_ra->prev_index = prev_index;
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
@@ -1332,27 +1338,31 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (VM_RandomReadHint(vma))
 		goto no_cached_page;
 
-	/*
-	 * The readahead code wants to be told about each and every page
-	 * so it can build and shrink its windows appropriately
-	 *
-	 * For sequential accesses, we use the generic readahead logic.
-	 */
-	if (VM_SequentialReadHint(vma))
-		page_cache_readahead(mapping, ra, file, vmf->pgoff, 1);
-
 	/*
 	 * Do we have something in the page cache already?
 	 */
 retry_find:
 	page = find_lock_page(mapping, vmf->pgoff);
-	if (!page) {
-		unsigned long ra_pages;
-
-		if (VM_SequentialReadHint(vma)) {
-			handle_ra_miss(mapping, ra, vmf->pgoff);
-			goto no_cached_page;
+	/*
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(vma)) {
+		if (!page) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							vmf->pgoff, 1);
+			page = find_lock_page(mapping, vmf->pgoff);
+			if (!page)
+				goto no_cached_page;
 		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							vmf->pgoff, 1);
+		}
+	}
+
+	if (!page) {
+		unsigned long ra_pages;
+
 		ra->mmap_miss++;
 
 		/*
@@ -1405,6 +1415,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
+	ra->prev_index = page->index;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 