Commit 3ea89ee8 authored by Fengguang Wu, committed by Linus Torvalds

readahead: convert filemap invocations

Convert filemap reads to use on-demand readahead.

The new call scheme, sketched below, is to
- call readahead on a non-cached page (a cache miss)
- call readahead again when a look-ahead page (one marked PageReadahead) is hit
- update prev_index when finished with the read request
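As a reading aid (not part of the change itself), the caller-side pattern in
do_generic_mapping_read() reduces to roughly the sketch below; the arguments
follow the page_cache_readahead_ondemand() calls in the hunks further down,
and error handling plus the enclosing read loop are omitted:

    page = find_get_page(mapping, index);
    if (!page) {
            /* cache miss: page is NULL here; let readahead populate the cache */
            page_cache_readahead_ondemand(mapping, &ra, filp, page,
                            index, last_index - index);
            page = find_get_page(mapping, index);
            if (!page)
                    goto no_cached_page;
    }
    if (PageReadahead(page)) {
            /* hit a look-ahead page: start the next readahead window early */
            page_cache_readahead_ondemand(mapping, &ra, filp, page,
                            index, last_index - index);
    }
    /* ... read and copy the page ... */
    /* on the way out, record where this read request ended */
    _ra->prev_index = prev_index;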
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Steven Pratt <slpratt@austin.ibm.com>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 122a21d1
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -891,15 +891,20 @@ void do_generic_mapping_read(struct address_space *mapping,
 		unsigned long nr, ret;
 
 		cond_resched();
-		if (index == next_index)
-			next_index = page_cache_readahead(mapping, &ra, filp,
-					index, last_index - index);
-
 find_page:
 		page = find_get_page(mapping, index);
-		if (unlikely(page == NULL)) {
-			handle_ra_miss(mapping, &ra, index);
-			goto no_cached_page;
+		if (!page) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
+			page = find_get_page(mapping, index);
+			if (unlikely(page == NULL))
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
 		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
@@ -1051,6 +1056,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 
 out:
 	*_ra = ra;
+	_ra->prev_index = prev_index;
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
@@ -1332,27 +1338,31 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (VM_RandomReadHint(vma))
 		goto no_cached_page;
 
-	/*
-	 * The readahead code wants to be told about each and every page
-	 * so it can build and shrink its windows appropriately
-	 *
-	 * For sequential accesses, we use the generic readahead logic.
-	 */
-	if (VM_SequentialReadHint(vma))
-		page_cache_readahead(mapping, ra, file, vmf->pgoff, 1);
-
 	/*
 	 * Do we have something in the page cache already?
 	 */
 retry_find:
 	page = find_lock_page(mapping, vmf->pgoff);
+	/*
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(vma)) {
+		if (!page) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							vmf->pgoff, 1);
+			page = find_lock_page(mapping, vmf->pgoff);
+			if (!page)
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							vmf->pgoff, 1);
+		}
+	}
+
 	if (!page) {
 		unsigned long ra_pages;
 
-		if (VM_SequentialReadHint(vma)) {
-			handle_ra_miss(mapping, ra, vmf->pgoff);
-			goto no_cached_page;
-		}
 		ra->mmap_miss++;
 
 		/*
@@ -1405,6 +1415,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
+	ra->prev_index = page->index;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 