Commit f9acc8c7 authored by Fengguang Wu, committed by Linus Torvalds

readahead: sanify file_ra_state names

Rename some file_ra_state variables and remove some accessors.

It results in much simpler code.
Kudos to Rusty!
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cf914a7d
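For orientation before reading the diff: the old window indices can be recovered
from the new fields, so the rename loses no information except la_index. A
minimal sketch of the correspondence (these helpers are illustrative only, not
part of the patch):

/*
 * Old-name equivalents in terms of the new file_ra_state fields:
 *   ra_index        == start
 *   readahead_index == start + size
 *   lookahead_index == start + size - async_size
 * la_index (where the application was when the window was set up)
 * has no equivalent; the new code simply drops it.
 */
static inline pgoff_t old_ra_index(struct file_ra_state *ra)
{
	return ra->start;
}

static inline pgoff_t old_readahead_index(struct file_ra_state *ra)
{
	return ra->start + ra->size;
}

static inline pgoff_t old_lookahead_index(struct file_ra_state *ra)
{
	return ra->start + ra->size - ra->async_size;
}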
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -695,16 +695,12 @@ struct fown_struct {
 
 /*
  * Track a single file's readahead state
- *
- * ================#============|==================#==================|
- *        ^               ^                   ^               ^
- * file_ra_state.la_index    .ra_index .lookahead_index .readahead_index
  */
 struct file_ra_state {
-	pgoff_t la_index;		/* enqueue time */
-	pgoff_t ra_index;		/* begin offset */
-	pgoff_t lookahead_index;	/* time to do next readahead */
-	pgoff_t readahead_index;	/* end offset */
+	pgoff_t start;			/* where readahead started */
+	unsigned long size;		/* # of readahead pages */
+	unsigned long async_size;	/* do asynchronous readahead when
+					   there are only # of pages ahead */
 
 	unsigned long ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
@@ -713,60 +709,15 @@ struct file_ra_state {
 	unsigned int prev_offset;	/* Offset where last read() ended in a page */
 };
 
-/*
- * Measuring read-ahead sizes.
- *
- *                          |----------- readahead size ------------>|
- * ===#============|==================#=====================|
- *    |------- invoke interval ------>|-- lookahead size -->|
- */
-static inline unsigned long ra_readahead_size(struct file_ra_state *ra)
-{
-	return ra->readahead_index - ra->ra_index;
-}
-
-static inline unsigned long ra_lookahead_size(struct file_ra_state *ra)
-{
-	return ra->readahead_index - ra->lookahead_index;
-}
-
-static inline unsigned long ra_invoke_interval(struct file_ra_state *ra)
-{
-	return ra->lookahead_index - ra->la_index;
-}
-
 /*
  * Check if @index falls in the readahead windows.
  */
 static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 {
-	return (index >= ra->la_index &&
-		index <  ra->readahead_index);
+	return (index >= ra->start &&
+		index <  ra->start + ra->size);
 }
 
-/*
- * Where is the old read-ahead and look-ahead?
- */
-static inline void ra_set_index(struct file_ra_state *ra,
-				pgoff_t la_index, pgoff_t ra_index)
-{
-	ra->la_index = la_index;
-	ra->ra_index = ra_index;
-}
-
-/*
- * Where is the new read-ahead and look-ahead?
- */
-static inline void ra_set_size(struct file_ra_state *ra,
-			       unsigned long ra_size, unsigned long la_size)
-{
-	ra->readahead_index = ra->ra_index + ra_size;
-	ra->lookahead_index = ra->ra_index + ra_size - la_size;
-}
-
-unsigned long ra_submit(struct file_ra_state *ra,
-			struct address_space *mapping, struct file *filp);
-
 struct file {
 	/*
 	 * fu_list becomes invalid after file_free is called and queued via
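With the new fields, the window membership test above reduces to a plain range
check on [start, start + size). A small user-space illustration (a pared-down
stand-in for the kernel struct; not kernel code):

#include <stdio.h>

typedef unsigned long pgoff_t;

struct file_ra_state {			/* pared-down stand-in */
	pgoff_t start;			/* where readahead started */
	unsigned long size;		/* # of readahead pages */
	unsigned long async_size;	/* refill when this many pages remain */
};

/* Same logic as the patched ra_has_index() above. */
static int ra_has_index(struct file_ra_state *ra, pgoff_t index)
{
	return index >= ra->start && index < ra->start + ra->size;
}

int main(void)
{
	struct file_ra_state ra = { .start = 100, .size = 32, .async_size = 32 };

	printf("%d %d %d\n",
	       ra_has_index(&ra, 99),	/* 0: just before the window */
	       ra_has_index(&ra, 100),	/* 1: first page of the window */
	       ra_has_index(&ra, 131));	/* 1: last page, 100 + 32 - 1 */
	return 0;
}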
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -253,21 +253,16 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-unsigned long ra_submit(struct file_ra_state *ra,
+static unsigned long ra_submit(struct file_ra_state *ra,
 		       struct address_space *mapping, struct file *filp)
 {
-	unsigned long ra_size;
-	unsigned long la_size;
 	int actual;
 
-	ra_size = ra_readahead_size(ra);
-	la_size = ra_lookahead_size(ra);
 	actual = __do_page_cache_readahead(mapping, filp,
-					ra->ra_index, ra_size, la_size);
+					ra->start, ra->size, ra->async_size);
 
 	return actual;
 }
-EXPORT_SYMBOL_GPL(ra_submit);
 
 /*
  * Set the initial window size, round to next power of 2 and square
@@ -296,7 +291,7 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
 static unsigned long get_next_ra_size(struct file_ra_state *ra,
 						unsigned long max)
 {
-	unsigned long cur = ra->readahead_index - ra->ra_index;
+	unsigned long cur = ra->size;
 	unsigned long newsize;
 
 	if (cur < max / 16)
@@ -313,28 +308,21 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * The fields in struct file_ra_state represent the most-recently-executed
  * readahead attempt:
  *
- *             |-------- last readahead window -------->|
- *       |-- application walking here -->|
- * ======#============|==================#=====================|
- *       ^la_index    ^ra_index          ^lookahead_index      ^readahead_index
- *
- * [ra_index, readahead_index) represents the last readahead window.
- *
- * [la_index, lookahead_index] is where the application would be walking(in
- * the common case of cache-cold sequential reads): the last window was
- * established when the application was at la_index, and the next window will
- * be bring in when the application reaches lookahead_index.
+ *                        |<----- async_size ---------|
+ *     |------------------- size -------------------->|
+ *     |==================#===========================|
+ *     ^start             ^page marked with PG_readahead
  *
  * To overlap application thinking time and disk I/O time, we do
  * `readahead pipelining': Do not wait until the application consumed all
  * readahead pages and stalled on the missing page at readahead_index;
- * Instead, submit an asynchronous readahead I/O as early as the application
- * reads on the page at lookahead_index. Normally lookahead_index will be
- * equal to ra_index, for maximum pipelining.
+ * Instead, submit an asynchronous readahead I/O as soon as there are
+ * only async_size pages left in the readahead window. Normally async_size
+ * will be equal to size, for maximum pipelining.
  *
  * In interleaved sequential reads, concurrent streams on the same fd can
  * be invalidating each other's readahead state. So we flag the new readahead
- * page at lookahead_index with PG_readahead, and use it as readahead
+ * page at (start+size-async_size) with PG_readahead, and use it as readahead
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
@@ -363,24 +351,21 @@ ondemand_readahead(struct address_space *mapping,
 		   unsigned long req_size)
 {
 	unsigned long max;	/* max readahead pages */
-	pgoff_t ra_index;	/* readahead index */
-	unsigned long ra_size;	/* readahead size */
-	unsigned long la_size;	/* lookahead size */
 	int sequential;
 
 	max = ra->ra_pages;
 	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
 
 	/*
-	 * Lookahead/readahead hit, assume sequential access.
+	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
 	 */
-	if (offset && (offset == ra->lookahead_index ||
-			offset == ra->readahead_index)) {
-		ra_index = ra->readahead_index;
-		ra_size = get_next_ra_size(ra, max);
-		la_size = ra_size;
-		goto fill_ra;
+	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
+			offset == (ra->start + ra->size))) {
+		ra->start += ra->size;
+		ra->size = get_next_ra_size(ra, max);
+		ra->async_size = ra->size;
+		goto readit;
 	}
 
 	/*
@@ -399,24 +384,21 @@ ondemand_readahead(struct address_space *mapping,
 	 * - oversize random read
 	 * Start readahead for it.
 	 */
-	ra_index = offset;
-	ra_size = get_init_ra_size(req_size, max);
-	la_size = ra_size > req_size ? ra_size - req_size : ra_size;
+	ra->start = offset;
+	ra->size = get_init_ra_size(req_size, max);
+	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
 	/*
-	 * Hit on a lookahead page without valid readahead state.
+	 * Hit on a marked page without valid readahead state.
 	 * E.g. interleaved reads.
 	 * Not knowing its readahead pos/size, bet on the minimal possible one.
 	 */
 	if (hit_readahead_marker) {
-		ra_index++;
-		ra_size = min(4 * ra_size, max);
+		ra->start++;
+		ra->size = get_next_ra_size(ra, max);
 	}
 
-fill_ra:
-	ra_set_index(ra, offset, ra_index);
-	ra_set_size(ra, ra_size, la_size);
-
+readit:
 	return ra_submit(ra, mapping, filp);
 }
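To see the renamed fields in motion, here is a user-space replay of the
sequential-hit branch above: the window is pushed forward whenever the reading
position reaches the PG_readahead marker at start + size - async_size, or the
first missing page at start + size. Only the `cur < max / 16' test of
get_next_ra_size() is visible in this diff, so the growth policy below
(quadruple small windows, double larger ones, cap at max) is an assumption:

#include <stdio.h>

typedef unsigned long pgoff_t;

struct file_ra_state {
	pgoff_t start;
	unsigned long size;
	unsigned long async_size;
};

/* Assumed policy: 4x while the window is small, 2x after, capped at max. */
static unsigned long get_next_ra_size(struct file_ra_state *ra, unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize = (cur < max / 16) ? 4 * cur : 2 * cur;

	return newsize < max ? newsize : max;
}

int main(void)
{
	unsigned long max = 128;	/* stands in for ra->ra_pages */
	struct file_ra_state ra = { .start = 0, .size = 4, .async_size = 4 };
	pgoff_t offset;

	for (offset = 0; offset < 600; offset++) {
		/* The sequential-hit branch of ondemand_readahead(). */
		if (offset && (offset == ra.start + ra.size - ra.async_size ||
			       offset == ra.start + ra.size)) {
			ra.start += ra.size;
			ra.size = get_next_ra_size(&ra, max);
			ra.async_size = ra.size;	/* maximum pipelining */
			printf("readahead [%lu, %lu)\n",
			       ra.start, ra.start + ra.size);
		}
	}
	return 0;
}

With a 4-page initial window and max = 128 this prints [4, 20), [20, 52),
[52, 116), [116, 244) and so on: the window ramps up, then advances by max
pages per step. Setting async_size equal to size means the next asynchronous
readahead fires as soon as the application touches the first page of the
freshly readahead window, which is what the comment calls maximum pipelining.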