mm/readahead: Convert page_cache_async_readahead to take a folio

Removes a couple of calls to compound_head and saves a few bytes.
Also converts verity's read_file_data_page() to be folio-based.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 520f301c
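
The compound_head() savings mentioned above come from page_folio(): resolving a page to its folio requires a compound_head() lookup, roughly as in the simplified sketch below (not the real page_folio() definition; page_folio_sketch() is an invented name for illustration). A caller that already holds a folio lets the helper skip that lookup entirely.

	/* Simplified sketch only: page_folio() must resolve a (possibly
	 * tail) page to its head page, which is what compound_head() does.
	 * Passing a folio straight through makes this lookup unnecessary. */
	static inline struct folio *page_folio_sketch(struct page *page)
	{
		return (struct folio *)compound_head(page);
	}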
@@ -2967,8 +2967,9 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 		goto release_page;
 
 	if (PageReadahead(page))
-		page_cache_async_readahead(inode->i_mapping, ra, NULL, page,
-				page_index, last_index + 1 - page_index);
+		page_cache_async_readahead(inode->i_mapping, ra, NULL,
+				page_folio(page), page_index,
+				last_index + 1 - page_index);
 
 	if (!PageUptodate(page)) {
 		btrfs_readpage(NULL, page);

@@ -4986,7 +4986,8 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
 
 		if (PageReadahead(page)) {
 			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
-				NULL, page, index, last_index + 1 - index);
+					NULL, page_folio(page), index,
+					last_index + 1 - index);
 		}
 
 		if (!PageUptodate(page)) {

@@ -18,27 +18,26 @@
  * Read a file data page for Merkle tree construction. Do aggressive readahead,
  * since we're sequentially reading the entire file.
  */
-static struct page *read_file_data_page(struct file *filp, pgoff_t index,
+static struct page *read_file_data_page(struct file *file, pgoff_t index,
 					struct file_ra_state *ra,
 					unsigned long remaining_pages)
 {
-	struct page *page;
+	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, index);
+	struct folio *folio;
 
-	page = find_get_page_flags(filp->f_mapping, index, FGP_ACCESSED);
-	if (!page || !PageUptodate(page)) {
-		if (page)
-			put_page(page);
+	folio = __filemap_get_folio(ractl.mapping, index, FGP_ACCESSED, 0);
+	if (!folio || !folio_test_uptodate(folio)) {
+		if (folio)
+			folio_put(folio);
 		else
-			page_cache_sync_readahead(filp->f_mapping, ra, filp,
-						  index, remaining_pages);
-		page = read_mapping_page(filp->f_mapping, index, NULL);
-		if (IS_ERR(page))
-			return page;
+			page_cache_sync_ra(&ractl, remaining_pages);
+		folio = read_cache_folio(ractl.mapping, index, NULL, file);
+		if (IS_ERR(folio))
+			return &folio->page;
 	}
-	if (PageReadahead(page))
-		page_cache_async_readahead(filp->f_mapping, ra, filp, page,
-					   index, remaining_pages);
-	return page;
+	if (folio_test_readahead(folio))
+		page_cache_async_ra(&ractl, folio, remaining_pages);
+	return folio_file_page(folio, index);
 }
 
 static int build_merkle_tree_level(struct file *filp, unsigned int level,

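The verity conversion relies on the readahead_control ("ractl") carrying the file, mapping and file_ra_state together, which is why the new code can call page_cache_sync_ra()/page_cache_async_ra() without passing each of them separately. Roughly (an approximate expansion for illustration, not the exact macro), DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, index) sets up:

	struct readahead_control ractl = {
		.file = file,
		.mapping = file->f_mapping,
		.ra = ra,
		._index = index,
	};
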
@@ -1242,7 +1242,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
  * @mapping: address_space which holds the pagecache and I/O vectors
  * @ra: file_ra_state which holds the readahead state
  * @file: Used by the filesystem for authentication.
- * @page: The page at @index which triggered the readahead call.
+ * @folio: The folio at @index which triggered the readahead call.
  * @index: Index of first page to be read.
  * @req_count: Total number of pages being read by the caller.
  *
@@ -1254,10 +1254,10 @@ void page_cache_sync_readahead(struct address_space *mapping,
 static inline
 void page_cache_async_readahead(struct address_space *mapping,
 		struct file_ra_state *ra, struct file *file,
-		struct page *page, pgoff_t index, unsigned long req_count)
+		struct folio *folio, pgoff_t index, unsigned long req_count)
 {
 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-	page_cache_async_ra(&ractl, page_folio(page), req_count);
+	page_cache_async_ra(&ractl, folio, req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
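
For illustration, a hypothetical folio-based caller of the converted helper could look like the sketch below (example_readahead_hint() and its argument list are invented for this example and are not part of the patch):

	static void example_readahead_hint(struct address_space *mapping,
					   struct file_ra_state *ra,
					   struct file *file,
					   struct folio *folio, pgoff_t index,
					   unsigned long nr_pages)
	{
		/* PG_readahead on this folio marks the async readahead
		 * trigger; the folio is now passed through unchanged. */
		if (folio_test_readahead(folio))
			page_cache_async_readahead(mapping, ra, file, folio,
						   index, nr_pages);
	}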