Commit ba206a02 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

btrfs: convert from readpages to readahead

Implement the new readahead method in btrfs using the new
readahead_page_batch() function.
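
For background, readahead_page_batch() (added earlier in this series) fills a
caller-supplied page array with the next run of consecutive, already-locked
pages from the readahead window and returns how many pages it stored, with
zero meaning the window is drained. A ->readahead implementation therefore
reduces to a simple batch loop. A minimal sketch of that shape, assuming a
hypothetical helper my_fs_read_contig() standing in for the filesystem's
actual contiguous-read path:

        /* Illustrative only, not part of this patch.  my_fs_read_contig()
         * is a made-up stand-in for the fs-specific read submission path.
         */
        static void my_fs_readahead(struct readahead_control *rac)
        {
                struct page *pagepool[16];
                int nr;

                /* Each call fills pagepool[] with up to ARRAY_SIZE(pagepool)
                 * consecutive pages and advances rac past them; zero ends
                 * the loop. */
                while ((nr = readahead_page_batch(rac, pagepool))) {
                        u64 start = page_offset(pagepool[0]);

                        my_fs_read_contig(pagepool, nr, start,
                                          (u64)nr * PAGE_SIZE);
                }
        }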
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-18-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d4388340
fs/btrfs/extent_io.c
@@ -4367,51 +4367,32 @@ int extent_writepages(struct address_space *mapping,
 	return ret;
 }
 
-int extent_readpages(struct address_space *mapping, struct list_head *pages,
-		     unsigned nr_pages)
+void extent_readahead(struct readahead_control *rac)
 {
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
 	struct page *pagepool[16];
 	struct extent_map *em_cached = NULL;
-	int nr = 0;
 	u64 prev_em_start = (u64)-1;
+	int nr;
 
-	while (!list_empty(pages)) {
-		u64 contig_end = 0;
-
-		for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
-			struct page *page = lru_to_page(pages);
-
-			prefetchw(&page->flags);
-			list_del(&page->lru);
-			if (add_to_page_cache_lru(page, mapping, page->index,
-						readahead_gfp_mask(mapping))) {
-				put_page(page);
-				break;
-			}
-
-			pagepool[nr++] = page;
-			contig_end = page_offset(page) + PAGE_SIZE - 1;
-		}
+	while ((nr = readahead_page_batch(rac, pagepool))) {
+		u64 contig_start = page_offset(pagepool[0]);
+		u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
 
-		if (nr) {
-			u64 contig_start = page_offset(pagepool[0]);
-
-			ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
+		ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
 
-			contiguous_readpages(pagepool, nr, contig_start,
-				contig_end, &em_cached, &bio, &bio_flags,
-				&prev_em_start);
-		}
+		contiguous_readpages(pagepool, nr, contig_start, contig_end,
+				&em_cached, &bio, &bio_flags, &prev_em_start);
 	}
 
 	if (em_cached)
 		free_extent_map(em_cached);
 
-	if (bio)
-		return submit_one_bio(bio, 0, bio_flags);
-	return 0;
+	if (bio) {
+		if (submit_one_bio(bio, 0, bio_flags))
+			return;
+	}
 }
 
 /*
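A note on the ASSERT retained above: readahead_page_batch() only returns
pages with consecutive indices, and page_offset() is page->index <<
PAGE_SHIFT, so a batch of nr pages spans exactly nr * PAGE_SIZE bytes (with
4KiB pages, a full 16-page batch starting at offset 0 ends at 16 * 4096 - 1 =
65535). As I read the series, the helper itself is a thin macro sized by the
caller's array; a sketch of its shape, assuming the pagemap.h definition from
this series:

        /* __readahead_batch() fills the array with consecutive pages and
         * advances the readahead_control past the pages it returned. */
        #define readahead_page_batch(rac, array) \
                __readahead_batch(rac, array, ARRAY_SIZE(array))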
fs/btrfs/extent_io.h
@@ -198,8 +198,7 @@ int extent_writepages(struct address_space *mapping,
 		      struct writeback_control *wbc);
 int btree_write_cache_pages(struct address_space *mapping,
 			    struct writeback_control *wbc);
-int extent_readpages(struct address_space *mapping, struct list_head *pages,
-		     unsigned nr_pages);
+void extent_readahead(struct readahead_control *rac);
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len);
 void set_page_extent_mapped(struct page *page);
fs/btrfs/inode.c
@@ -4856,8 +4856,8 @@ static void evict_inode_truncate_pages(struct inode *inode)
 
 	/*
 	 * Keep looping until we have no more ranges in the io tree.
-	 * We can have ongoing bios started by readpages (called from readahead)
-	 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
+	 * We can have ongoing bios started by readahead that have
+	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
@@ -7050,11 +7050,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 			 * for it to complete) and then invalidate the pages for
 			 * this range (through invalidate_inode_pages2_range()),
 			 * but that can lead us to a deadlock with a concurrent
-			 * call to readpages() (a buffered read or a defrag call
+			 * call to readahead (a buffered read or a defrag call
 			 * triggered a readahead) on a page lock due to an
 			 * ordered dio extent we created before but did not have
 			 * yet a corresponding bio submitted (whence it can not
-			 * complete), which makes readpages() wait for that
+			 * complete), which makes readahead wait for that
 			 * ordered extent to complete while holding a lock on
 			 * that page.
 			 */
@@ -8293,11 +8293,9 @@ static int btrfs_writepages(struct address_space *mapping,
 	return extent_writepages(mapping, wbc);
 }
 
-static int
-btrfs_readpages(struct file *file, struct address_space *mapping,
-		struct list_head *pages, unsigned nr_pages)
+static void btrfs_readahead(struct readahead_control *rac)
 {
-	return extent_readpages(mapping, pages, nr_pages);
+	extent_readahead(rac);
 }
 
 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
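One thing worth calling out at this wrapper: ->readahead returns void where
->readpages returned int, so the submit_one_bio() failure in
extent_readahead() above can no longer be propagated. That should be fine for
readahead, which is best-effort by design: pages a failed speculative read
leaves !Uptodate are simply read again through ->readpage when they are
actually needed.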
@@ -10553,7 +10551,7 @@ static const struct address_space_operations btrfs_aops = {
 	.readpage	= btrfs_readpage,
 	.writepage	= btrfs_writepage,
 	.writepages	= btrfs_writepages,
-	.readpages	= btrfs_readpages,
+	.readahead	= btrfs_readahead,
 	.direct_IO	= btrfs_direct_IO,
 	.invalidatepage	= btrfs_invalidatepage,
 	.releasepage	= btrfs_releasepage,