Commit 9d24a13a authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

iomap: convert from readpages to readahead

Use the new readahead operation in iomap.  Convert XFS and ZoneFS to use
it.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-26-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 76a0294e
...@@ -214,9 +214,8 @@ iomap_read_end_io(struct bio *bio) ...@@ -214,9 +214,8 @@ iomap_read_end_io(struct bio *bio)
struct iomap_readpage_ctx { struct iomap_readpage_ctx {
struct page *cur_page; struct page *cur_page;
bool cur_page_in_bio; bool cur_page_in_bio;
bool is_readahead;
struct bio *bio; struct bio *bio;
struct list_head *pages; struct readahead_control *rac;
}; };
static void static void
...@@ -308,7 +307,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, ...@@ -308,7 +307,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (ctx->bio) if (ctx->bio)
submit_bio(ctx->bio); submit_bio(ctx->bio);
if (ctx->is_readahead) /* same as readahead_gfp_mask */ if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN; gfp |= __GFP_NORETRY | __GFP_NOWARN;
ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
/* /*
...@@ -319,7 +318,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, ...@@ -319,7 +318,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (!ctx->bio) if (!ctx->bio)
ctx->bio = bio_alloc(orig_gfp, 1); ctx->bio = bio_alloc(orig_gfp, 1);
ctx->bio->bi_opf = REQ_OP_READ; ctx->bio->bi_opf = REQ_OP_READ;
if (ctx->is_readahead) if (ctx->rac)
ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_opf |= REQ_RAHEAD;
ctx->bio->bi_iter.bi_sector = sector; ctx->bio->bi_iter.bi_sector = sector;
bio_set_dev(ctx->bio, iomap->bdev); bio_set_dev(ctx->bio, iomap->bdev);
...@@ -375,36 +374,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops) ...@@ -375,36 +374,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
} }
EXPORT_SYMBOL_GPL(iomap_readpage); EXPORT_SYMBOL_GPL(iomap_readpage);
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
loff_t length, loff_t *done)
{
while (!list_empty(pages)) {
struct page *page = lru_to_page(pages);
if (page_offset(page) >= (u64)pos + length)
break;
list_del(&page->lru);
if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
GFP_NOFS))
return page;
/*
* If we already have a page in the page cache at index we are
* done. Upper layers don't care if it is uptodate after the
* readpages call itself as every page gets checked again once
* actually needed.
*/
*done += PAGE_SIZE;
put_page(page);
}
return NULL;
}
static loff_t static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
void *data, struct iomap *iomap, struct iomap *srcmap) void *data, struct iomap *iomap, struct iomap *srcmap)
{ {
struct iomap_readpage_ctx *ctx = data; struct iomap_readpage_ctx *ctx = data;
...@@ -418,10 +389,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, ...@@ -418,10 +389,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
ctx->cur_page = NULL; ctx->cur_page = NULL;
} }
if (!ctx->cur_page) { if (!ctx->cur_page) {
ctx->cur_page = iomap_next_page(inode, ctx->pages, ctx->cur_page = readahead_page(ctx->rac);
pos, length, &done);
if (!ctx->cur_page)
break;
ctx->cur_page_in_bio = false; ctx->cur_page_in_bio = false;
} }
ret = iomap_readpage_actor(inode, pos + done, length - done, ret = iomap_readpage_actor(inode, pos + done, length - done,
...@@ -431,32 +399,43 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, ...@@ -431,32 +399,43 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
return done; return done;
} }
int /**
iomap_readpages(struct address_space *mapping, struct list_head *pages, * iomap_readahead - Attempt to read pages from a file.
unsigned nr_pages, const struct iomap_ops *ops) * @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
*
* Context: The @ops callbacks may submit I/O (eg to read the addresses of
* blocks from disc), and may wait for it. The caller may be trying to
* access a different page, and so sleeping excessively should be avoided.
* It may allocate memory, but should avoid costly allocations. This
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{ {
struct inode *inode = rac->mapping->host;
loff_t pos = readahead_pos(rac);
loff_t length = readahead_length(rac);
struct iomap_readpage_ctx ctx = { struct iomap_readpage_ctx ctx = {
.pages = pages, .rac = rac,
.is_readahead = true,
}; };
loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
loff_t last = page_offset(list_entry(pages->next, struct page, lru));
loff_t length = last - pos + PAGE_SIZE, ret = 0;
trace_iomap_readpages(mapping->host, nr_pages); trace_iomap_readahead(inode, readahead_count(rac));
while (length > 0) { while (length > 0) {
ret = iomap_apply(mapping->host, pos, length, 0, ops, loff_t ret = iomap_apply(inode, pos, length, 0, ops,
&ctx, iomap_readpages_actor); &ctx, iomap_readahead_actor);
if (ret <= 0) { if (ret <= 0) {
WARN_ON_ONCE(ret == 0); WARN_ON_ONCE(ret == 0);
goto done; break;
} }
pos += ret; pos += ret;
length -= ret; length -= ret;
} }
ret = 0;
done:
if (ctx.bio) if (ctx.bio)
submit_bio(ctx.bio); submit_bio(ctx.bio);
if (ctx.cur_page) { if (ctx.cur_page) {
...@@ -464,15 +443,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages, ...@@ -464,15 +443,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
unlock_page(ctx.cur_page); unlock_page(ctx.cur_page);
put_page(ctx.cur_page); put_page(ctx.cur_page);
} }
/*
* Check that we didn't lose a page due to the arcance calling
* conventions..
*/
WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
return ret;
} }
EXPORT_SYMBOL_GPL(iomap_readpages); EXPORT_SYMBOL_GPL(iomap_readahead);
/* /*
* iomap_is_partially_uptodate checks whether blocks within a page are * iomap_is_partially_uptodate checks whether blocks within a page are
......
...@@ -39,7 +39,7 @@ DEFINE_EVENT(iomap_readpage_class, name, \ ...@@ -39,7 +39,7 @@ DEFINE_EVENT(iomap_readpage_class, name, \
TP_PROTO(struct inode *inode, int nr_pages), \ TP_PROTO(struct inode *inode, int nr_pages), \
TP_ARGS(inode, nr_pages)) TP_ARGS(inode, nr_pages))
DEFINE_READPAGE_EVENT(iomap_readpage); DEFINE_READPAGE_EVENT(iomap_readpage);
DEFINE_READPAGE_EVENT(iomap_readpages); DEFINE_READPAGE_EVENT(iomap_readahead);
DECLARE_EVENT_CLASS(iomap_range_class, DECLARE_EVENT_CLASS(iomap_range_class,
TP_PROTO(struct inode *inode, unsigned long off, unsigned int len), TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),
......
...@@ -621,14 +621,11 @@ xfs_vm_readpage( ...@@ -621,14 +621,11 @@ xfs_vm_readpage(
return iomap_readpage(page, &xfs_read_iomap_ops); return iomap_readpage(page, &xfs_read_iomap_ops);
} }
STATIC int STATIC void
xfs_vm_readpages( xfs_vm_readahead(
struct file *unused, struct readahead_control *rac)
struct address_space *mapping,
struct list_head *pages,
unsigned nr_pages)
{ {
return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops); iomap_readahead(rac, &xfs_read_iomap_ops);
} }
static int static int
...@@ -644,7 +641,7 @@ xfs_iomap_swapfile_activate( ...@@ -644,7 +641,7 @@ xfs_iomap_swapfile_activate(
const struct address_space_operations xfs_address_space_operations = { const struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage, .readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages, .readahead = xfs_vm_readahead,
.writepage = xfs_vm_writepage, .writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages, .writepages = xfs_vm_writepages,
.set_page_dirty = iomap_set_page_dirty, .set_page_dirty = iomap_set_page_dirty,
......
...@@ -78,10 +78,9 @@ static int zonefs_readpage(struct file *unused, struct page *page) ...@@ -78,10 +78,9 @@ static int zonefs_readpage(struct file *unused, struct page *page)
return iomap_readpage(page, &zonefs_iomap_ops); return iomap_readpage(page, &zonefs_iomap_ops);
} }
static int zonefs_readpages(struct file *unused, struct address_space *mapping, static void zonefs_readahead(struct readahead_control *rac)
struct list_head *pages, unsigned int nr_pages)
{ {
return iomap_readpages(mapping, pages, nr_pages, &zonefs_iomap_ops); iomap_readahead(rac, &zonefs_iomap_ops);
} }
/* /*
...@@ -128,7 +127,7 @@ static int zonefs_writepages(struct address_space *mapping, ...@@ -128,7 +127,7 @@ static int zonefs_writepages(struct address_space *mapping,
static const struct address_space_operations zonefs_file_aops = { static const struct address_space_operations zonefs_file_aops = {
.readpage = zonefs_readpage, .readpage = zonefs_readpage,
.readpages = zonefs_readpages, .readahead = zonefs_readahead,
.writepage = zonefs_writepage, .writepage = zonefs_writepage,
.writepages = zonefs_writepages, .writepages = zonefs_writepages,
.set_page_dirty = iomap_set_page_dirty, .set_page_dirty = iomap_set_page_dirty,
......
...@@ -155,8 +155,7 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, ...@@ -155,8 +155,7 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops); const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops);
int iomap_readpages(struct address_space *mapping, struct list_head *pages, void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
unsigned nr_pages, const struct iomap_ops *ops);
int iomap_set_page_dirty(struct page *page); int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from, int iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count); unsigned long count);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment