Commit a308996e authored by Christoph Hellwig, committed by Anna Schumaker

nfs: split nfs_read_folio

nfs_read_folio is a bit hard to follow because it mixes highlevel logic
with the actual data read.  Split the latter into a helper and update
the comments to be more accurate.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent fada32ed
...@@ -325,18 +325,52 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio, ...@@ -325,18 +325,52 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
} }
/* /*
* Read a page over NFS. * Actually read a folio over the wire.
* We read the page synchronously in the following case: */
* - The error flag is set for this page. This happens only when a static int nfs_do_read_folio(struct file *file, struct folio *folio)
* previous async read operation failed. {
struct inode *inode = file_inode(file);
struct nfs_pageio_descriptor pgio;
struct nfs_open_context *ctx;
int ret;
ctx = get_nfs_open_context(nfs_file_open_context(file));
xchg(&ctx->error, 0);
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
ret = nfs_read_add_folio(&pgio, ctx, folio);
if (ret)
goto out_put;
nfs_pageio_complete_read(&pgio);
nfs_update_delegated_atime(inode);
if (pgio.pg_error < 0) {
ret = pgio.pg_error;
goto out_put;
}
ret = folio_wait_locked_killable(folio);
if (!folio_test_uptodate(folio) && !ret)
ret = xchg(&ctx->error, 0);
out_put:
put_nfs_open_context(ctx);
return ret;
}
/*
* Synchronously read a folio.
*
* This is not heavily used as most users to try an asynchronous
* large read through ->readahead first.
*/ */
int nfs_read_folio(struct file *file, struct folio *folio) int nfs_read_folio(struct file *file, struct folio *folio)
{ {
struct inode *inode = file_inode(file); struct inode *inode = file_inode(file);
loff_t pos = folio_pos(folio); loff_t pos = folio_pos(folio);
size_t len = folio_size(folio); size_t len = folio_size(folio);
struct nfs_pageio_descriptor pgio;
struct nfs_open_context *ctx;
int ret; int ret;
trace_nfs_aop_readpage(inode, pos, len); trace_nfs_aop_readpage(inode, pos, len);
...@@ -361,29 +395,8 @@ int nfs_read_folio(struct file *file, struct folio *folio) ...@@ -361,29 +395,8 @@ int nfs_read_folio(struct file *file, struct folio *folio)
goto out_unlock; goto out_unlock;
ret = nfs_netfs_read_folio(file, folio); ret = nfs_netfs_read_folio(file, folio);
if (!ret)
goto out;
ctx = get_nfs_open_context(nfs_file_open_context(file));
xchg(&ctx->error, 0);
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
ret = nfs_read_add_folio(&pgio, ctx, folio);
if (ret) if (ret)
goto out_put; ret = nfs_do_read_folio(file, folio);
nfs_pageio_complete_read(&pgio);
nfs_update_delegated_atime(inode);
ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
if (!ret) {
ret = folio_wait_locked_killable(folio);
if (!folio_test_uptodate(folio) && !ret)
ret = xchg(&ctx->error, 0);
}
out_put:
put_nfs_open_context(ctx);
out: out:
trace_nfs_aop_readpage_done(inode, pos, len, ret); trace_nfs_aop_readpage_done(inode, pos, len, ret);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment