Commit c373fff7 authored by Trond Myklebust

NFSv4: Don't special case "launder"

If the client receives a fatal server error from nfs_pageio_add_request(),
then we should always truncate the page on which the error occurred.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 54551d85
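
Before the diff, here is a small stand-alone sketch of the behavioural change. This is an illustrative user-space model, not kernel code: the helper names (error_is_fatal(), error_is_fatal_on_server(), old_policy(), new_policy()) and the exact error lists are assumptions that only mirror the roles of nfs_error_is_fatal() and nfs_error_is_fatal_on_server() in the write path touched below.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for nfs_error_is_fatal() / nfs_error_is_fatal_on_server().
 * The error lists here are illustrative assumptions, not the kernel's
 * definitions. */
static bool error_is_fatal(int err)
{
	return err == -EIO || err == -ENOSPC || err == -ESTALE || err == -EINTR;
}

static bool error_is_fatal_on_server(int err)
{
	/* A locally generated interruption aborts this call, but the server
	 * never saw it, so it is not "fatal on the server". */
	return error_is_fatal(err) && err != -EINTR;
}

/* Before this commit: the request was only cancelled (and the page
 * truncated) when the write came through the launder path. */
static const char *old_policy(int err, bool launder)
{
	if (error_is_fatal(err) && launder)
		return "cancel request, truncate page";
	return "redirty page, retry later";
}

/* After this commit: any write path cancels the request as soon as the
 * error is fatal on the server, launder or not. */
static const char *new_policy(int err)
{
	if (error_is_fatal(err) && error_is_fatal_on_server(err))
		return "cancel request, truncate page";
	return "redirty page, retry later";
}

int main(void)
{
	printf("-EIO via writepages : old=%s, new=%s\n",
	       old_policy(-EIO, false), new_policy(-EIO));
	printf("-EINTR via launder  : old=%s, new=%s\n",
	       old_policy(-EINTR, true), new_policy(-EINTR));
	return 0;
}

Running the sketch shows the two sides of the change: a server-fatal error such as -EIO now truncates the page regardless of which writeback path hit it, while a purely local interruption no longer does so even on the launder path.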
fs/nfs/file.c
@@ -482,7 +482,7 @@ static int nfs_launder_page(struct page *page)
 		inode->i_ino, (long long)page_offset(page));
 
 	nfs_fscache_wait_on_page_write(nfsi, page);
-	return nfs_wb_launder_page(inode, page);
+	return nfs_wb_page(inode, page);
 }
 
 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
fs/nfs/write.c
@@ -586,8 +586,7 @@ nfs_error_is_fatal_on_server(int err)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-				struct page *page, bool nonblock,
-				bool launder)
+				struct page *page, bool nonblock)
 {
 	struct nfs_page *req;
 	int ret = 0;
@@ -610,13 +609,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	if (!nfs_pageio_add_request(pgio, req)) {
 		ret = pgio->pg_error;
 		/*
-		 * Remove the problematic req upon fatal errors
-		 * in launder case, while other dirty pages can
-		 * still be around until they get flushed.
+		 * Remove the problematic req upon fatal errors on the server
 		 */
 		if (nfs_error_is_fatal(ret)) {
 			nfs_context_set_write_error(req->wb_context, ret);
-			if (launder)
+			if (nfs_error_is_fatal_on_server(ret))
 				goto out_launder;
 		}
 		nfs_redirty_request(req);
@@ -632,13 +629,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 }
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
-			    struct nfs_pageio_descriptor *pgio, bool launder)
+			    struct nfs_pageio_descriptor *pgio)
 {
 	int ret;
 
 	nfs_pageio_cond_complete(pgio, page_index(page));
-	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
-				   launder);
+	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
 	if (ret == -EAGAIN) {
 		redirty_page_for_writepage(wbc, page);
 		ret = 0;
@@ -650,8 +646,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
  * Write an mmapped page to the server.
  */
 static int nfs_writepage_locked(struct page *page,
-				struct writeback_control *wbc,
-				bool launder)
+				struct writeback_control *wbc)
 {
 	struct nfs_pageio_descriptor pgio;
 	struct inode *inode = page_file_mapping(page)->host;
@@ -660,7 +655,7 @@ static int nfs_writepage_locked(struct page *page,
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_pageio_init_write(&pgio, inode, 0,
 				false, &nfs_async_write_completion_ops);
-	err = nfs_do_writepage(page, wbc, &pgio, launder);
+	err = nfs_do_writepage(page, wbc, &pgio);
 	nfs_pageio_complete(&pgio);
 	if (err < 0)
 		return err;
@@ -673,7 +668,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
 	int ret;
 
-	ret = nfs_writepage_locked(page, wbc, false);
+	ret = nfs_writepage_locked(page, wbc);
 	unlock_page(page);
 	return ret;
 }
@@ -682,7 +677,7 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
 {
 	int ret;
 
-	ret = nfs_do_writepage(page, wbc, data, false);
+	ret = nfs_do_writepage(page, wbc, data);
 	unlock_page(page);
 	return ret;
 }
@@ -2013,7 +2008,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 /*
  * Write back all requests on one page - we do this before reading it.
  */
-int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
+int nfs_wb_page(struct inode *inode, struct page *page)
 {
 	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
@@ -2030,7 +2025,7 @@ int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 	for (;;) {
 		wait_on_page_writeback(page);
 		if (clear_page_dirty_for_io(page)) {
-			ret = nfs_writepage_locked(page, &wbc, launder);
+			ret = nfs_writepage_locked(page, &wbc);
 			if (ret < 0)
 				goto out_error;
 			continue;
include/linux/nfs_fs.h
@@ -500,24 +500,12 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
  */
 extern int nfs_sync_inode(struct inode *inode);
 extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder);
+extern int nfs_wb_page(struct inode *inode, struct page *page);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
 extern void nfs_commit_free(struct nfs_commit_data *data);
 
-static inline int
-nfs_wb_launder_page(struct inode *inode, struct page *page)
-{
-	return nfs_wb_single_page(inode, page, true);
-}
-
-static inline int
-nfs_wb_page(struct inode *inode, struct page *page)
-{
-	return nfs_wb_single_page(inode, page, false);
-}
-
 static inline int
 nfs_have_writebacks(struct inode *inode)
 {