nfs: Convert from invalidatepage to invalidate_folio

Print the folio index instead of the pointer, since this is more
useful.  We also don't need to use page_file_mapping() as we do not
invalidate swapcache pages.  Since this is the only caller of
nfs_wb_page_cancel(), convert it to nfs_wb_folio_cancel().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
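
The pattern being applied is sketched below for context. This is an illustrative example only, not code from this patch: the example_* names are hypothetical, and the hook signatures assume the transitional address_space_operations of this series, where ->invalidatepage() takes a struct page with unsigned int offsets and ->invalidate_folio() takes a struct folio with size_t offsets and can use folio_size() for multi-page folios.

/* Illustrative sketch only; the example_* names are hypothetical. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Old-style hook: struct page, unsigned int offset/length. */
static void __maybe_unused example_invalidate_page(struct page *page,
                unsigned int offset, unsigned int length)
{
        if (offset != 0 || length < PAGE_SIZE)
                return;
        /* Whole-page invalidation; page_file_mapping() is only needed if
         * swapcache pages can reach this path. */
}

/* New-style hook: struct folio, size_t offset/length; folio_size() keeps
 * the "whole folio" check correct for multi-page folios. */
static void example_invalidate_folio(struct folio *folio, size_t offset,
                size_t length)
{
        if (offset != 0 || length < folio_size(folio))
                return;
        /* Whole-folio invalidation; folio->mapping->host is the inode,
         * since swapcache folios are never invalidated here. */
}

static const struct address_space_operations example_aops = {
        /* .invalidatepage  = example_invalidate_page,  old hook */
        .invalidate_folio   = example_invalidate_folio, /* new hook */
};

The diff below applies this pattern to nfs_invalidate_folio() and nfs_file_aops, and moves the only caller of nfs_wb_page_cancel() to the folio-based nfs_wb_folio_cancel().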
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -406,17 +406,17 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
  * - Called if either PG_private or PG_fscache is set on the page
  * - Caller holds page lock
  */
-static void nfs_invalidate_page(struct page *page, unsigned int offset,
-                                unsigned int length)
+static void nfs_invalidate_folio(struct folio *folio, size_t offset,
+                                size_t length)
 {
-        dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
-                 page, offset, length);
+        dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
+                 folio->index, offset, length);
 
-        if (offset != 0 || length < PAGE_SIZE)
+        if (offset != 0 || length < folio_size(folio))
                 return;
         /* Cancel any unstarted writes on this page */
-        nfs_wb_page_cancel(page_file_mapping(page)->host, page);
-        wait_on_page_fscache(page);
+        nfs_wb_folio_cancel(folio->mapping->host, folio);
+        folio_wait_fscache(folio);
 }
 
 /*
@@ -520,7 +520,7 @@ const struct address_space_operations nfs_file_aops = {
         .writepages = nfs_writepages,
         .write_begin = nfs_write_begin,
         .write_end = nfs_write_end,
-        .invalidatepage = nfs_invalidate_page,
+        .invalidate_folio = nfs_invalidate_folio,
         .releasepage = nfs_release_page,
         .direct_IO = nfs_direct_IO,
 #ifdef CONFIG_MIGRATION
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2049,21 +2049,21 @@ int nfs_wb_all(struct inode *inode)
 }
 EXPORT_SYMBOL_GPL(nfs_wb_all);
 
-int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
 {
         struct nfs_page *req;
         int ret = 0;
 
-        wait_on_page_writeback(page);
+        folio_wait_writeback(folio);
 
         /* blocking call to cancel all requests and join to a single (head)
          * request */
-        req = nfs_lock_and_join_requests(page);
+        req = nfs_lock_and_join_requests(&folio->page);
 
         if (IS_ERR(req)) {
                 ret = PTR_ERR(req);
         } else if (req) {
-                /* all requests from this page have been cancelled by
+                /* all requests from this folio have been cancelled by
                  * nfs_lock_and_join_requests, so just remove the head
                  * request from the inode / page_private pointer and
                  * release it */
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -583,7 +583,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
 extern int nfs_sync_inode(struct inode *inode);
 extern int nfs_wb_all(struct inode *inode);
 extern int nfs_wb_page(struct inode *inode, struct page *page);
-extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
 extern void nfs_commit_free(struct nfs_commit_data *data);