fscache: Convert fscache_set_page_dirty() to fscache_dirty_folio()

Convert all users of fscache_set_page_dirty to use fscache_dirty_folio.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent 6f31a5a2
...@@ -345,8 +345,9 @@ The following facilities are provided to manage this: ...@@ -345,8 +345,9 @@ The following facilities are provided to manage this:
To support this, the following functions are provided::

	bool fscache_dirty_folio(struct address_space *mapping,
				 struct folio *folio,
				 struct fscache_cookie *cookie);
	void fscache_unpin_writeback(struct writeback_control *wbc,
				     struct fscache_cookie *cookie);
	void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
...@@ -354,7 +355,7 @@ To support this, the following functions are provided:: ...@@ -354,7 +355,7 @@ To support this, the following functions are provided::
					   const void *aux);

The *set* function is intended to be called from the filesystem's
``dirty_folio`` address space operation.  If ``I_PINNING_FSCACHE_WB`` is not
set, it sets that flag and increments the use count on the cookie (the caller
must already have called ``fscache_use_cookie()``).
......
...@@ -359,20 +359,20 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, ...@@ -359,20 +359,20 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 * Mark a folio as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
static int v9fs_set_page_dirty(struct page *page) static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{ {
struct v9fs_inode *v9inode = V9FS_I(page->mapping->host); struct v9fs_inode *v9inode = V9FS_I(mapping->host);
return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode)); return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
} }
#else #else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers #define v9fs_dirty_folio filemap_dirty_folio
#endif #endif
const struct address_space_operations v9fs_addr_operations = { const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage, .readpage = v9fs_vfs_readpage,
.readahead = v9fs_vfs_readahead, .readahead = v9fs_vfs_readahead,
.set_page_dirty = v9fs_set_page_dirty, .dirty_folio = v9fs_dirty_folio,
.writepage = v9fs_vfs_writepage, .writepage = v9fs_vfs_writepage,
.write_begin = v9fs_write_begin, .write_begin = v9fs_write_begin,
.write_end = v9fs_write_end, .write_end = v9fs_write_end,
......
...@@ -54,7 +54,7 @@ const struct inode_operations afs_file_inode_operations = { ...@@ -54,7 +54,7 @@ const struct inode_operations afs_file_inode_operations = {
const struct address_space_operations afs_file_aops = { const struct address_space_operations afs_file_aops = {
.readpage = afs_readpage, .readpage = afs_readpage,
.readahead = afs_readahead, .readahead = afs_readahead,
.set_page_dirty = afs_set_page_dirty, .dirty_folio = afs_dirty_folio,
.launder_folio = afs_launder_folio, .launder_folio = afs_launder_folio,
.releasepage = afs_releasepage, .releasepage = afs_releasepage,
.invalidate_folio = afs_invalidate_folio, .invalidate_folio = afs_invalidate_folio,
......
...@@ -1521,9 +1521,9 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *); ...@@ -1521,9 +1521,9 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
* write.c * write.c
*/ */
#ifdef CONFIG_AFS_FSCACHE #ifdef CONFIG_AFS_FSCACHE
extern int afs_set_page_dirty(struct page *); bool afs_dirty_folio(struct address_space *, struct folio *);
#else #else
#define afs_set_page_dirty __set_page_dirty_nobuffers #define afs_dirty_folio filemap_dirty_folio
#endif #endif
extern int afs_write_begin(struct file *file, struct address_space *mapping, extern int afs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags, loff_t pos, unsigned len, unsigned flags,
......
...@@ -22,9 +22,10 @@ static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len ...@@ -22,9 +22,10 @@ static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len
 * Mark a folio as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
int afs_set_page_dirty(struct page *page) bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{ {
return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host))); return fscache_dirty_folio(mapping, folio,
afs_vnode_cache(AFS_FS_I(mapping->host)));
} }
static void afs_folio_start_fscache(bool caching, struct folio *folio) static void afs_folio_start_fscache(bool caching, struct folio *folio)
{ {
......
...@@ -76,18 +76,17 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page) ...@@ -76,18 +76,17 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page)
 * Dirty a folio.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page) static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{ {
struct address_space *mapping = page->mapping;
struct inode *inode; struct inode *inode;
struct ceph_inode_info *ci; struct ceph_inode_info *ci;
struct ceph_snap_context *snapc; struct ceph_snap_context *snapc;
if (PageDirty(page)) { if (folio_test_dirty(folio)) {
dout("%p set_page_dirty %p idx %lu -- already dirty\n", dout("%p dirty_folio %p idx %lu -- already dirty\n",
mapping->host, page, page->index); mapping->host, folio, folio->index);
BUG_ON(!PagePrivate(page)); BUG_ON(!folio_get_private(folio));
return 0; return false;
} }
inode = mapping->host; inode = mapping->host;
...@@ -111,22 +110,22 @@ static int ceph_set_page_dirty(struct page *page) ...@@ -111,22 +110,22 @@ static int ceph_set_page_dirty(struct page *page)
if (ci->i_wrbuffer_ref == 0) if (ci->i_wrbuffer_ref == 0)
ihold(inode); ihold(inode);
++ci->i_wrbuffer_ref; ++ci->i_wrbuffer_ref;
dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n", "snapc %p seq %lld (%d snaps)\n",
mapping->host, page, page->index, mapping->host, folio, folio->index,
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps); snapc, snapc->seq, snapc->num_snaps);
spin_unlock(&ci->i_ceph_lock); spin_unlock(&ci->i_ceph_lock);
/* /*
* Reference snap context in page->private. Also set * Reference snap context in folio->private. Also set
* PagePrivate so that we get invalidate_folio callback. * PagePrivate so that we get invalidate_folio callback.
*/ */
BUG_ON(PagePrivate(page)); BUG_ON(folio_get_private(folio));
attach_page_private(page, snapc); folio_attach_private(folio, snapc);
return ceph_fscache_set_page_dirty(page); return ceph_fscache_dirty_folio(mapping, folio);
} }
/* /*
...@@ -1376,7 +1375,7 @@ const struct address_space_operations ceph_aops = { ...@@ -1376,7 +1375,7 @@ const struct address_space_operations ceph_aops = {
.writepages = ceph_writepages_start, .writepages = ceph_writepages_start,
.write_begin = ceph_write_begin, .write_begin = ceph_write_begin,
.write_end = ceph_write_end, .write_end = ceph_write_end,
.set_page_dirty = ceph_set_page_dirty, .dirty_folio = ceph_dirty_folio,
.invalidate_folio = ceph_invalidate_folio, .invalidate_folio = ceph_invalidate_folio,
.releasepage = ceph_releasepage, .releasepage = ceph_releasepage,
.direct_IO = noop_direct_IO, .direct_IO = noop_direct_IO,
......
...@@ -54,12 +54,12 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode, ...@@ -54,12 +54,12 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode,
fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode))); fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode)));
} }
static inline int ceph_fscache_set_page_dirty(struct page *page) static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
struct folio *folio)
{ {
struct inode *inode = page->mapping->host; struct ceph_inode_info *ci = ceph_inode(mapping->host);
struct ceph_inode_info *ci = ceph_inode(inode);
return fscache_set_page_dirty(page, ceph_fscache_cookie(ci)); return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
} }
static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq) static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
...@@ -133,9 +133,10 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode, ...@@ -133,9 +133,10 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode,
{ {
} }
static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	/* No fscache configured: fall through to the plain pagecache dirty. */
	return filemap_dirty_folio(mapping, folio);
}
static inline bool ceph_is_cache_enabled(struct inode *inode) static inline bool ceph_is_cache_enabled(struct inode *inode)
......
...@@ -4939,12 +4939,13 @@ static void cifs_swap_deactivate(struct file *file) ...@@ -4939,12 +4939,13 @@ static void cifs_swap_deactivate(struct file *file)
 * need to pin the cache object to write back to.
 */
#ifdef CONFIG_CIFS_FSCACHE
static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	/* Dirty the folio and pin the inode's cache object for writeback. */
	return fscache_dirty_folio(mapping, folio,
				   cifs_inode_cookie(mapping->host));
}
#else
/* No fscache: just set the dirty flag through the pagecache default. */
#define cifs_dirty_folio filemap_dirty_folio
#endif
const struct address_space_operations cifs_addr_ops = { const struct address_space_operations cifs_addr_ops = {
...@@ -4954,7 +4955,7 @@ const struct address_space_operations cifs_addr_ops = { ...@@ -4954,7 +4955,7 @@ const struct address_space_operations cifs_addr_ops = {
.writepages = cifs_writepages, .writepages = cifs_writepages,
.write_begin = cifs_write_begin, .write_begin = cifs_write_begin,
.write_end = cifs_write_end, .write_end = cifs_write_end,
.set_page_dirty = cifs_set_page_dirty, .dirty_folio = cifs_dirty_folio,
.releasepage = cifs_release_page, .releasepage = cifs_release_page,
.direct_IO = cifs_direct_io, .direct_IO = cifs_direct_io,
.invalidate_folio = cifs_invalidate_folio, .invalidate_folio = cifs_invalidate_folio,
...@@ -4979,7 +4980,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = { ...@@ -4979,7 +4980,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.writepages = cifs_writepages, .writepages = cifs_writepages,
.write_begin = cifs_write_begin, .write_begin = cifs_write_begin,
.write_end = cifs_write_end, .write_end = cifs_write_end,
.set_page_dirty = cifs_set_page_dirty, .dirty_folio = cifs_dirty_folio,
.releasepage = cifs_release_page, .releasepage = cifs_release_page,
.invalidate_folio = cifs_invalidate_folio, .invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio, .launder_folio = cifs_launder_folio,
......
...@@ -159,27 +159,29 @@ int __fscache_begin_write_operation(struct netfs_cache_resources *cres, ...@@ -159,27 +159,29 @@ int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
EXPORT_SYMBOL(__fscache_begin_write_operation); EXPORT_SYMBOL(__fscache_begin_write_operation);
/** /**
* fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
* @page: The page being dirtied * @mapping: The mapping the folio belongs to.
* @folio: The folio being dirtied.
* @cookie: The cookie referring to the cache object * @cookie: The cookie referring to the cache object
* *
* Set the dirty flag on a page and pin an in-use cache object in memory when * Set the dirty flag on a folio and pin an in-use cache object in memory
* dirtying a page so that writeback can later write to it. This is intended * so that writeback can later write to it. This is intended
* to be called from the filesystem's ->set_page_dirty() method. * to be called from the filesystem's ->dirty_folio() method.
* *
* Returns 1 if PG_dirty was set on the page, 0 otherwise. * Return: true if the dirty flag was set on the folio, false otherwise.
*/ */
int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie) bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
struct fscache_cookie *cookie)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = mapping->host;
bool need_use = false; bool need_use = false;
_enter(""); _enter("");
if (!__set_page_dirty_nobuffers(page)) if (!filemap_dirty_folio(mapping, folio))
return 0; return false;
if (!fscache_cookie_valid(cookie)) if (!fscache_cookie_valid(cookie))
return 1; return true;
if (!(inode->i_state & I_PINNING_FSCACHE_WB)) { if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
...@@ -192,9 +194,9 @@ int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie) ...@@ -192,9 +194,9 @@ int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie)
if (need_use) if (need_use)
fscache_use_cookie(cookie, true); fscache_use_cookie(cookie, true);
} }
return 1; return true;
} }
EXPORT_SYMBOL(fscache_set_page_dirty); EXPORT_SYMBOL(fscache_dirty_folio);
struct fscache_write_request { struct fscache_write_request {
struct netfs_cache_resources cache_resources; struct netfs_cache_resources cache_resources;
......
...@@ -616,9 +616,11 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie, ...@@ -616,9 +616,11 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
} }
#if __fscache_available
/* Mark a folio dirty and pin @cookie's cache object for later writeback. */
bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
			 struct fscache_cookie *cookie);
#else
/* No fscache built in: ignore the cookie, just dirty the folio. */
#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
	filemap_dirty_folio(MAPPING, FOLIO)
#endif
/** /**
...@@ -626,7 +628,7 @@ extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cook ...@@ -626,7 +628,7 @@ extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cook
* @wbc: The writeback control * @wbc: The writeback control
* @cookie: The cookie referring to the cache object * @cookie: The cookie referring to the cache object
* *
 * Unpin the writeback resources pinned by fscache_dirty_folio().  This is
* intended to be called by the netfs's ->write_inode() method. * intended to be called by the netfs's ->write_inode() method.
*/ */
static inline void fscache_unpin_writeback(struct writeback_control *wbc, static inline void fscache_unpin_writeback(struct writeback_control *wbc,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment