fs: Convert is_dirty_writeback() to take a folio

Pass a folio instead of a page to aops->is_dirty_writeback().
Convert both implementations and the caller.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent a42634a6
...@@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined: ...@@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined:
bool (*is_partially_uptodate) (struct folio *, size_t from, bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count); size_t count);
void (*is_dirty_writeback) (struct page *, bool *, bool *); void (*is_dirty_writeback)(struct folio *, bool *, bool *);
int (*error_remove_page) (struct mapping *mapping, struct page *page); int (*error_remove_page) (struct mapping *mapping, struct page *page);
int (*swap_activate)(struct file *); int (*swap_activate)(struct file *);
int (*swap_deactivate)(struct file *); int (*swap_deactivate)(struct file *);
...@@ -932,14 +932,14 @@ cache in your filesystem. The following members are defined: ...@@ -932,14 +932,14 @@ cache in your filesystem. The following members are defined:
without needing I/O to bring the whole page up to date. without needing I/O to bring the whole page up to date.
``is_dirty_writeback`` ``is_dirty_writeback``
Called by the VM when attempting to reclaim a page. The VM uses Called by the VM when attempting to reclaim a folio. The VM uses
dirty and writeback information to determine if it needs to dirty and writeback information to determine if it needs to
stall to allow flushers a chance to complete some IO. stall to allow flushers a chance to complete some IO.
Ordinarily it can use PageDirty and PageWriteback but some Ordinarily it can use folio_test_dirty and folio_test_writeback but
filesystems have more complex state (unstable pages in NFS some filesystems have more complex state (unstable folios in NFS
prevent reclaim) or do not set those flags due to locking prevent reclaim) or do not set those flags due to locking
problems. This callback allows a filesystem to indicate to the problems. This callback allows a filesystem to indicate to the
VM if a page should be treated as dirty or writeback for the VM if a folio should be treated as dirty or writeback for the
purposes of stalling. purposes of stalling.
``error_remove_page`` ``error_remove_page``
......
...@@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh) ...@@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh)
EXPORT_SYMBOL(unlock_buffer); EXPORT_SYMBOL(unlock_buffer);
/* /*
* Returns if the page has dirty or writeback buffers. If all the buffers * Returns if the folio has dirty or writeback buffers. If all the buffers
* are unlocked and clean then the PageDirty information is stale. If * are unlocked and clean then the folio_test_dirty information is stale. If
* any of the pages are locked, it is assumed they are locked for IO. * any of the buffers are locked, it is assumed they are locked for IO.
*/ */
void buffer_check_dirty_writeback(struct page *page, void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback) bool *dirty, bool *writeback)
{ {
struct buffer_head *head, *bh; struct buffer_head *head, *bh;
*dirty = false; *dirty = false;
*writeback = false; *writeback = false;
BUG_ON(!PageLocked(page)); BUG_ON(!folio_test_locked(folio));
if (!page_has_buffers(page)) head = folio_buffers(folio);
if (!head)
return; return;
if (PageWriteback(page)) if (folio_test_writeback(folio))
*writeback = true; *writeback = true;
head = page_buffers(page);
bh = head; bh = head;
do { do {
if (buffer_locked(bh)) if (buffer_locked(bh))
......
...@@ -430,19 +430,16 @@ static int nfs_release_page(struct page *page, gfp_t gfp) ...@@ -430,19 +430,16 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
return nfs_fscache_release_page(page, gfp); return nfs_fscache_release_page(page, gfp);
} }
static void nfs_check_dirty_writeback(struct page *page, static void nfs_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback) bool *dirty, bool *writeback)
{ {
struct nfs_inode *nfsi; struct nfs_inode *nfsi;
struct address_space *mapping = page_file_mapping(page); struct address_space *mapping = folio->mapping;
if (!mapping || PageSwapCache(page))
return;
/* /*
* Check if an unstable page is currently being committed and * Check if an unstable folio is currently being committed and
* if so, have the VM treat it as if the page is under writeback * if so, have the VM treat it as if the folio is under writeback
* so it will not block due to pages that will shortly be freeable. * so it will not block due to folios that will shortly be freeable.
*/ */
nfsi = NFS_I(mapping->host); nfsi = NFS_I(mapping->host);
if (atomic_read(&nfsi->commit_info.rpcs_out)) { if (atomic_read(&nfsi->commit_info.rpcs_out)) {
...@@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page, ...@@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page,
} }
/* /*
* If PagePrivate() is set, then the page is not freeable and as the * If the private flag is set, then the folio is not freeable
* inode is not being committed, it's not going to be cleaned in the * and as the inode is not being committed, it's not going to
* near future so treat it as dirty * be cleaned in the near future so treat it as dirty
*/ */
if (PagePrivate(page)) if (folio_test_private(folio))
*dirty = true; *dirty = true;
} }
......
...@@ -146,7 +146,7 @@ BUFFER_FNS(Defer_Completion, defer_completion) ...@@ -146,7 +146,7 @@ BUFFER_FNS(Defer_Completion, defer_completion)
#define page_has_buffers(page) PagePrivate(page) #define page_has_buffers(page) PagePrivate(page)
#define folio_buffers(folio) folio_get_private(folio) #define folio_buffers(folio) folio_get_private(folio)
void buffer_check_dirty_writeback(struct page *page, void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback); bool *dirty, bool *writeback);
/* /*
......
...@@ -369,7 +369,7 @@ struct address_space_operations { ...@@ -369,7 +369,7 @@ struct address_space_operations {
int (*launder_folio)(struct folio *); int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from, bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count); size_t count);
void (*is_dirty_writeback) (struct page *, bool *, bool *); void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
int (*error_remove_page)(struct address_space *, struct page *); int (*error_remove_page)(struct address_space *, struct page *);
/* swapfile support */ /* swapfile support */
......
...@@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio, ...@@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
mapping = folio_mapping(folio); mapping = folio_mapping(folio);
if (mapping && mapping->a_ops->is_dirty_writeback) if (mapping && mapping->a_ops->is_dirty_writeback)
mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback); mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
} }
static struct page *alloc_demote_page(struct page *page, unsigned long node) static struct page *alloc_demote_page(struct page *page, unsigned long node)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment