fs: Convert is_dirty_writeback() to take a folio

Pass a folio instead of a page to aops->is_dirty_writeback().
Convert both implementations and the caller.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent a42634a6
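
As a rough illustration of the conversion pattern for any filesystem making
the same change (the "myfs" names below are hypothetical, not part of this
commit):

	/* Before: the callback received a struct page */
	static void myfs_is_dirty_writeback_old(struct page *page,
						bool *dirty, bool *writeback)
	{
		*dirty = PageDirty(page);
		*writeback = PageWriteback(page);
	}

	/* After: it receives the folio directly, with no &folio->page
	 * round trip, and uses the folio flag accessors. */
	static void myfs_is_dirty_writeback(struct folio *folio,
					    bool *dirty, bool *writeback)
	{
		*dirty = folio_test_dirty(folio);
		*writeback = folio_test_writeback(folio);
	}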
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined:
 	bool (*is_partially_uptodate) (struct folio *, size_t from,
 				       size_t count);
-	void (*is_dirty_writeback) (struct page *, bool *, bool *);
+	void (*is_dirty_writeback)(struct folio *, bool *, bool *);
 	int (*error_remove_page) (struct mapping *mapping, struct page *page);
 	int (*swap_activate)(struct file *);
 	int (*swap_deactivate)(struct file *);
@@ -932,14 +932,14 @@ cache in your filesystem. The following members are defined:
 	without needing I/O to bring the whole page up to date.
 
 ``is_dirty_writeback``
-	Called by the VM when attempting to reclaim a page.  The VM uses
+	Called by the VM when attempting to reclaim a folio.  The VM uses
 	dirty and writeback information to determine if it needs to
 	stall to allow flushers a chance to complete some IO.
-	Ordinarily it can use PageDirty and PageWriteback but some
-	filesystems have more complex state (unstable pages in NFS
+	Ordinarily it can use folio_test_dirty and folio_test_writeback but
+	some filesystems have more complex state (unstable folios in NFS
 	prevent reclaim) or do not set those flags due to locking
 	problems.  This callback allows a filesystem to indicate to the
-	VM if a page should be treated as dirty or writeback for the
+	VM if a folio should be treated as dirty or writeback for the
 	purposes of stalling.
 
 ``error_remove_page``
......
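
As the documentation above says, the callback exists for filesystems whose
dirty/writeback state is not fully captured by the folio flags. A minimal
sketch of such an implementation, assuming a hypothetical filesystem that
counts in-flight commits (struct examplefs_inode, EXAMPLEFS_I() and
commits_pending are invented for illustration, loosely modelled on the NFS
case converted below):

	/* Invented types for illustration; a real filesystem has its own. */
	struct examplefs_inode {
		atomic_t commits_pending;  /* writes being committed */
		struct inode vfs_inode;
	};

	static inline struct examplefs_inode *EXAMPLEFS_I(struct inode *inode)
	{
		return container_of(inode, struct examplefs_inode, vfs_inode);
	}

	static void examplefs_is_dirty_writeback(struct folio *folio,
						 bool *dirty, bool *writeback)
	{
		struct examplefs_inode *ei = EXAMPLEFS_I(folio->mapping->host);

		/* Data being committed behaves like writeback: reclaim
		 * should stall rather than spin on these folios. */
		if (atomic_read(&ei->commits_pending))
			*writeback = true;

		/* Attached private data means the folio is not freeable
		 * yet, so report it as dirty. */
		if (folio_test_private(folio))
			*dirty = true;
	}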
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh)
 EXPORT_SYMBOL(unlock_buffer);
 
 /*
- * Returns if the page has dirty or writeback buffers.  If all the buffers
- * are unlocked and clean then the PageDirty information is stale. If
- * any of the pages are locked, it is assumed they are locked for IO.
+ * Returns if the folio has dirty or writeback buffers.  If all the buffers
+ * are unlocked and clean then the folio_test_dirty information is stale. If
+ * any of the buffers are locked, it is assumed they are locked for IO.
  */
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
 				     bool *dirty, bool *writeback)
 {
 	struct buffer_head *head, *bh;
 	*dirty = false;
 	*writeback = false;
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 
-	if (!page_has_buffers(page))
+	head = folio_buffers(folio);
+	if (!head)
 		return;
 
-	if (PageWriteback(page))
+	if (folio_test_writeback(folio))
 		*writeback = true;
 
-	head = page_buffers(page);
 	bh = head;
 	do {
 		if (buffer_locked(bh))
......
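
The hunk is truncated above. Presumably the remainder walks the circular
b_this_page list and ORs each buffer's state into the two results, along
these lines (a sketch of the unchanged tail, not the verbatim diff):

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);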
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -430,19 +430,16 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
 	return nfs_fscache_release_page(page, gfp);
 }
 
-static void nfs_check_dirty_writeback(struct page *page,
+static void nfs_check_dirty_writeback(struct folio *folio,
 				bool *dirty, bool *writeback)
 {
 	struct nfs_inode *nfsi;
-	struct address_space *mapping = page_file_mapping(page);
-
-	if (!mapping || PageSwapCache(page))
-		return;
+	struct address_space *mapping = folio->mapping;
 
 	/*
-	 * Check if an unstable page is currently being committed and
-	 * if so, have the VM treat it as if the page is under writeback
-	 * so it will not block due to pages that will shortly be freeable.
+	 * Check if an unstable folio is currently being committed and
+	 * if so, have the VM treat it as if the folio is under writeback
+	 * so it will not block due to folios that will shortly be freeable.
 	 */
 	nfsi = NFS_I(mapping->host);
 	if (atomic_read(&nfsi->commit_info.rpcs_out)) {
@@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page,
 	}
 
 	/*
-	 * If PagePrivate() is set, then the page is not freeable and as the
-	 * inode is not being committed, it's not going to be cleaned in the
-	 * near future so treat it as dirty
+	 * If the private flag is set, then the folio is not freeable
+	 * and as the inode is not being committed, it's not going to
+	 * be cleaned in the near future so treat it as dirty
 	 */
-	if (PagePrivate(page))
+	if (folio_test_private(folio))
 		*dirty = true;
 }
......
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -146,7 +146,7 @@ BUFFER_FNS(Defer_Completion, defer_completion)
 #define page_has_buffers(page)	PagePrivate(page)
 #define folio_buffers(folio)		folio_get_private(folio)
 
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
 				     bool *dirty, bool *writeback);
 /*
......
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -369,7 +369,7 @@ struct address_space_operations {
 	int (*launder_folio)(struct folio *);
 	bool (*is_partially_uptodate) (struct folio *, size_t from,
 			size_t count);
-	void (*is_dirty_writeback) (struct page *, bool *, bool *);
+	void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
 	int (*error_remove_page)(struct address_space *, struct page *);
 
 	/* swapfile support */
......
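
For reference, wiring the converted callback into a filesystem's
address_space_operations would look roughly like this (hypothetical "myfs"
names again; only the .is_dirty_writeback line is affected by this commit):

	static const struct address_space_operations myfs_aops = {
		.dirty_folio		= filemap_dirty_folio,
		.is_dirty_writeback	= myfs_is_dirty_writeback,
	};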
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
 	mapping = folio_mapping(folio);
 	if (mapping && mapping->a_ops->is_dirty_writeback)
-		mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback);
+		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
 }
 
 static struct page *alloc_demote_page(struct page *page, unsigned long node)
......
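
On the consumer side, reclaim uses the two booleans to decide whether to
stall for the flusher threads. Schematically (a simplified sketch, not the
actual vmscan logic; wait_for_flushers() is a hypothetical helper):

	bool dirty, writeback;

	folio_check_dirty_writeback(folio, &dirty, &writeback);
	if (dirty || writeback)
		/* give the flushers a chance to complete some IO */
		wait_for_flushers();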