ext4: Convert invalidatepage to invalidate_folio

Extensive changes, but fairly mechanical.
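The pattern of the conversion is uniform: an ->invalidatepage implementation that takes a struct page and unsigned int offsets becomes an ->invalidate_folio implementation that takes a struct folio and size_t offsets, PAGE_SIZE checks become folio_size(), and page-flag helpers become their folio equivalents. A minimal before/after sketch (illustrative only; the example_* names are hypothetical and not part of this patch):

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/buffer_head.h>

	/* Before: page-based callback, unsigned int offsets. */
	static void example_invalidatepage(struct page *page, unsigned int offset,
					   unsigned int length)
	{
		if (offset == 0 && length == PAGE_SIZE)
			ClearPageChecked(page);
		block_invalidatepage(page, offset, length);
	}

	/* After: folio-based callback, size_t offsets. */
	static void example_invalidate_folio(struct folio *folio, size_t offset,
					     size_t length)
	{
		if (offset == 0 && length == folio_size(folio))
			folio_clear_checked(folio);
		block_invalidate_folio(folio, offset, length);
	}

The address_space_operations wiring changes the same way: a ".invalidatepage = example_invalidatepage" entry becomes ".invalidate_folio = example_invalidate_folio".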
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent 39653e69
@@ -184,7 +184,7 @@ void ext4_evict_inode(struct inode *inode)
 	 * journal. So although mm thinks everything is clean and
 	 * ready for reaping the inode might still have some pages to
 	 * write in the running transaction or waiting to be
-	 * checkpointed. Thus calling jbd2_journal_invalidatepage()
+	 * checkpointed. Thus calling jbd2_journal_invalidate_folio()
	 * (via truncate_inode_pages()) to discard these buffers can
	 * cause data loss. Also even if we did not discard these
	 * buffers, we would have no way to find them after the inode
@@ -3186,7 +3186,7 @@ static void ext4_readahead(struct readahead_control *rac)
 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
 				   size_t length)
 {
-	trace_ext4_invalidatepage(&folio->page, offset, length);
+	trace_ext4_invalidate_folio(folio, offset, length);
 
 	/* No journalling happens on data buffers when this function is used */
 	WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
@@ -3194,29 +3194,28 @@ static void ext4_invalidate_folio(struct folio *folio, size_t offset,
 	block_invalidate_folio(folio, offset, length);
 }
 
-static int __ext4_journalled_invalidatepage(struct page *page,
-					    unsigned int offset,
-					    unsigned int length)
+static int __ext4_journalled_invalidate_folio(struct folio *folio,
+					size_t offset, size_t length)
 {
-	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+	journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
 
-	trace_ext4_journalled_invalidatepage(page, offset, length);
+	trace_ext4_journalled_invalidate_folio(folio, offset, length);
 
 	/*
 	 * If it's a full truncate we just forget about the pending dirtying
 	 */
-	if (offset == 0 && length == PAGE_SIZE)
-		ClearPageChecked(page);
+	if (offset == 0 && length == folio_size(folio))
+		folio_clear_checked(folio);
 
-	return jbd2_journal_invalidatepage(journal, page, offset, length);
+	return jbd2_journal_invalidate_folio(journal, folio, offset, length);
 }
 
 /* Wrapper for aops... */
-static void ext4_journalled_invalidatepage(struct page *page,
-					   unsigned int offset,
-					   unsigned int length)
+static void ext4_journalled_invalidate_folio(struct folio *folio,
+					     size_t offset,
+					     size_t length)
 {
-	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
+	WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
 }
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3601,7 +3600,7 @@ static const struct address_space_operations ext4_journalled_aops = {
 	.write_end		= ext4_journalled_write_end,
 	.set_page_dirty		= ext4_journalled_set_page_dirty,
 	.bmap			= ext4_bmap,
-	.invalidatepage		= ext4_journalled_invalidatepage,
+	.invalidate_folio	= ext4_journalled_invalidate_folio,
 	.releasepage		= ext4_releasepage,
 	.direct_IO		= noop_direct_IO,
 	.is_partially_uptodate	= block_is_partially_uptodate,
@@ -5204,13 +5203,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 }
 
 /*
- * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
- * buffers that are attached to a page stradding i_size and are undergoing
+ * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
+ * buffers that are attached to a folio straddling i_size and are undergoing
  * commit. In that case we have to wait for commit to finish and try again.
  */
 static void ext4_wait_for_tail_page_commit(struct inode *inode)
 {
-	struct page *page;
 	unsigned offset;
 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 	tid_t commit_tid = 0;
@@ -5218,25 +5216,25 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
 	offset = inode->i_size & (PAGE_SIZE - 1);
 	/*
-	 * If the page is fully truncated, we don't need to wait for any commit
-	 * (and we even should not as __ext4_journalled_invalidatepage() may
-	 * strip all buffers from the page but keep the page dirty which can then
-	 * confuse e.g. concurrent ext4_writepage() seeing dirty page without
+	 * If the folio is fully truncated, we don't need to wait for any commit
+	 * (and we even should not as __ext4_journalled_invalidate_folio() may
+	 * strip all buffers from the folio but keep the folio dirty which can then
+	 * confuse e.g. concurrent ext4_writepage() seeing dirty folio without
 	 * buffers). Also we don't need to wait for any commit if all buffers in
-	 * the page remain valid. This is most beneficial for the common case of
+	 * the folio remain valid. This is most beneficial for the common case of
 	 * blocksize == PAGESIZE.
 	 */
 	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
 		return;
 
 	while (1) {
-		page = find_lock_page(inode->i_mapping,
+		struct folio *folio = filemap_lock_folio(inode->i_mapping,
 				inode->i_size >> PAGE_SHIFT);
-		if (!page)
+		if (!folio)
 			return;
-		ret = __ext4_journalled_invalidatepage(page, offset,
-						PAGE_SIZE - offset);
-		unlock_page(page);
-		put_page(page);
+		ret = __ext4_journalled_invalidate_folio(folio, offset,
+						folio_size(folio) - offset);
+		folio_unlock(folio);
+		folio_put(folio);
 		if (ret != -EBUSY)
 			return;
 		commit_tid = 0;
...
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(jbd2_journal_start_commit);
 EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
 EXPORT_SYMBOL(jbd2_journal_wipe);
 EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
-EXPORT_SYMBOL(jbd2_journal_invalidatepage);
+EXPORT_SYMBOL(jbd2_journal_invalidate_folio);
 EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
 EXPORT_SYMBOL(jbd2_journal_force_commit);
 EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
...
@@ -2219,14 +2219,14 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 }
 
 /*
- * jbd2_journal_invalidatepage
+ * jbd2_journal_invalidate_folio
  *
  * This code is tricky. It has a number of cases to deal with.
  *
 * There are two invariants which this code relies on:
 *
- * i_size must be updated on disk before we start calling invalidatepage on the
- * data.
+ * i_size must be updated on disk before we start calling invalidate_folio
+ * on the data.
 *
 * This is done in ext3 by defining an ext3_setattr method which
 * updates i_size before truncate gets going. By maintaining this
@@ -2428,9 +2428,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 }
 
 /**
- * jbd2_journal_invalidatepage()
+ * jbd2_journal_invalidate_folio()
 * @journal: journal to use for flush...
- * @page: page to flush
+ * @folio: folio to flush
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
@@ -2439,30 +2439,29 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 * the page is straddling i_size. Caller then has to wait for current commit
 * and try again.
 */
-int jbd2_journal_invalidatepage(journal_t *journal,
-				struct page *page,
-				unsigned int offset,
-				unsigned int length)
+int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
+				size_t offset, size_t length)
 {
 	struct buffer_head *head, *bh, *next;
 	unsigned int stop = offset + length;
 	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_SIZE);
+	int partial_page = (offset || length < folio_size(folio));
 	int may_free = 1;
 	int ret = 0;
 
-	if (!PageLocked(page))
+	if (!folio_test_locked(folio))
 		BUG();
-	if (!page_has_buffers(page))
+	head = folio_buffers(folio);
+	if (!head)
 		return 0;
 
-	BUG_ON(stop > PAGE_SIZE || stop < length);
+	BUG_ON(stop > folio_size(folio) || stop < length);
 
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
 	 * cautious in our locking. */
 
-	head = bh = page_buffers(page);
+	bh = head;
 	do {
 		unsigned int next_off = curr_off + bh->b_size;
 		next = bh->b_this_page;
...
@@ -1530,8 +1530,8 @@ void jbd2_journal_set_triggers(struct buffer_head *,
 				struct jbd2_buffer_trigger_type *type);
 extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
 extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
-extern int jbd2_journal_invalidatepage(journal_t *,
-				struct page *, unsigned int, unsigned int);
+int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
+				size_t offset, size_t length);
 extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
 extern int jbd2_journal_stop(handle_t *);
 extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
...
@@ -422,6 +422,24 @@ static inline struct folio *filemap_get_folio(struct address_space *mapping,
 	return __filemap_get_folio(mapping, index, 0, 0);
 }
 
+/**
+ * filemap_lock_folio - Find and lock a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned locked with an increased refcount.
+ *
+ * Context: May sleep.
+ * Return: A folio or %NULL if there is no folio in the cache for this
+ * index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_lock_folio(struct address_space *mapping,
+					pgoff_t index)
+{
+	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
+}
+
 /**
  * find_get_page - find and get a page reference
  * @mapping: the address_space to search
...
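The filemap_lock_folio() helper added above is what the ext4 change earlier in this patch uses in place of find_lock_page(). Its lock/use/unlock/put pattern looks roughly like the following sketch (illustrative only; the example_* function is hypothetical):

	#include <linux/pagemap.h>

	/* Find the cached folio at @index, locked; do nothing if absent. */
	static void example_with_locked_folio(struct address_space *mapping,
					      pgoff_t index)
	{
		struct folio *folio = filemap_lock_folio(mapping, index);

		if (!folio)			/* nothing cached at this index */
			return;

		/* ... inspect or modify the locked folio here ... */

		folio_unlock(folio);		/* drop the lock taken via FGP_LOCK */
		folio_put(folio);		/* drop the reference from the lookup */
	}

This mirrors the loop in ext4_wait_for_tail_page_commit() above, which retries while __ext4_journalled_invalidate_folio() returns -EBUSY.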
@@ -597,44 +597,44 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
 	TP_ARGS(page)
 );
 
-DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
-	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
+	TP_PROTO(struct folio *folio, size_t offset, size_t length),
 
-	TP_ARGS(page, offset, length),
+	TP_ARGS(folio, offset, length),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
 		__field( ino_t, ino )
 		__field( pgoff_t, index )
-		__field( unsigned int, offset )
-		__field( unsigned int, length )
+		__field( size_t, offset )
+		__field( size_t, length )
 	),
 
 	TP_fast_assign(
-		__entry->dev = page->mapping->host->i_sb->s_dev;
-		__entry->ino = page->mapping->host->i_ino;
-		__entry->index = page->index;
+		__entry->dev = folio->mapping->host->i_sb->s_dev;
+		__entry->ino = folio->mapping->host->i_ino;
+		__entry->index = folio->index;
 		__entry->offset = offset;
 		__entry->length = length;
 	),
 
-	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
+	TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  (unsigned long) __entry->index,
 		  __entry->offset, __entry->length)
 );
 
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
-	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio,
+	TP_PROTO(struct folio *folio, size_t offset, size_t length),
 
-	TP_ARGS(page, offset, length)
+	TP_ARGS(folio, offset, length)
 );
 
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
-	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio,
+	TP_PROTO(struct folio *folio, size_t offset, size_t length),
 
-	TP_ARGS(page, offset, length)
+	TP_ARGS(folio, offset, length)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
...