Commit 4d934a5e authored by Matthew Wilcox, committed by Theodore Ts'o

ext4: Convert ext4_write_begin() to use a folio

Remove a lot of calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20230324180129.1220691-17-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 6b90d413
...@@ -1139,7 +1139,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, ...@@ -1139,7 +1139,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
int ret, needed_blocks; int ret, needed_blocks;
handle_t *handle; handle_t *handle;
int retries = 0; int retries = 0;
struct page *page; struct folio *folio;
pgoff_t index; pgoff_t index;
unsigned from, to; unsigned from, to;
...@@ -1166,68 +1166,69 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, ...@@ -1166,68 +1166,69 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
} }
/* /*
* grab_cache_page_write_begin() can take a long time if the * __filemap_get_folio() can take a long time if the
* system is thrashing due to memory pressure, or if the page * system is thrashing due to memory pressure, or if the folio
* is being written back. So grab it first before we start * is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate * the transaction handle. This also allows us to allocate
* the page (if needed) without using GFP_NOFS. * the folio (if needed) without using GFP_NOFS.
*/ */
retry_grab: retry_grab:
page = grab_cache_page_write_begin(mapping, index); folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!page) mapping_gfp_mask(mapping));
if (!folio)
return -ENOMEM; return -ENOMEM;
/* /*
* The same as page allocation, we prealloc buffer heads before * The same as page allocation, we prealloc buffer heads before
* starting the handle. * starting the handle.
*/ */
if (!page_has_buffers(page)) if (!folio_buffers(folio))
create_empty_buffers(page, inode->i_sb->s_blocksize, 0); create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);
unlock_page(page); folio_unlock(folio);
retry_journal: retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
if (IS_ERR(handle)) { if (IS_ERR(handle)) {
put_page(page); folio_put(folio);
return PTR_ERR(handle); return PTR_ERR(handle);
} }
lock_page(page); folio_lock(folio);
if (page->mapping != mapping) { if (folio->mapping != mapping) {
/* The page got truncated from under us */ /* The folio got truncated from under us */
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
ext4_journal_stop(handle); ext4_journal_stop(handle);
goto retry_grab; goto retry_grab;
} }
/* In case writeback began while the page was unlocked */ /* In case writeback began while the folio was unlocked */
wait_for_stable_page(page); folio_wait_stable(folio);
#ifdef CONFIG_FS_ENCRYPTION #ifdef CONFIG_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode)) if (ext4_should_dioread_nolock(inode))
ret = ext4_block_write_begin(page, pos, len, ret = ext4_block_write_begin(&folio->page, pos, len,
ext4_get_block_unwritten); ext4_get_block_unwritten);
else else
ret = ext4_block_write_begin(page, pos, len, ret = ext4_block_write_begin(&folio->page, pos, len,
ext4_get_block); ext4_get_block);
#else #else
if (ext4_should_dioread_nolock(inode)) if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ret = __block_write_begin(&folio->page, pos, len,
ext4_get_block_unwritten); ext4_get_block_unwritten);
else else
ret = __block_write_begin(page, pos, len, ext4_get_block); ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
#endif #endif
if (!ret && ext4_should_journal_data(inode)) { if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode, ret = ext4_walk_page_buffers(handle, inode,
page_buffers(page), from, to, NULL, folio_buffers(folio), from, to,
do_journal_get_write_access); NULL, do_journal_get_write_access);
} }
if (ret) { if (ret) {
bool extended = (pos + len > inode->i_size) && bool extended = (pos + len > inode->i_size) &&
!ext4_verity_in_progress(inode); !ext4_verity_in_progress(inode);
unlock_page(page); folio_unlock(folio);
/* /*
* __block_write_begin may have instantiated a few blocks * __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need * outside i_size. Trim these off again. Don't need
...@@ -1255,10 +1256,10 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, ...@@ -1255,10 +1256,10 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (ret == -ENOSPC && if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries)) ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal; goto retry_journal;
put_page(page); folio_put(folio);
return ret; return ret;
} }
*pagep = page; *pagep = &folio->page;
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment