Commit cd57b771 authored by Matthew Wilcox, committed by Theodore Ts'o

ext4: Convert ext4_bio_write_page() to use a folio

Remove several calls to compound_head() and the last caller of
set_page_writeback_keepwrite(), so remove the wrapper too.

Also export bio_add_folio() as this is the first caller from a module.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Theodore Ts'o <tytso@mit.edu>
Link: https://lore.kernel.org/r/20230324180129.1220691-4-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent c76e14dc
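
Context for the diff below: the conversion swaps bio_add_page()'s byte-count return convention for bio_add_folio()'s bool. A minimal sketch of the two conventions side by side (the helper names are illustrative only, not part of the patch):

	#include <linux/bio.h>
	#include <linux/buffer_head.h>

	/* Sketch only: the same "add one buffer_head to a bio" step
	 * written both ways; only one would exist in real code. */

	/* Old convention: bio_add_page() returns the number of bytes added,
	 * so success means the return value equals the requested length. */
	static bool sketch_add_bh_page(struct bio *bio, struct page *page,
				       struct buffer_head *bh)
	{
		return bio_add_page(bio, page, bh->b_size, bh_offset(bh)) ==
		       bh->b_size;
	}

	/* New convention: bio_add_folio() simply returns true on success. */
	static bool sketch_add_bh_folio(struct bio *bio, struct folio *folio,
					struct buffer_head *bh)
	{
		return bio_add_folio(bio, folio, bh->b_size, bh_offset(bh));
	}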
--- a/block/bio.c
+++ b/block/bio.c
@@ -1159,6 +1159,7 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
 		return false;
 	return bio_add_page(bio, &folio->page, len, off) > 0;
 }
+EXPORT_SYMBOL(bio_add_folio);
 
 void __bio_release_pages(struct bio *bio, bool mark_dirty)
 {
...
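
The EXPORT_SYMBOL() added above is what the commit message's last line refers to: ext4 can be built as a module, making it the first modular caller of bio_add_folio(). A hedged sketch of why the export matters (hypothetical module code, not from the patch):

	#include <linux/module.h>
	#include <linux/bio.h>

	/* Without EXPORT_SYMBOL(bio_add_folio) in block/bio.c, this call
	 * would fail to resolve when the module is loaded. */
	static bool demo_add(struct bio *bio, struct folio *folio,
			     size_t len, size_t off)
	{
		return bio_add_folio(bio, folio, len, off);
	}

	MODULE_LICENSE("GPL");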
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -409,12 +409,10 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
 
 static void io_submit_add_bh(struct ext4_io_submit *io,
 			     struct inode *inode,
-			     struct page *pagecache_page,
-			     struct page *bounce_page,
+			     struct folio *folio,
+			     struct folio *io_folio,
 			     struct buffer_head *bh)
 {
-	int ret;
-
 	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
 			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
 submit_and_retry:
@@ -422,11 +420,9 @@ static void io_submit_add_bh(struct ext4_io_submit *io,
 	}
 	if (io->io_bio == NULL)
 		io_submit_init_bio(io, bh);
-	ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
-			   bh->b_size, bh_offset(bh));
-	if (ret != bh->b_size)
+	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
 		goto submit_and_retry;
-	wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
+	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
 	io->io_next_block++;
 }
@@ -434,8 +430,9 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			struct page *page,
 			int len)
 {
-	struct page *bounce_page = NULL;
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(page);
+	struct folio *io_folio = folio;
+	struct inode *inode = folio->mapping->host;
 	unsigned block_start;
 	struct buffer_head *bh, *head;
 	int ret = 0;
@@ -443,30 +440,30 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	struct writeback_control *wbc = io->io_wbc;
 	bool keep_towrite = false;
 
-	BUG_ON(!PageLocked(page));
-	BUG_ON(PageWriteback(page));
-	ClearPageError(page);
+	BUG_ON(!folio_test_locked(folio));
+	BUG_ON(folio_test_writeback(folio));
+	folio_clear_error(folio);
 
 	/*
 	 * Comments copied from block_write_full_page:
 	 *
-	 * The page straddles i_size.  It must be zeroed out on each and every
+	 * The folio straddles i_size.  It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped.  "A file is mapped
 	 * in multiples of the page size.  For a file that is not a multiple of
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (len < PAGE_SIZE)
-		zero_user_segment(page, len, PAGE_SIZE);
+	if (len < folio_size(folio))
+		folio_zero_segment(folio, len, folio_size(folio));
 	/*
 	 * In the first loop we prepare and mark buffers to submit. We have to
-	 * mark all buffers in the page before submitting so that
-	 * end_page_writeback() cannot be called from ext4_end_bio() when IO
+	 * mark all buffers in the folio before submitting so that
+	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
 	 * the second buffer.
 	 */
-	bh = head = page_buffers(page);
+	bh = head = folio_buffers(folio);
 	do {
 		block_start = bh_offset(bh);
 		if (block_start >= len) {
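
Note the zeroing change in the hunk above: len < PAGE_SIZE becomes len < folio_size(folio), so the tail-zeroing still covers the whole folio if a large folio ever reaches this path. A sketch of the equivalence (the helper name is illustrative):

	#include <linux/highmem.h>
	#include <linux/pagemap.h>

	/* Zero everything past the 'len' bytes being written back.
	 * For a single-page folio, folio_size() == PAGE_SIZE, so this is
	 * exactly the old zero_user_segment(page, len, PAGE_SIZE). */
	static void sketch_zero_tail(struct folio *folio, size_t len)
	{
		if (len < folio_size(folio))
			folio_zero_segment(folio, len, folio_size(folio));
	}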
@@ -481,14 +478,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			clear_buffer_dirty(bh);
 			/*
 			 * Keeping dirty some buffer we cannot write? Make sure
-			 * to redirty the page and keep TOWRITE tag so that
-			 * racing WB_SYNC_ALL writeback does not skip the page.
+			 * to redirty the folio and keep TOWRITE tag so that
+			 * racing WB_SYNC_ALL writeback does not skip the folio.
 			 * This happens e.g. when doing writeout for
 			 * transaction commit.
 			 */
 			if (buffer_dirty(bh)) {
-				if (!PageDirty(page))
-					redirty_page_for_writepage(wbc, page);
+				if (!folio_test_dirty(folio))
+					folio_redirty_for_writepage(wbc, folio);
 				keep_towrite = true;
 			}
 			continue;
@@ -500,11 +497,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		nr_to_submit++;
 	} while ((bh = bh->b_this_page) != head);
 
-	/* Nothing to submit? Just unlock the page... */
+	/* Nothing to submit? Just unlock the folio... */
 	if (!nr_to_submit)
 		return 0;
 
-	bh = head = page_buffers(page);
+	bh = head = folio_buffers(folio);
 
 	/*
 	 * If any blocks are being written to an encrypted file, encrypt them
@@ -516,6 +513,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
 		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
+		struct page *bounce_page;
 
 		/*
 		 * Since bounce page allocation uses a mempool, we can only use
@@ -542,7 +540,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			}
 			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
-			redirty_page_for_writepage(wbc, page);
+			folio_redirty_for_writepage(wbc, folio);
 			do {
 				if (buffer_async_write(bh)) {
 					clear_buffer_async_write(bh);
@@ -553,18 +551,16 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			return ret;
 		}
+		io_folio = page_folio(bounce_page);
 	}
 
-	if (keep_towrite)
-		set_page_writeback_keepwrite(page);
-	else
-		set_page_writeback(page);
+	__folio_start_writeback(folio, keep_towrite);
 
 	/* Now submit buffers to write */
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		io_submit_add_bh(io, inode, page, bounce_page, bh);
+		io_submit_add_bh(io, inode, folio, io_folio, bh);
 	} while ((bh = bh->b_this_page) != head);
 
 	return 0;
...
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -766,11 +766,6 @@ bool set_page_writeback(struct page *page);
 #define folio_start_writeback_keepwrite(folio)	\
 	__folio_start_writeback(folio, true)
 
-static inline void set_page_writeback_keepwrite(struct page *page)
-{
-	folio_start_writeback_keepwrite(page_folio(page));
-}
-
 static inline bool test_set_page_writeback(struct page *page)
 {
 	return set_page_writeback(page);
...
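
With the page wrapper deleted above, keepwrite semantics are expressed only through the bool argument of __folio_start_writeback(), as the ext4 hunk does with __folio_start_writeback(folio, keep_towrite). A sketch of how a remaining page-based caller would be converted (hypothetical caller name, not from the patch):

	#include <linux/page-flags.h>
	#include <linux/pagemap.h>

	/* The old branch
	 *	if (keep_towrite)
	 *		set_page_writeback_keepwrite(page);
	 *	else
	 *		set_page_writeback(page);
	 * collapses into a single call, with keepwrite selecting whether
	 * the PAGECACHE_TAG_TOWRITE tag is preserved. */
	static void sketch_start_writeback(struct page *page, bool keep_towrite)
	{
		__folio_start_writeback(page_folio(page), keep_towrite);
	}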