Commit 2c70fe16 authored by Qu Wenruo's avatar Qu Wenruo Committed by David Sterba

btrfs: remove the nr_ret parameter from __extent_writepage_io()

The parameter @nr_ret is used to tell the caller how many sectors have
been submitted for IO.

Then the callers check the @nr_ret value to determine whether they need to
manually clear PAGECACHE_TAG_DIRTY, because if we submitted no sector (e.g.
all sectors are beyond i_size) folio_start_writeback() is never called, and
thus the PAGECACHE_TAG_DIRTY tag will not be cleared.

Remove this parameter by:

- Moving the btrfs_folio_clear_writeback() call into
  __extent_writepage_io()
  So that if we didn't submit any IO, we manually call
  btrfs_folio_set_writeback() to clear PAGECACHE_TAG_DIRTY when
  the page is no longer dirty.

- Using a bool instead of an int to record whether we have submitted
  any sector.

- Using subpage-compatible helpers to end the folio writeback.
  This brings no change to the behavior, just for the sake of consistency.

  As for the call site inside __extent_writepage(), we're always called
  for the whole page, so the existing full page helper
  folio_(start|end)_writeback() is totally fine.

  For the call site inside extent_write_locked_range(), although we can
  have subpage range, folio_start_writeback() will only clear
  PAGECACHE_TAG_DIRTY if the page is no longer dirty, and the full folio
  will still be dirty if there is any subpage dirty range.
  Only when the last dirty subpage sector is cleared will
  folio_start_writeback() clear PAGECACHE_TAG_DIRTY.

  So no matter if we call the full page or subpage helper, the result
  is still the same, then just use the subpage helpers for consistency.
Signed-off-by: default avatarQu Wenruo <wqu@suse.com>
Reviewed-by: default avatarDavid Sterba <dsterba@suse.com>
Signed-off-by: default avatarDavid Sterba <dsterba@suse.com>
parent e39ba5df
...@@ -1409,7 +1409,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, ...@@ -1409,7 +1409,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
struct folio *folio, struct folio *folio,
u64 start, u32 len, u64 start, u32 len,
struct btrfs_bio_ctrl *bio_ctrl, struct btrfs_bio_ctrl *bio_ctrl,
loff_t i_size, int *nr_ret) loff_t i_size)
{ {
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned long range_bitmap = 0; unsigned long range_bitmap = 0;
...@@ -1422,11 +1422,11 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, ...@@ -1422,11 +1422,11 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
*/ */
unsigned long dirty_bitmap = 1; unsigned long dirty_bitmap = 1;
unsigned int bitmap_size = 1; unsigned int bitmap_size = 1;
bool submitted_io = false;
const u64 folio_start = folio_pos(folio); const u64 folio_start = folio_pos(folio);
u64 cur; u64 cur;
int bit; int bit;
int ret = 0; int ret = 0;
int nr = 0;
ASSERT(start >= folio_start && ASSERT(start >= folio_start &&
start + len <= folio_start + folio_size(folio)); start + len <= folio_start + folio_size(folio));
...@@ -1470,20 +1470,24 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, ...@@ -1470,20 +1470,24 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
} }
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size); ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
if (ret < 0) if (ret < 0)
goto out_error; goto out;
nr++; submitted_io = true;
} }
btrfs_folio_assert_not_dirty(fs_info, folio, start, len); btrfs_folio_assert_not_dirty(fs_info, folio, start, len);
*nr_ret = nr; out:
return 0;
out_error:
/* /*
* If we finish without problem, we should not only clear folio dirty, * If we didn't submitted any sector (>= i_size), folio dirty get
* but also empty subpage dirty bits * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
* by folio_start_writeback() if the folio is not dirty).
*
* Here we set writeback and clear for the range. If the full folio
* is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
*/ */
*nr_ret = nr; if (!submitted_io) {
btrfs_folio_set_writeback(fs_info, folio, start, len);
btrfs_folio_clear_writeback(fs_info, folio, start, len);
}
return ret; return ret;
} }
...@@ -1501,7 +1505,6 @@ static int __extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ct ...@@ -1501,7 +1505,6 @@ static int __extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ct
struct inode *inode = folio->mapping->host; struct inode *inode = folio->mapping->host;
const u64 page_start = folio_pos(folio); const u64 page_start = folio_pos(folio);
int ret; int ret;
int nr = 0;
size_t pg_offset; size_t pg_offset;
loff_t i_size = i_size_read(inode); loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_SHIFT; unsigned long end_index = i_size >> PAGE_SHIFT;
...@@ -1532,18 +1535,13 @@ static int __extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ct ...@@ -1532,18 +1535,13 @@ static int __extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ct
goto done; goto done;
ret = __extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio), ret = __extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
PAGE_SIZE, bio_ctrl, i_size, &nr); PAGE_SIZE, bio_ctrl, i_size);
if (ret == 1) if (ret == 1)
return 0; return 0;
bio_ctrl->wbc->nr_to_write--; bio_ctrl->wbc->nr_to_write--;
done: done:
if (nr == 0) {
/* make sure the mapping tag for page dirty gets cleared */
folio_start_writeback(folio);
folio_end_writeback(folio);
}
if (ret) { if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio, btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
page_start, PAGE_SIZE, !ret); page_start, PAGE_SIZE, !ret);
...@@ -2276,7 +2274,6 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f ...@@ -2276,7 +2274,6 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
u32 cur_len = cur_end + 1 - cur; u32 cur_len = cur_end + 1 - cur;
struct folio *folio; struct folio *folio;
int nr = 0;
folio = __filemap_get_folio(mapping, cur >> PAGE_SHIFT, 0, 0); folio = __filemap_get_folio(mapping, cur >> PAGE_SHIFT, 0, 0);
...@@ -2297,15 +2294,10 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f ...@@ -2297,15 +2294,10 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
ASSERT(folio_test_dirty(folio)); ASSERT(folio_test_dirty(folio));
ret = __extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len, ret = __extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
&bio_ctrl, i_size, &nr); &bio_ctrl, i_size);
if (ret == 1) if (ret == 1)
goto next_page; goto next_page;
/* Make sure the mapping tag for page dirty gets cleared. */
if (nr == 0) {
btrfs_folio_set_writeback(fs_info, folio, cur, cur_len);
btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
}
if (ret) { if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio, btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
cur, cur_len, !ret); cur, cur_len, !ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment