btrfs: Convert from set_page_dirty to dirty_folio

Optimise the non-DEBUG case to just call filemap_dirty_folio
directly.  The DEBUG case doesn't actually compile, but convert
it to dirty_folio anyway.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent 8fb72b4a
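
For context, a sketch of the conversion pattern this patch applies (not part of the commit): the address_space operation changes from int (*set_page_dirty)(struct page *) to bool (*dirty_folio)(struct address_space *, struct folio *), so the mapping is passed explicitly, and filemap_dirty_folio() is the folio replacement for __set_page_dirty_nobuffers(). A filesystem with no extra dirty bookkeeping can wire the op straight through; "example_aops" below is a hypothetical name, not btrfs code.

#include <linux/fs.h>
#include <linux/pagemap.h>	/* filemap_dirty_folio() */

/*
 * Minimal sketch of the conversion, assuming the filesystem's old
 * ->set_page_dirty did nothing beyond __set_page_dirty_nobuffers():
 * point ->dirty_folio at filemap_dirty_folio() directly, which is
 * exactly what the non-DEBUG btrfs case below does.
 */
static const struct address_space_operations example_aops = {
	/* Before: .set_page_dirty = __set_page_dirty_nobuffers, */
	.dirty_folio	= filemap_dirty_folio,
};
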
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1014,26 +1014,25 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset,
 	}
 }
 
-static int btree_set_page_dirty(struct page *page)
-{
 #ifdef DEBUG
-	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+static bool btree_dirty_folio(struct address_space *mapping,
+		struct folio *folio)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
 	struct btrfs_subpage *subpage;
 	struct extent_buffer *eb;
 	int cur_bit = 0;
-	u64 page_start = page_offset(page);
+	u64 page_start = folio_pos(folio);
 
 	if (fs_info->sectorsize == PAGE_SIZE) {
-		BUG_ON(!PagePrivate(page));
-		eb = (struct extent_buffer *)page->private;
+		eb = folio_get_private(folio);
 		BUG_ON(!eb);
 		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 		BUG_ON(!atomic_read(&eb->refs));
 		btrfs_assert_tree_write_locked(eb);
-		return __set_page_dirty_nobuffers(page);
+		return filemap_dirty_folio(mapping, folio);
 	}
-	ASSERT(PagePrivate(page) && page->private);
-	subpage = (struct btrfs_subpage *)page->private;
+	subpage = folio_get_private(folio);
 
 	ASSERT(subpage->dirty_bitmap);
 	while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
@@ -1059,9 +1058,11 @@ static int btree_set_page_dirty(struct page *page)
 
 		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
 	}
-#endif
-	return __set_page_dirty_nobuffers(page);
+	return filemap_dirty_folio(mapping, folio);
 }
+#else
+#define btree_dirty_folio filemap_dirty_folio
+#endif
 
 static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
@@ -1070,7 +1071,7 @@ static const struct address_space_operations btree_aops = {
 #ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
 #endif
-	.set_page_dirty = btree_set_page_dirty,
+	.dirty_folio = btree_dirty_folio,
 };
 
 struct extent_buffer *btrfs_find_create_tree_block(
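
Two folio API details in the hunks above are worth noting: dirty_folio receives the address_space explicitly, so the DEBUG path can use btrfs_sb(mapping->host->i_sb) instead of chasing page->mapping, and folio_get_private() replaces the open-coded cast of page->private. A short sketch of the accessor change; the helper name is hypothetical, not from this patch.

#include <linux/pagemap.h>	/* folio_get_private() */

struct btrfs_subpage;	/* opaque here; defined in fs/btrfs/subpage.h */

/*
 * Hypothetical helper illustrating the accessor: folio_get_private()
 * returns the folio's private pointer (stored earlier via
 * folio_attach_private()), replacing the old
 * (struct btrfs_subpage *)page->private cast.
 */
static struct btrfs_subpage *example_subpage(struct folio *folio)
{
	return folio_get_private(folio);
}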