Commit e4f94347 authored by Qu Wenruo, committed by David Sterba

btrfs: subpage: add bitmap for PageChecked flag

Although btrfs makes very limited use of the PageChecked flag, it is
still a page flag that is not yet subpage compatible.

Fix this by introducing btrfs_subpage_info::checked_offset to do the
conversion.

Most call sites, especially the free-space cache, COW fixup and
btrfs_invalidatepage(), work in full page mode anyway.

The remaining call sites now work in subpage compatible mode.

Some call sites need extra modification:

- btrfs_drop_pages()
  Needs extra parameters to get the real range in which we need to
  clear the checked flag.

  Also, since btrfs_drop_pages() can be handed pages beyond the dirtied
  range, update btrfs_subpage_clamp_range() to handle that case by
  setting @len to 0 if the page is entirely beyond the target range.

- btrfs_invalidatepage()
  We need to call the subpage helper before calling
  __btrfs_releasepage(), or it will trigger an ASSERT() as
  page->private will already have been cleared.

- btrfs_verify_data_csum()
  In theory we don't need the bbio->csum check anymore, but it won't
  hurt.  Just change the comment.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 6ec9765d
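Before reading the diff, it can help to see how a byte range inside one page maps onto the "checked" region of the subpage bitmap. The following is a standalone sketch, not kernel code: the constants and helper name are made up for the example, and only the shift-by-sectorsize_bits arithmetic mirrors what subpage_calc_start_bit() does in the diff below.

#include <stdio.h>

/* Assumed geometry for the example: 64K pages, 4K sectors. */
#define EX_PAGE_SHIFT       16
#define EX_SECTORSIZE_BITS  12
#define EX_SECTORS_PER_PAGE (1U << (EX_PAGE_SHIFT - EX_SECTORSIZE_BITS))

/*
 * Hypothetical stand-in for subpage_calc_start_bit(): translate a byte
 * range inside one page into a (first bit, number of bits) pair within
 * the "checked" region of the per-page subpage bitmap.
 */
static void ex_checked_bit_range(unsigned long long page_offset,
                                 unsigned int checked_offset,
                                 unsigned long long start, unsigned int len,
                                 unsigned int *first_bit, unsigned int *nbits)
{
        /* bit index = start of checked region + sector index of @start in the page */
        *first_bit = checked_offset +
                     (unsigned int)((start - page_offset) >> EX_SECTORSIZE_BITS);
        *nbits = len >> EX_SECTORSIZE_BITS;
}

int main(void)
{
        /* Say the checked bits follow five earlier bitmaps (uptodate, error, ...). */
        unsigned int checked_offset = 5 * EX_SECTORS_PER_PAGE;
        unsigned int first_bit, nbits;

        /* Page at file offset 0, mark bytes [8K, 16K) as checked. */
        ex_checked_bit_range(0, checked_offset, 8192, 8192, &first_bit, &nbits);
        printf("set %u bit(s) starting at bit %u\n", nbits, first_bit);
        return 0;
}

For this assumed 4K-sector, 64K-page setup there are 16 checked bits per page, one per sector, which is exactly the cur += nr_bits accounting added to btrfs_init_subpage_info() further down.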
@@ -296,8 +296,14 @@ static void end_compressed_bio_read(struct bio *bio)
          * checked so the end_io handlers know about it
          */
         ASSERT(!bio_flagged(bio, BIO_CLONED));
-        bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
-                SetPageChecked(bvec->bv_page);
+        bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
+                u64 bvec_start = page_offset(bvec->bv_page) +
+                                 bvec->bv_offset;
+
+                btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
+                                       bvec->bv_page, bvec_start,
+                                       bvec->bv_len);
+        }

         bio_endio(cb->orig_bio);
 }
...
@@ -437,9 +437,15 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
 /*
  * unlocks pages after btrfs_file_write is done with them
  */
-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
+                             struct page **pages, size_t num_pages,
+                             u64 pos, u64 copied)
 {
         size_t i;
+        u64 block_start = round_down(pos, fs_info->sectorsize);
+        u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
+
+        ASSERT(block_len <= U32_MAX);
         for (i = 0; i < num_pages; i++) {
                 /* page checked is some magic around finding pages that
                  * have been modified without going through btrfs_set_page_dirty
@@ -447,7 +453,8 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
                  * accessed as prepare_pages should have marked them accessed
                  * in prepare_pages via find_or_create_page()
                  */
-                ClearPageChecked(pages[i]);
+                btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
+                                               block_len);
                 unlock_page(pages[i]);
                 put_page(pages[i]);
         }
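As an illustrative example (numbers not from the commit): with 4K sectors on a 64K page, a buffered write of copied = 3000 bytes at pos = 5000 gives block_start = round_down(5000, 4096) = 4096 and block_len = round_up(8000, 4096) - 4096 = 4096, so only the single written sector has its checked bit cleared; any extra pages that prepare_pages pinned beyond that range are clamped away entirely (see the btrfs_subpage_clamp_range() change further down).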
@@ -504,7 +511,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
                 struct page *p = pages[i];

                 btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
-                ClearPageChecked(p);
+                btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
                 btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
         }
@@ -1843,7 +1850,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
                 if (ret) {
-                        btrfs_drop_pages(pages, num_pages);
+                        btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
                         break;
                 }
@@ -1851,7 +1858,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                 if (only_release_metadata)
                         btrfs_check_nocow_unlock(BTRFS_I(inode));
-                btrfs_drop_pages(pages, num_pages);
+                btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);

                 cond_resched();
...
@@ -22,6 +22,7 @@
 #include "delalloc-space.h"
 #include "block-group.h"
 #include "discard.h"
+#include "subpage.h"

 #define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_64K
@@ -411,7 +412,10 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
         for (i = 0; i < io_ctl->num_pages; i++) {
                 if (io_ctl->pages[i]) {
-                        ClearPageChecked(io_ctl->pages[i]);
+                        btrfs_page_clear_checked(io_ctl->fs_info,
+                                        io_ctl->pages[i],
+                                        page_offset(io_ctl->pages[i]),
+                                        PAGE_SIZE);
                         unlock_page(io_ctl->pages[i]);
                         put_page(io_ctl->pages[i]);
                 }
...
...@@ -2764,7 +2764,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) ...@@ -2764,7 +2764,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
clear_page_dirty_for_io(page); clear_page_dirty_for_io(page);
SetPageError(page); SetPageError(page);
} }
ClearPageChecked(page); btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
kfree(fixup); kfree(fixup);
...@@ -2819,7 +2819,7 @@ int btrfs_writepage_cow_fixup(struct page *page) ...@@ -2819,7 +2819,7 @@ int btrfs_writepage_cow_fixup(struct page *page)
* page->mapping outside of the page lock. * page->mapping outside of the page lock.
*/ */
ihold(inode); ihold(inode);
SetPageChecked(page); btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
get_page(page); get_page(page);
btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
fixup->page = page; fixup->page = page;
@@ -3269,27 +3269,22 @@ unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
                                     u64 start, u64 end)
 {
         struct inode *inode = page->mapping->host;
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
         struct btrfs_root *root = BTRFS_I(inode)->root;
         const u32 sectorsize = root->fs_info->sectorsize;
         u32 pg_off;
         unsigned int result = 0;

-        if (PageChecked(page)) {
-                ClearPageChecked(page);
+        if (btrfs_page_test_checked(fs_info, page, start, end + 1 - start)) {
+                btrfs_page_clear_checked(fs_info, page, start, end + 1 - start);
                 return 0;
         }

         /*
-         * For subpage case, above PageChecked is not safe as it's not subpage
-         * compatible.
-         * But for now only cow fixup and compressed read utilize PageChecked
-         * flag, while in this context we can easily use bbio->csum to
-         * determine if we really need to do csum verification.
-         *
-         * So for now, just exit if bbio->csum is NULL, as it means it's
-         * compressed read, and its compressed data csum has already been
-         * verified.
+         * This only happens for NODATASUM or compressed read.
+         * Normally this should be covered by above check for compressed read
+         * or the next check for NODATASUM.  Just do a quicker exit here.
          */
         if (bbio->csum == NULL)
                 return 0;
@@ -5110,7 +5105,8 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
                                 len);
                 flush_dcache_page(page);
         }
-        ClearPageChecked(page);
+        btrfs_page_clear_checked(fs_info, page, block_start,
+                                 block_end + 1 - block_start);
         btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
         unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
@@ -8705,9 +8701,9 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
          * did something wrong.
          */
         ASSERT(!PageOrdered(page));
+        btrfs_page_clear_checked(fs_info, page, page_offset(page), PAGE_SIZE);
         if (!inode_evicting)
                 __btrfs_releasepage(page, GFP_NOFS);

-        ClearPageChecked(page);
         clear_page_extent_mapped(page);
 }
@@ -8851,7 +8847,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
                 memzero_page(page, zero_start, PAGE_SIZE - zero_start);
                 flush_dcache_page(page);
         }
-        ClearPageChecked(page);
+        btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
         btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
         btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
...
@@ -138,7 +138,7 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
         }

         btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
-        ClearPageChecked(page);
+        btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
         btrfs_page_set_dirty(fs_info, page, file_offset, block_size);

 out_unlock:
         if (page) {
...
@@ -88,6 +88,9 @@ void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sector
         subpage_info->ordered_offset = cur;
         cur += nr_bits;

+        subpage_info->checked_offset = cur;
+        cur += nr_bits;
+
         subpage_info->total_nr_bits = cur;
 }
@@ -255,6 +258,14 @@ static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
         u32 orig_len = *len;

         *start = max_t(u64, page_offset(page), orig_start);
-        *len = min_t(u64, page_offset(page) + PAGE_SIZE,
-                     orig_start + orig_len) - *start;
+        /*
+         * For certain call sites like btrfs_drop_pages(), we may have pages
+         * beyond the target range. In that case, just set @len to 0, subpage
+         * helpers can handle @len == 0 without any problem.
+         */
+        if (page_offset(page) >= orig_start + orig_len)
+                *len = 0;
+        else
+                *len = min_t(u64, page_offset(page) + PAGE_SIZE,
+                             orig_start + orig_len) - *start;
 }
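To see the new branch in action (illustrative numbers, not from the commit): clearing checked for block_start = 4096, block_len = 4096 on a 64K page that starts at file offset 65536 hits page_offset(page) = 65536 >= orig_start + orig_len = 8192, so *len becomes 0 and the subpage helper degenerates into a no-op for that page; without the branch, min_t(u64, 131072, 8192) - 65536 would underflow into a huge length.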
@@ -532,6 +543,36 @@ void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
         ClearPageOrdered(page);
         spin_unlock_irqrestore(&subpage->lock, flags);
 }
+
+void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
+                               struct page *page, u64 start, u32 len)
+{
+        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+                                                        checked, start, len);
+        unsigned long flags;
+
+        spin_lock_irqsave(&subpage->lock, flags);
+        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+        if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
+                SetPageChecked(page);
+        spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
+                                 struct page *page, u64 start, u32 len)
+{
+        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+        unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
+                                                        checked, start, len);
+        unsigned long flags;
+
+        spin_lock_irqsave(&subpage->lock, flags);
+        bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
+        ClearPageChecked(page);
+        spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
 /*
  * Unlike set/clear which is dependent on each page status, for test all bits
  * are tested in the same way.
@@ -557,6 +598,7 @@ IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
 IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
 IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
 IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
+IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

 /*
  * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
@@ -627,6 +669,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback,
                          PageWriteback);
 IMPLEMENT_BTRFS_PAGE_OPS(ordered, SetPageOrdered, ClearPageOrdered,
                          PageOrdered);
+IMPLEMENT_BTRFS_PAGE_OPS(checked, SetPageChecked, ClearPageChecked, PageChecked);

 /*
  * Make sure not only the page dirty bit is cleared, but also subpage dirty bit
...
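A note on how the new helpers are consumed, based on the existing subpage macro machinery rather than anything new in this commit: as with the other subpage bits, the per-sector checked bits are the source of truth; the page-level PageChecked flag is only set once every sector in the page is checked, and any clear drops it immediately. Call sites outside subpage.c go through the wrappers generated by IMPLEMENT_BTRFS_PAGE_OPS(checked, ...) above, i.e. btrfs_page_set_checked(), btrfs_page_clear_checked(), btrfs_page_test_checked() and their clamp variants, which fall back to the plain SetPageChecked()/ClearPageChecked()/PageChecked() page ops when the page is not in subpage mode.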
@@ -36,6 +36,7 @@ struct btrfs_subpage_info {
         unsigned int dirty_offset;
         unsigned int writeback_offset;
         unsigned int ordered_offset;
+        unsigned int checked_offset;
 };

 /*
@@ -142,6 +143,7 @@ DECLARE_BTRFS_SUBPAGE_OPS(error);
 DECLARE_BTRFS_SUBPAGE_OPS(dirty);
 DECLARE_BTRFS_SUBPAGE_OPS(writeback);
 DECLARE_BTRFS_SUBPAGE_OPS(ordered);
+DECLARE_BTRFS_SUBPAGE_OPS(checked);

 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
                                         struct page *page, u64 start, u32 len);
...