Commit 570eb97b authored by Josef Bacik, committed by David Sterba

btrfs: unify the lock/unlock extent variants

We have two variants of lock/unlock extent, one set that takes a cached
state, another that does not.  This is slightly annoying, and generally
speaking there are only a few places where we don't have a cached state.
Simplify this by making lock_extent/unlock_extent the only variant and
make it take a cached state, then convert all the callers appropriately.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 291bbb1e
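
In practice the calling convention changes as follows (an illustrative sketch distilled from the diff below; tree, start, end and cached_state are placeholder variables, not code from the tree):

	/* Before: two lock variants and three unlock variants. */
	lock_extent(tree, start, end);
	lock_extent_bits(tree, start, end, &cached_state);
	unlock_extent(tree, start, end);
	unlock_extent_cached(tree, start, end, &cached_state);
	unlock_extent_cached_atomic(tree, start, end, &cached_state);

	/* After: one variant of each; callers without a cached state pass NULL. */
	lock_extent(tree, start, end, NULL);
	lock_extent(tree, start, end, &cached_state);
	unlock_extent(tree, start, end, NULL);
	unlock_extent(tree, start, end, &cached_state);
	unlock_extent_atomic(tree, start, end, &cached_state);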
@@ -588,7 +588,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		}
 
 		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
-		lock_extent(tree, cur, page_end);
+		lock_extent(tree, cur, page_end, NULL);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
 		read_unlock(&em_tree->lock);
@@ -602,7 +602,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
-			unlock_extent(tree, cur, page_end);
+			unlock_extent(tree, cur, page_end, NULL);
 			unlock_page(page);
 			put_page(page);
 			break;
@@ -622,7 +622,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		add_size = min(em->start + em->len, page_end + 1) - cur;
 		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
 		if (ret != add_size) {
-			unlock_extent(tree, cur, page_end);
+			unlock_extent(tree, cur, page_end, NULL);
 			unlock_page(page);
 			put_page(page);
 			break;
......
@@ -131,8 +131,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	if (atomic)
 		return -EAGAIN;
 
-	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
-			 &cached_state);
+	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, &cached_state);
 	if (extent_buffer_uptodate(eb) &&
 	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
@@ -145,7 +144,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	ret = 1;
 	clear_extent_buffer_uptodate(eb);
 out:
-	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
-			     &cached_state);
+	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
+		      &cached_state);
 	return ret;
 }
......
@@ -1649,7 +1649,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
  * Either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
  */
-int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     struct extent_state **cached_state)
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		struct extent_state **cached_state)
 {
 	int err;
......
@@ -98,14 +98,9 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info,
 			 void *private_data);
 void extent_io_tree_release(struct extent_io_tree *tree);
 
-int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     struct extent_state **cached);
-
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return lock_extent_bits(tree, start, end, NULL);
-}
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		struct extent_state **cached);
 
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 
 int __init extent_state_init_cachep(void);
@@ -132,20 +127,15 @@ static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
 			       GFP_NOFS, NULL);
 }
 
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
-}
-
-static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
-				       u64 end, struct extent_state **cached)
+static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+				struct extent_state **cached)
 {
 	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 0, cached,
 				  GFP_NOFS, NULL);
 }
 
-static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
-					      u64 start, u64 end, struct extent_state **cached)
+static inline int unlock_extent_atomic(struct extent_io_tree *tree, u64 start,
+				       u64 end, struct extent_state **cached)
 {
 	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 0, cached,
 				  GFP_ATOMIC, NULL);
......
@@ -463,13 +463,13 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 	}
 
 	/* step three, lock the state bits for the whole range */
-	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
+	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
 			     EXTENT_DELALLOC, 1, cached_state);
 	if (!ret) {
-		unlock_extent_cached(tree, delalloc_start, delalloc_end,
-				     &cached_state);
+		unlock_extent(tree, delalloc_start, delalloc_end,
+			      &cached_state);
 		__unlock_for_delalloc(inode, locked_page,
 				      delalloc_start, delalloc_end);
@@ -913,8 +913,8 @@ static void end_sector_io(struct page *page, u64 offset, bool uptodate)
 	if (uptodate)
 		set_extent_uptodate(&inode->io_tree, offset,
 				    offset + sectorsize - 1, &cached, GFP_ATOMIC);
-	unlock_extent_cached_atomic(&inode->io_tree, offset,
-				    offset + sectorsize - 1, &cached);
+	unlock_extent_atomic(&inode->io_tree, offset, offset + sectorsize - 1,
+			     &cached);
 }
 
 static void submit_data_read_repair(struct inode *inode,
@@ -1118,8 +1118,7 @@ static void endio_readpage_release_extent(struct processed_extent *processed,
 	 * Now we don't have range contiguous to the processed range, release
 	 * the processed range now.
 	 */
-	unlock_extent_cached_atomic(tree, processed->start, processed->end,
-				    &cached);
+	unlock_extent_atomic(tree, processed->start, processed->end, &cached);
 
 update:
 	/* Update processed to current range */
@@ -1761,7 +1760,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 	ret = set_page_extent_mapped(page);
 	if (ret < 0) {
-		unlock_extent(tree, start, end);
+		unlock_extent(tree, start, end, NULL);
 		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
 		unlock_page(page);
 		goto out;
@@ -1789,15 +1788,14 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 			memzero_page(page, pg_offset, iosize);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
-			unlock_extent_cached(tree, cur,
-					     cur + iosize - 1, &cached);
+			unlock_extent(tree, cur, cur + iosize - 1, &cached);
 			end_page_read(page, true, cur, iosize);
 			break;
 		}
 		em = __get_extent_map(inode, page, pg_offset, cur,
 				      end - cur + 1, em_cached);
 		if (IS_ERR(em)) {
-			unlock_extent(tree, cur, end);
+			unlock_extent(tree, cur, end, NULL);
 			end_page_read(page, false, cur, end + 1 - cur);
 			ret = PTR_ERR(em);
 			break;
@@ -1872,8 +1870,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
-			unlock_extent_cached(tree, cur,
-					     cur + iosize - 1, &cached);
+			unlock_extent(tree, cur, cur + iosize - 1, &cached);
 			end_page_read(page, true, cur, iosize);
 			cur = cur + iosize;
 			pg_offset += iosize;
@@ -1881,7 +1878,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		}
 		/* the get_extent function already copied into the page */
 		if (block_start == EXTENT_MAP_INLINE) {
-			unlock_extent(tree, cur, cur + iosize - 1);
+			unlock_extent(tree, cur, cur + iosize - 1, NULL);
 			end_page_read(page, true, cur, iosize);
 			cur = cur + iosize;
 			pg_offset += iosize;
@@ -1897,7 +1894,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 			 * We have to unlock the remaining range, or the page
 			 * will never be unlocked.
 			 */
-			unlock_extent(tree, cur, end);
+			unlock_extent(tree, cur, end, NULL);
 			end_page_read(page, false, cur, end + 1 - cur);
 			goto out;
 		}
@@ -3364,7 +3361,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
 	if (start > end)
 		return 0;
 
-	lock_extent_bits(tree, start, end, &cached_state);
+	lock_extent(tree, start, end, &cached_state);
 	folio_wait_writeback(folio);
 
 	/*
@@ -3372,7 +3369,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
 	 * so here we only need to unlock the extent range to free any
 	 * existing extent state.
 	 */
-	unlock_extent_cached(tree, start, end, &cached_state);
+	unlock_extent(tree, start, end, &cached_state);
 	return 0;
 }
@@ -3939,7 +3936,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 	lockend = round_up(start + len, btrfs_inode_sectorsize(inode));
 	prev_extent_end = lockstart;
 
-	lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
+	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
 	if (ret < 0)
@@ -4129,7 +4126,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 	ret = emit_last_fiemap_cache(fieinfo, &cache);
 
 out_unlock:
-	unlock_extent_cached(&inode->io_tree, lockstart, lockend, &cached_state);
+	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 out:
 	kfree(backref_cache);
 	btrfs_free_path(path);
@@ -4972,7 +4969,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
 			return -EAGAIN;
 	} else {
-		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
+		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL);
 		if (ret < 0)
 			return ret;
 	}
@@ -4982,7 +4979,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 	    PageUptodate(page) ||
 	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
+		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL);
 		return ret;
 	}
......
@@ -1426,15 +1426,14 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 	if (start_pos < inode->vfs_inode.i_size) {
 		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
-				cached_state);
+		lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
 						     last_pos - start_pos + 1);
 		if (ordered &&
 		    ordered->file_offset + ordered->num_bytes > start_pos &&
 		    ordered->file_offset <= last_pos) {
-			unlock_extent_cached(&inode->io_tree, start_pos,
-					last_pos, cached_state);
+			unlock_extent(&inode->io_tree, start_pos, last_pos,
+				      cached_state);
 			for (i = 0; i < num_pages; i++) {
 				unlock_page(pages[i]);
 				put_page(pages[i]);
@@ -1510,7 +1509,7 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
 		*write_bytes = min_t(size_t, *write_bytes ,
 				     num_bytes - pos + lockstart);
 	}
-	unlock_extent(&inode->io_tree, lockstart, lockend);
+	unlock_extent(&inode->io_tree, lockstart, lockend, NULL);
 
 	return ret;
 }
@@ -1782,8 +1781,8 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		 * possible cached extent state to avoid a memory leak.
 		 */
 		if (extents_locked)
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     lockstart, lockend, &cached_state);
+			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+				      lockend, &cached_state);
 		else
 			free_extent_state(cached_state);
@@ -2592,7 +2591,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
 	while (1) {
 		truncate_pagecache_range(inode, lockstart, lockend);
 
-		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 cached_state);
+		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+			    cached_state);
 		/*
 		 * We can't have ordered extents in the range, nor dirty/writeback
@@ -2608,8 +2607,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
 				      page_lockend))
 			break;
 
-		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
-				     lockend, cached_state);
+		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+			      cached_state);
 	}
 
 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
@@ -3109,7 +3108,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 	btrfs_end_transaction(trans);
 	btrfs_btree_balance_dirty(fs_info);
 out:
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-			     &cached_state);
+	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+		      &cached_state);
 out_only_mutex:
 	if (!updated_inode && truncated_block && !ret) {
@@ -3383,7 +3382,7 @@ static int btrfs_zero_range(struct inode *inode,
 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
 						alloc_start, bytes_to_reserve);
 		if (ret) {
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
-					     lockend, &cached_state);
+			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+				      lockend, &cached_state);
 			goto out;
 		}
@@ -3391,8 +3390,8 @@ static int btrfs_zero_range(struct inode *inode,
 						alloc_end - alloc_start,
 						i_blocksize(inode),
 						offset + len, &alloc_hint);
-		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
-				     lockend, &cached_state);
+		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+			      &cached_state);
 		/* btrfs_prealloc_file_range releases reserved space on error */
 		if (ret) {
 			space_reserved = false;
@@ -3503,7 +3502,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	}
 
 	locked_end = alloc_end - 1;
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-			 &cached_state);
+	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+		    &cached_state);
 
 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
@@ -3593,7 +3592,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	 */
 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
 out_unlock:
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-			     &cached_state);
+	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+		      &cached_state);
 out:
 	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
@@ -3899,7 +3898,7 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
 	last_extent_end = lockstart;
 
-	lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
+	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0) {
@@ -4035,8 +4034,7 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
 	}
 
 out:
-	unlock_extent_cached(&inode->io_tree, lockstart, lockend,
-			     &cached_state);
+	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 	btrfs_free_path(path);
 
 	if (ret < 0)
......
@@ -348,7 +348,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
 	btrfs_i_size_write(inode, 0);
 	truncate_pagecache(vfs_inode, 0);
 
-	lock_extent_bits(&inode->io_tree, 0, (u64)-1, &cached_state);
+	lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 
 	/*
@@ -360,7 +360,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
 	inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
 	btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
 
-	unlock_extent_cached(&inode->io_tree, 0, (u64)-1, &cached_state);
+	unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
 	if (ret)
 		goto fail;
@@ -1292,8 +1292,8 @@ cleanup_write_cache_enospc(struct inode *inode,
 			   struct extent_state **cached_state)
 {
 	io_ctl_drop_pages(io_ctl);
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-			     i_size_read(inode) - 1, cached_state);
+	unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+		      cached_state);
 }
 
 static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1418,7 +1418,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	if (ret)
 		goto out_unlock;
 
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-			 &cached_state);
+	lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+		    &cached_state);
 
 	io_ctl_set_generation(io_ctl, trans->transid);
@@ -1474,8 +1474,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	io_ctl_drop_pages(io_ctl);
 	io_ctl_free(io_ctl);
 
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-			     i_size_read(inode) - 1, &cached_state);
+	unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+		      &cached_state);
 
 	/*
 	 * at this point the pages are under IO and we're happy,
......
...... (collapsed diff not shown)
@@ -1218,10 +1218,10 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
 		/* get the big lock and read metadata off disk */
 		if (!locked)
-			lock_extent_bits(io_tree, start, end, &cached);
+			lock_extent(io_tree, start, end, &cached);
 		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
 		if (!locked)
-			unlock_extent_cached(io_tree, start, end, &cached);
+			unlock_extent(io_tree, start, end, &cached);
 
 		if (IS_ERR(em))
 			return NULL;
@@ -1333,9 +1333,9 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode,
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
+		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
-		unlock_extent_cached(&inode->io_tree, page_start, page_end,
-				     &cached_state);
+		unlock_extent(&inode->io_tree, page_start, page_end,
+			      &cached_state);
 		if (!ordered)
 			break;
@@ -1666,7 +1666,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		wait_on_page_writeback(pages[i]);
 
 	/* Lock the pages range */
-	lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
-			 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-			 &cached_state);
+	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+		    &cached_state);
 	/*
@@ -1694,7 +1694,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		kfree(entry);
 	}
 unlock_extent:
-	unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
-			     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-			     &cached_state);
+	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+		      &cached_state);
 free_pages:
......
@@ -1043,7 +1043,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
 		cachedp = cached_state;
 
 	while (1) {
-		lock_extent_bits(&inode->io_tree, start, end, cachedp);
+		lock_extent(&inode->io_tree, start, end, cachedp);
 		ordered = btrfs_lookup_ordered_range(inode, start,
 						     end - start + 1);
 		if (!ordered) {
@@ -1056,7 +1056,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
 				refcount_dec(&cache->refs);
 			break;
 		}
-		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
+		unlock_extent(&inode->io_tree, start, end, cachedp);
 		btrfs_start_ordered_extent(ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}
......
@@ -615,8 +615,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
 				       struct inode *inode2, u64 loff2, u64 len)
 {
-	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
-	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL);
+	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL);
 }
 
 static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
@@ -634,8 +634,8 @@ static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
 		swap(range1_end, range2_end);
 	}
 
-	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end);
-	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end);
+	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL);
+	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL);
 
 	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
 	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
......
@@ -1127,7 +1127,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 				btrfs_drop_extent_cache(BTRFS_I(inode),
 						key.offset, end, 1);
 				unlock_extent(&BTRFS_I(inode)->io_tree,
-					      key.offset, end);
+					      key.offset, end, NULL);
 			}
 		}
@@ -1566,9 +1566,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 		}
 
 		/* the lock_extent waits for read_folio to complete */
-		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
+		lock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
 		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
+		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
 	}
 	return 0;
 }
@@ -2869,13 +2869,13 @@ static noinline_for_stack int prealloc_file_extent_cluster(
 		else
 			end = cluster->end - offset;
 
-		lock_extent(&inode->io_tree, start, end);
+		lock_extent(&inode->io_tree, start, end, NULL);
 		num_bytes = end + 1 - start;
 		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
 						num_bytes, num_bytes,
 						end + 1, &alloc_hint);
 		cur_offset = end + 1;
-		unlock_extent(&inode->io_tree, start, end);
+		unlock_extent(&inode->io_tree, start, end, NULL);
 		if (ret)
 			break;
 	}
@@ -2904,7 +2904,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
 	em->block_start = block_start;
 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
+	lock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
 	while (1) {
 		write_lock(&em_tree->lock);
 		ret = add_extent_mapping(em_tree, em, 0);
@@ -2915,7 +2915,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
 		}
 		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
 	}
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
+	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, NULL);
 	return ret;
 }
@@ -3006,7 +3006,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 			goto release_page;
 
 		/* Mark the range delalloc and dirty for later writeback */
-		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
+		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, NULL);
 		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
 						clamped_end, 0, NULL);
 		if (ret) {
@@ -3039,7 +3039,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 					      boundary_start, boundary_end,
 					      EXTENT_BOUNDARY);
 		}
-		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end);
+		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, NULL);
 		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
 
 		cur += clamped_len;
......
@@ -172,7 +172,7 @@ static int test_find_delalloc(u32 sectorsize)
 			 sectorsize - 1, start, end);
 		goto out_bits;
 	}
-	unlock_extent(tmp, start, end);
+	unlock_extent(tmp, start, end, NULL);
 	unlock_page(locked_page);
 	put_page(locked_page);
@@ -208,7 +208,7 @@ static int test_find_delalloc(u32 sectorsize)
 		test_err("there were unlocked pages in the range");
 		goto out_bits;
 	}
-	unlock_extent(tmp, start, end);
+	unlock_extent(tmp, start, end, NULL);
 	/* locked_page was unlocked above */
 	put_page(locked_page);
@@ -263,7 +263,7 @@ static int test_find_delalloc(u32 sectorsize)
 		test_err("pages in range were not all locked");
 		goto out_bits;
 	}
-	unlock_extent(tmp, start, end);
+	unlock_extent(tmp, start, end, NULL);
 
 	/*
 	 * Now to test where we run into a page that is no longer dirty in the
......
@@ -4271,8 +4271,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
 	 * file which happens to refer to the same extent as well. Such races
 	 * can leave checksum items in the log with overlapping ranges.
 	 */
-	ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
-			       lock_end, &cached_state);
+	ret = lock_extent(&log_root->log_csum_range, sums->bytenr, lock_end,
+			  &cached_state);
 	if (ret)
 		return ret;
 	/*
@@ -4288,7 +4288,7 @@ static int log_csums(struct btrfs_trans_handle *trans,
 	if (!ret)
 		ret = btrfs_csum_file_blocks(trans, log_root, sums);
 
-	unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
-			     &cached_state);
+	unlock_extent(&log_root->log_csum_range, sums->bytenr, lock_end,
+		      &cached_state);
 
 	return ret;
......