Commit 83ae4133 authored by Josef Bacik's avatar Josef Bacik Committed by David Sterba

btrfs: add a cached_state to try_lock_extent

With nowait becoming more pervasive throughout our codebase go ahead and
add a cached_state to try_lock_extent().  This allows us to be faster
about clearing the locked area if we have contention, and then gives us
the same optimization for unlock if we are able to lock the range.
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 76dcd734
@@ -1615,17 +1615,18 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
					changeset);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    struct extent_state **cached)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
			       cached, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, cached);
		return 0;
	}
	return 1;
......
@@ -106,7 +106,8 @@ void extent_io_tree_release(struct extent_io_tree *tree);
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    struct extent_state **cached);
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);
......
@@ -4959,7 +4959,8 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;

	if (wait == WAIT_NONE) {
		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1,
				     NULL))
			return -EAGAIN;
	} else {
		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL);
......
@@ -1302,7 +1302,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
					     cached_state)) {
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
......
@@ -7255,7 +7255,8 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
	while (1) {
		if (nowait) {
			if (!try_lock_extent(io_tree, lockstart, lockend,
					     cached_state))
				return -EAGAIN;
		} else {
			lock_extent(io_tree, lockstart, lockend, cached_state);
......
@@ -1073,7 +1073,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, NULL))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
......
@@ -1120,7 +1120,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end, NULL);
				if (!ret)
					continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment