Commit b272ae22 authored by David Sterba's avatar David Sterba

btrfs: drop argument tree from btrfs_lock_and_flush_ordered_range

The tree pointer can be safely read from the inode so we can drop the
redundant argument from btrfs_lock_and_flush_ordered_range.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ae6957eb
...@@ -3331,7 +3331,7 @@ static inline void contiguous_readpages(struct extent_io_tree *tree, ...@@ -3331,7 +3331,7 @@ static inline void contiguous_readpages(struct extent_io_tree *tree,
ASSERT(tree == &inode->io_tree); ASSERT(tree == &inode->io_tree);
btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL); btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
for (index = 0; index < nr_pages; index++) { for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], btrfs_get_extent, em_cached, __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
...@@ -3354,7 +3354,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, ...@@ -3354,7 +3354,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
ASSERT(tree == &inode->io_tree); ASSERT(tree == &inode->io_tree);
btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL); btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
bio_flags, read_flags, NULL); bio_flags, read_flags, NULL);
......
...@@ -1561,7 +1561,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, ...@@ -1561,7 +1561,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
lockend = round_up(pos + *write_bytes, lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1; fs_info->sectorsize) - 1;
btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart, btrfs_lock_and_flush_ordered_range(inode, lockstart,
lockend, NULL); lockend, NULL);
num_bytes = lockend - lockstart + 1; num_bytes = lockend - lockstart + 1;
......
...@@ -4619,7 +4619,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) ...@@ -4619,7 +4619,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (size <= hole_start) if (size <= hole_start)
return 0; return 0;
btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start, btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), hole_start,
block_end - 1, &cached_state); block_end - 1, &cached_state);
cur_offset = hole_start; cur_offset = hole_start;
while (1) { while (1) {
......
...@@ -835,7 +835,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, ...@@ -835,7 +835,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
* btrfs_flush_ordered_range - Lock the passed range and ensures all pending * btrfs_flush_ordered_range - Lock the passed range and ensures all pending
* ordered extents in it are run to completion. * ordered extents in it are run to completion.
* *
* @tree: IO tree used for locking out other users of the range
* @inode: Inode whose ordered tree is to be searched * @inode: Inode whose ordered tree is to be searched
* @start: Beginning of range to flush * @start: Beginning of range to flush
* @end: Last byte of range to lock * @end: Last byte of range to lock
...@@ -845,8 +844,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, ...@@ -845,8 +844,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
* This function always returns with the given range locked, ensuring after it's * This function always returns with the given range locked, ensuring after it's
* called no order extent can be pending. * called no order extent can be pending.
*/ */
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
struct btrfs_inode *inode, u64 start,
u64 end, u64 end,
struct extent_state **cached_state) struct extent_state **cached_state)
{ {
...@@ -854,13 +852,11 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, ...@@ -854,13 +852,11 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
struct extent_state *cache = NULL; struct extent_state *cache = NULL;
struct extent_state **cachedp = &cache; struct extent_state **cachedp = &cache;
ASSERT(tree == &inode->io_tree);
if (cached_state) if (cached_state)
cachedp = cached_state; cachedp = cached_state;
while (1) { while (1) {
lock_extent_bits(tree, start, end, cachedp); lock_extent_bits(&inode->io_tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start, ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1); end - start + 1);
if (!ordered) { if (!ordered) {
...@@ -873,7 +869,7 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, ...@@ -873,7 +869,7 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
refcount_dec(&cache->refs); refcount_dec(&cache->refs);
break; break;
} }
unlock_extent_cached(tree, start, end, cachedp); unlock_extent_cached(&inode->io_tree, start, end, cachedp);
btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered);
} }
......
...@@ -183,8 +183,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, ...@@ -183,8 +183,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len); const u64 range_start, const u64 range_len);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len); const u64 range_start, const u64 range_len);
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree, void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
struct btrfs_inode *inode, u64 start,
u64 end, u64 end,
struct extent_state **cached_state); struct extent_state **cached_state);
int __init ordered_data_init(void); int __init ordered_data_init(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment