Commit 55961c8a authored by Filipe Manana, committed by David Sterba

btrfs: remove ordered extent check and wait during hole punching and zero range

For hole punching and zero range we have a loop that, after locking the
file range, checks whether there are ordered extents in it and, if so,
unlocks the range, waits for the ordered extents to complete and retries
until no more ordered extents are found.
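
In rough pseudocode, the pattern being removed looks like this (a trimmed
sketch of the deleted lines in btrfs_punch_hole_lock_range(), visible in
the diff below; the page cache check, error handling and the overlap test
against the ordered extent's file range are omitted here):

	struct btrfs_ordered_extent *ordered;

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
							    lockend);
		/* No ordered extent that could overlap the range, done. */
		if (!ordered)
			break;
		btrfs_put_ordered_extent(ordered);
		/* Otherwise unlock, wait for ordered extents and retry. */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, cached_state);
		btrfs_wait_ordered_range(inode, lockstart,
					 lockend - lockstart + 1);
	}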

This logic was needed in the past because:

1) Direct IO writes within the i_size boundary did not take the inode's
   VFS lock. This was because that lock used to be a mutex, then some
   years ago it was switched to a rw semaphore (commit 9902af79
   ("parallel lookups: actual switch to rwsem")), and then btrfs was
   changed to take the VFS inode's lock in shared mode for writes that
   don't cross the i_size boundary (commit e9adabb9 ("btrfs: use
   shared lock for direct writes within EOF"));

2) We could race with memory mapped writes, because memory mapped writes
   don't acquire the inode's VFS lock. We don't have that race anymore,
   as we have a rw semaphore to synchronize memory mapped writes with
   fallocate (and reflinking too). That change happened with commit
   8d9b4a16 ("btrfs: exclude mmap from happening during all
   fallocate operations").

So stop looking for ordered extents after locking the file range when
doing hole punching and zero range operations.
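
For context, the following is a simplified, illustrative sketch of what
the callers are expected to already do before calling
btrfs_punch_hole_lock_range(), based on the justification in the new
comment added by this change; it is not a copy of the kernel code, and the
exact call sequence in btrfs_punch_hole()/btrfs_fallocate() may differ:

	/*
	 * Both fallocate paths take the VFS inode lock and the inode's
	 * i_mmap_lock in exclusive mode for the whole operation, so no new
	 * buffered, direct IO or mmap writes can start against the range.
	 */
	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);

	/*
	 * Delalloc in the range is flushed and any ordered extents covering
	 * it are waited for (errors are handled by the real callers).
	 */
	ret = btrfs_wait_ordered_range(inode, offset, len);

	/*
	 * With the above guarantees there can be no new ordered extents in
	 * the range, so btrfs_punch_hole_lock_range() only needs to re-check
	 * for pages instantiated by concurrent readers.
	 */
	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);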
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent bd6526d0
@@ -2570,10 +2570,10 @@ static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
 	return ret;
 }
 
-static int btrfs_punch_hole_lock_range(struct inode *inode,
-				       const u64 lockstart,
-				       const u64 lockend,
-				       struct extent_state **cached_state)
+static void btrfs_punch_hole_lock_range(struct inode *inode,
+					const u64 lockstart,
+					const u64 lockend,
+					struct extent_state **cached_state)
 {
 	/*
 	 * For subpage case, if the range is not at page boundary, we could
@@ -2587,40 +2587,27 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
 
 	while (1) {
-		struct btrfs_ordered_extent *ordered;
-		int ret;
-
 		truncate_pagecache_range(inode, lockstart, lockend);
 
 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 				 cached_state);
-		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
-							    lockend);
-
 		/*
-		 * We need to make sure we have no ordered extents in this range
-		 * and nobody raced in and read a page in this range, if we did
-		 * we need to try again.
+		 * We can't have ordered extents in the range, nor dirty/writeback
+		 * pages, because we have locked the inode's VFS lock in exclusive
+		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
+		 * we have flushed all delalloc in the range and we have waited
+		 * for any ordered extents in the range to complete.
+		 * We can race with anyone reading pages from this range, so after
+		 * locking the range check if we have pages in the range, and if
+		 * we do, unlock the range and retry.
 		 */
-		if ((!ordered ||
-		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
-		     ordered->file_offset > lockend)) &&
-		     !filemap_range_has_page(inode->i_mapping,
-					     page_lockstart, page_lockend)) {
-			if (ordered)
-				btrfs_put_ordered_extent(ordered);
+		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
+					    page_lockend))
 			break;
-		}
-		if (ordered)
-			btrfs_put_ordered_extent(ordered);
+
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
 				     lockend, cached_state);
-		ret = btrfs_wait_ordered_range(inode, lockstart,
-					       lockend - lockstart + 1);
-		if (ret)
-			return ret;
 	}
-	return 0;
 }
 
 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
@@ -3073,10 +3060,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 		goto out_only_mutex;
 	}
 
-	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
-					  &cached_state);
-	if (ret)
-		goto out_only_mutex;
+	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -3367,10 +3351,8 @@ static int btrfs_zero_range(struct inode *inode,
 		if (ret < 0)
 			goto out;
 		space_reserved = true;
-		ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
-						  &cached_state);
-		if (ret)
-			goto out;
+		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+					    &cached_state);
 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
 						alloc_start, bytes_to_reserve);
 		if (ret) {