Commit b7849516 authored by Christoph Hellwig, committed by Carlos Maiolino

iomap: move locking out of iomap_write_delalloc_release

XFS (which currently is the only user of iomap_write_delalloc_release)
already holds invalidate_lock for most zeroing operations.  To be able
to avoid a deadlock, it needs to stop taking the lock, but doing so
in iomap would leak XFS locking details into iomap.

To avoid this, require the caller to hold invalidate_lock when calling
iomap_write_delalloc_release instead of taking it there.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
parent caf0ea45
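
For context, here is a minimal sketch (not part of the patch) of the calling convention this commit establishes: the filesystem takes the mapping's invalidate_lock around iomap_write_delalloc_release(), and iomap itself only asserts the lock via lockdep. The example_* names and the punch-callback signature are illustrative placeholders modeled on the XFS caller shown in the diff below.

#include <linux/fs.h>
#include <linux/iomap.h>

/*
 * Illustrative placeholder for the filesystem's delalloc punch callback;
 * the exact callback signature is assumed here, not taken from the patch.
 */
static void example_delalloc_punch(struct inode *inode, loff_t offset,
		loff_t length, struct iomap *iomap)
{
	/* A real filesystem would release the delalloc reservation here. */
}

/* Sketch of a buffered-write ->iomap_end handler after this change. */
static int example_buffered_write_iomap_end(struct inode *inode,
		loff_t start_byte, loff_t end_byte, unsigned flags,
		struct iomap *iomap)
{
	if (start_byte >= end_byte)
		return 0;

	/*
	 * The caller now owns the locking: take invalidate_lock here so page
	 * faults cannot re-instantiate and dirty folios while the delalloc
	 * extents backing them are removed.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
			iomap, example_delalloc_punch);
	filemap_invalidate_unlock(inode->i_mapping);
	return 0;
}
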
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1211,12 +1211,13 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
 
 	/*
-	 * Lock the mapping to avoid races with page faults re-instantiating
-	 * folios and dirtying them via ->page_mkwrite whilst we walk the
-	 * cache and perform delalloc extent removal. Failing to do this can
-	 * leave dirty pages with no space reservation in the cache.
+	 * The caller must hold invalidate_lock to avoid races with page faults
+	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
+	 * we walk the cache and perform delalloc extent removal. Failing to do
+	 * this can leave dirty pages with no space reservation in the cache.
 	 */
-	filemap_invalidate_lock(inode->i_mapping);
+	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
+
 	while (start_byte < scan_end_byte) {
 		loff_t		data_end;
 
@@ -1233,7 +1234,7 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		if (start_byte == -ENXIO || start_byte == scan_end_byte)
 			break;
 		if (WARN_ON_ONCE(start_byte < 0))
-			goto out_unlock;
+			return;
 		WARN_ON_ONCE(start_byte < punch_start_byte);
 		WARN_ON_ONCE(start_byte > scan_end_byte);
 
@@ -1244,7 +1245,7 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
 				scan_end_byte, SEEK_HOLE);
 		if (WARN_ON_ONCE(data_end < 0))
-			goto out_unlock;
+			return;
 
 		/*
 		 * If we race with post-direct I/O invalidation of the page cache,
@@ -1266,8 +1267,6 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 	if (punch_start_byte < end_byte)
 		punch(inode, punch_start_byte, end_byte - punch_start_byte,
 				iomap);
-out_unlock:
-	filemap_invalidate_unlock(inode->i_mapping);
 }
 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
 
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1239,8 +1239,10 @@ xfs_buffered_write_iomap_end(
 	if (start_byte >= end_byte)
 		return 0;
 
+	filemap_invalidate_lock(inode->i_mapping);
 	iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
 			xfs_buffered_write_delalloc_punch);
+	filemap_invalidate_unlock(inode->i_mapping);
 	return 0;
 }
 