Commit 0e4656a2 authored by Linus Torvalds

Merge tag 'iomap-5.9-merge-5' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull iomap updates from Darrick Wong:
 "The most notable changes are:

   - iomap no longer invalidates the page cache when performing a direct
     read, since doing so is unnecessary and the old directio code
     doesn't do that either.

   - iomap embraced the use of returning ENOTBLK from a direct write to
     trigger falling back to a buffered write since ext4 already did
     this and btrfs wants it for their port.

   - iomap falls back to buffered writes if we're doing a direct write
     and the page cache invalidation after the flush fails; this was
     necessary to handle a corner case in the btrfs port.

   - Remove email virus scanner detritus that was accidentally included
     in yesterday's pull request. Clearly I need(ed) to update my git
     branch checker scripts. :("

* tag 'iomap-5.9-merge-5' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  iomap: fall back to buffered writes for invalidation failures
  xfs: use ENOTBLK for direct I/O to buffered I/O fallback
  iomap: Only invalidate page cache pages on direct IO writes
  iomap: Make sure iomap_end is called after iomap_begin
parents eb65405e 60263d58
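Taken together, the callers touched below converge on one convention: attempt the direct write first, and treat -ENOTBLK from iomap_dio_rw() as "redo this range through the page cache". The following sketch is a schematic, userspace-only model of that control flow, not kernel code; do_direct_write() and do_buffered_write() are hypothetical stand-ins for the per-filesystem helpers.

/*
 * Schematic sketch of the direct-to-buffered fallback convention.
 * Not kernel code; helper names are invented for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static ssize_t do_direct_write(size_t len)
{
        return -ENOTBLK;        /* e.g. the page cache invalidation failed */
}

static ssize_t do_buffered_write(size_t len)
{
        return (ssize_t)len;    /* pretend the buffered write succeeded */
}

static ssize_t file_write_iter(size_t len, int direct)
{
        if (direct) {
                ssize_t ret = do_direct_write(len);

                /* Any result other than -ENOTBLK is final. */
                if (ret != -ENOTBLK)
                        return ret;
                /* -ENOTBLK: retry the same range through the page cache. */
        }
        return do_buffered_write(len);
}

int main(void)
{
        printf("wrote %zd bytes\n", file_write_iter(4096, 1));
        return 0;
}

The xfs_file_write_iter() and zonefs_file_write_iter() hunks in the diffs below follow this shape directly.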
fs/ext4/file.c

@@ -544,6 +544,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
                 iomap_ops = &ext4_iomap_overwrite_ops;
         ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                            is_sync_kiocb(iocb) || unaligned_io || extend);
+        if (ret == -ENOTBLK)
+                ret = 0;
 
         if (extend)
                 ret = ext4_handle_inode_extension(inode, offset, ret, count);
...
fs/gfs2/file.c

@@ -835,7 +835,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 
         ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
                            is_sync_kiocb(iocb));
-
+        if (ret == -ENOTBLK)
+                ret = 0;
 out:
         gfs2_glock_dq(&gh);
 out_uninit:
...
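ext4 and gfs2 (the two hunks above) take a slightly different route from xfs and zonefs: they squash -ENOTBLK to 0, which reads as "no bytes written yet", and rely on the short-write handling already present in their surrounding write paths to push the remaining bytes through the buffered path. A minimal sketch of that idea, assuming such a short-write fallback exists in the surrounding code; the helper names here are made up, not the real ext4/gfs2 functions.

/*
 * Minimal sketch: squash -ENOTBLK to 0 and let the existing
 * short-write fallback finish the job through the page cache.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static size_t todo = 4096;              /* bytes still to be written */

static ssize_t do_direct_write(void)    { return -ENOTBLK; }
static size_t remaining(void)           { return todo; }
static ssize_t do_buffered_write(void)  { ssize_t n = todo; todo = 0; return n; }

static ssize_t write_path(void)
{
        ssize_t ret = do_direct_write();

        if (ret == -ENOTBLK)
                ret = 0;        /* report "nothing written", not an error */

        /* The pre-existing short-write fallback now goes buffered. */
        if (ret >= 0 && remaining())
                ret += do_buffered_write();
        return ret;
}

int main(void)
{
        printf("wrote %zd bytes\n", write_path());
        return 0;
}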
fs/iomap/apply.c

@@ -46,10 +46,14 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
         ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
         if (ret)
                 return ret;
-        if (WARN_ON(iomap.offset > pos))
-                return -EIO;
-        if (WARN_ON(iomap.length == 0))
-                return -EIO;
+        if (WARN_ON(iomap.offset > pos)) {
+                written = -EIO;
+                goto out;
+        }
+        if (WARN_ON(iomap.length == 0)) {
+                written = -EIO;
+                goto out;
+        }
 
         trace_iomap_apply_dstmap(inode, &iomap);
         if (srcmap.type != IOMAP_HOLE)
...

@@ -80,6 +84,7 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
         written = actor(inode, pos, length, data, &iomap,
                         srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
 
+out:
         /*
          * Now the data has been copied, commit the range we've copied.  This
          * should not fail unless the filesystem has had a fatal error.
...
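These two hunks are the "iomap: Make sure iomap_end is called after iomap_begin" change from the shortlog: once ->iomap_begin() has succeeded, even a failed sanity check has to leave through the common exit so that ->iomap_end() still runs. Below is a simplified, self-contained model of that contract; the struct and callbacks are illustrative stand-ins, not the kernel's struct iomap_ops.

/*
 * Simplified model of the begin/actor/end contract enforced above.
 * Once begin() succeeds, every exit path must still call end().
 */
#include <errno.h>
#include <stdio.h>

struct mapping {
        long long offset;
        long long length;
};

static int begin(long long pos, long long len, struct mapping *m)
{
        m->offset = pos;
        m->length = len;
        return 0;
}

static long long actor(long long pos, long long len, const struct mapping *m)
{
        /* Process at most what the mapping covers, starting at pos. */
        long long avail = m->offset + m->length - pos;

        return len < avail ? len : avail;
}

static int end(long long pos, long long len, long long written)
{
        printf("end(): releasing the mapping, written=%lld\n", written);
        return 0;
}

static long long apply(long long pos, long long len)
{
        struct mapping m;
        long long written = 0;
        int ret = begin(pos, len, &m);

        if (ret)
                return ret;             /* begin() failed: nothing to undo */
        if (m.offset > pos || m.length == 0) {
                written = -EIO;         /* bad mapping: do not just return... */
                goto out;
        }
        written = actor(pos, len, &m);
out:
        end(pos, len, written);         /* ...so end() always follows begin() */
        return written;
}

int main(void)
{
        printf("apply -> %lld\n", apply(0, 4096));
        return 0;
}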
fs/iomap/direct-io.c

@@ -10,6 +10,7 @@
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 #include <linux/task_io_accounting_ops.h>
+#include "trace.h"
 
 #include "../internal.h"
@@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
  * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
  * may be pure data writes. In that case, we still need to do a full data sync
  * completion.
+ *
+ * Returns -ENOTBLK in case of a page invalidation failure for writes.
+ * The caller needs to fall back to buffered I/O in this case.
  */
 ssize_t
 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -475,23 +479,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
         if (ret)
                 goto out_free_dio;
 
-        /*
-         * Try to invalidate cache pages for the range we're direct
-         * writing.  If this invalidation fails, tough, the write will
-         * still work, but racing two incompatible write paths is a
-         * pretty crazy thing to do, so we don't support it 100%.
-         */
-        ret = invalidate_inode_pages2_range(mapping,
-                        pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
-        if (ret)
-                dio_warn_stale_pagecache(iocb->ki_filp);
-        ret = 0;
-
-        if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
-            !inode->i_sb->s_dio_done_wq) {
-                ret = sb_init_dio_done_wq(inode->i_sb);
-                if (ret < 0)
-                        goto out_free_dio;
+        if (iov_iter_rw(iter) == WRITE) {
+                /*
+                 * Try to invalidate cache pages for the range we are writing.
+                 * If this invalidation fails, let the caller fall back to
+                 * buffered I/O.
+                 */
+                if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
+                                end >> PAGE_SHIFT)) {
+                        trace_iomap_dio_invalidate_fail(inode, pos, count);
+                        ret = -ENOTBLK;
+                        goto out_free_dio;
+                }
+
+                if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
+                        ret = sb_init_dio_done_wq(inode->i_sb);
+                        if (ret < 0)
+                                goto out_free_dio;
+                }
         }
 
         inode_dio_begin(inode);
...
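The kernel-doc added above states the new contract: only direct writes invalidate the page cache, and a failed invalidation is reported as -ENOTBLK for the caller to handle, rather than a one-off stale-pagecache warning. A toy model of that decision follows; try_invalidate_range() is an invented helper, not a kernel API, and the real function operates on a struct kiocb / iov_iter pair rather than a plain int.

/*
 * Toy model of the new iomap_dio_rw() write-side decision.
 * Helper names and return type are illustrative only.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum dio_dir { DIO_READ, DIO_WRITE };

/* Invented helper: pretend a page in the range could not be invalidated. */
static bool try_invalidate_range(void)
{
        return false;
}

static int dio_setup(enum dio_dir dir)
{
        if (dir == DIO_WRITE) {
                /* A direct write must not race with cached pages... */
                if (!try_invalidate_range())
                        return -ENOTBLK;        /* ...so ask the caller to go buffered */
        }
        /* Direct reads no longer touch the page cache here at all. */
        return 0;
}

int main(void)
{
        printf("read  setup: %d\n", dio_setup(DIO_READ));
        printf("write setup: %d\n", dio_setup(DIO_WRITE));
        return 0;
}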
fs/iomap/trace.h

@@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \
 DEFINE_RANGE_EVENT(iomap_writepage);
 DEFINE_RANGE_EVENT(iomap_releasepage);
 DEFINE_RANGE_EVENT(iomap_invalidatepage);
+DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
 
 #define IOMAP_TYPE_STRINGS \
         { IOMAP_HOLE,           "HOLE" }, \
...
fs/xfs/xfs_file.c

@@ -505,7 +505,7 @@ xfs_file_dio_aio_write(
                  */
                 if (xfs_is_cow_inode(ip)) {
                         trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
-                        return -EREMCHG;
+                        return -ENOTBLK;
                 }
                 iolock = XFS_IOLOCK_EXCL;
         } else {
...

@@ -553,8 +553,8 @@ xfs_file_dio_aio_write(
         xfs_iunlock(ip, iolock);
 
         /*
-         * No fallback to buffered IO on errors for XFS, direct IO will either
-         * complete fully or fail.
+         * No fallback to buffered IO after short writes for XFS, direct I/O
+         * will either complete fully or return an error.
          */
         ASSERT(ret < 0 || ret == count);
         return ret;
...

@@ -714,7 +714,7 @@ xfs_file_write_iter(
                  * allow an operation to fall back to buffered mode.
                  */
                 ret = xfs_file_dio_aio_write(iocb, from);
-                if (ret != -EREMCHG)
+                if (ret != -ENOTBLK)
                         return ret;
         }
...
fs/zonefs/super.c

@@ -786,8 +786,11 @@ static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
         if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
                 return -EFBIG;
 
-        if (iocb->ki_flags & IOCB_DIRECT)
-                return zonefs_file_dio_write(iocb, from);
+        if (iocb->ki_flags & IOCB_DIRECT) {
+                ssize_t ret = zonefs_file_dio_write(iocb, from);
+                if (ret != -ENOTBLK)
+                        return ret;
+        }
 
         return zonefs_file_buffered_write(iocb, from);
 }
...