Commit 378f32ba authored by Matthew Bobrowski, committed by Theodore Ts'o

ext4: introduce direct I/O write using iomap infrastructure

This patch introduces a new direct I/O write path which makes use of
the iomap infrastructure.

All direct I/O writes are now passed from the ->write_iter() callback
through to the new direct I/O handler ext4_dio_write_iter(). This
function is responsible for calling into the iomap infrastructure via
iomap_dio_rw().
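
For reference, the resulting top-level dispatch in fs/ext4/file.c
(condensed from the diff below) is:

	static ssize_t
	ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);

		/* Refuse writes on a filesystem that has been shut down. */
		if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
			return -EIO;

	#ifdef CONFIG_FS_DAX
		if (IS_DAX(inode))
			return ext4_dax_write_iter(iocb, from);
	#endif
		/* Direct I/O now has its own handler built on iomap. */
		if (iocb->ki_flags & IOCB_DIRECT)
			return ext4_dio_write_iter(iocb, from);

		return ext4_buffered_write_iter(iocb, from);
	}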

Code snippets from the existing direct I/O write code within
ext4_file_write_iter(), such as checking whether the I/O request is
unaligned asynchronous I/O or whether the write will result in an
overwrite, have effectively been moved out into the new direct I/O
->write_iter() handler.
The block mapping flags that are eventually passed down to
ext4_map_blocks() from the *_get_block_*() suite of routines have been
taken out and introduced within ext4_iomap_alloc().
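
A rough sketch of that flag selection follows; it is illustrative only,
since ext4_iomap_alloc() itself is part of the collapsed fs/ext4/inode.c
diff and the real function carries more detail (retries, credit sizing,
and so on):

	/*
	 * Illustrative sketch only -- not the patch's exact code. The
	 * real ext4_iomap_alloc() is in the collapsed fs/ext4/inode.c
	 * diff. It picks the ext4_map_blocks() flags that the old
	 * *_get_block_*() helpers used to supply, under a journal handle.
	 */
	static int ext4_iomap_alloc(struct inode *inode,
				    struct ext4_map_blocks *map,
				    unsigned int flags)
	{
		handle_t *handle;
		int ret, m_flags = 0;

		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
				ext4_chunk_trans_blocks(inode, map->m_len));
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		if (IS_DAX(inode))
			/* DAX requires the new blocks to be zeroed out. */
			m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
		else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
			/* Direct I/O writes allocate unwritten extents. */
			m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
		else
			m_flags = EXT4_GET_BLOCKS_CREATE;

		ret = ext4_map_blocks(handle, inode, map, m_flags);

		ext4_journal_stop(handle);
		return ret < 0 ? ret : 0;
	}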

For inode extension cases, ext4_handle_inode_extension() is the
function responsible for performing the required metadata updates.
It is called after iomap_dio_rw() has returned so that we
can safely determine whether we need to potentially truncate any
allocated blocks that may have been prepared for this direct I/O
write. We don't perform the inode extension or truncate operations
from the ->end_io() handler as we don't have the original I/O 'length'
available there. The ->end_io() handler, however, is responsible for
converting allocated unwritten extents to written extents.
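
Concretely, the completion handler registered through struct
iomap_dio_ops (shown in full in the fs/ext4/file.c hunk below) reduces
to that extent conversion:

	static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
					 int error, unsigned int flags)
	{
		loff_t offset = iocb->ki_pos;
		struct inode *inode = file_inode(iocb->ki_filp);

		if (error)
			return error;

		/* Only I/O into unwritten extents needs post-processing. */
		if (size && flags & IOMAP_DIO_UNWRITTEN)
			return ext4_convert_unwritten_extents(NULL, inode,
							      offset, size);

		return 0;
	}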

In the instance of a short write, we fall back and complete the
remainder of the I/O using buffered I/O via
ext4_buffered_write_iter().
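
That fallback (excerpted from ext4_dio_write_iter() in the diff below)
finishes the residual bytes with buffered I/O, then writes back and
invalidates the affected page cache range to preserve the usual direct
I/O semantics:

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		/* Complete the remainder of the write via buffered I/O. */
		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/* Flush and drop the page cache over the buffered range. */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}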

The existing buffer_head direct I/O implementation has been removed as
it's now redundant.

[ Fix up ext4_dio_write_iter() per Jan's comments at
  https://lore.kernel.org/r/20191105135932.GN22379@quack2.suse.cz -- TYT ]
Signed-off-by: Matthew Bobrowski <mbobrowski@mbobrowski.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
Link: https://lore.kernel.org/r/e55db6f12ae6ff017f36774135e79f3e7b0333da.1572949325.git.mbobrowski.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 3eaf9cc6
fs/ext4/ext4.h
@@ -1584,7 +1584,6 @@ enum {
 	EXT4_STATE_NO_EXPAND,		/* No space for expansion */
 	EXT4_STATE_DA_ALLOC_CLOSE,	/* Alloc DA blks on close */
 	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
-	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
 	EXT4_STATE_NEWENTRY,		/* File just added to dir */
 	EXT4_STATE_MAY_INLINE_DATA,	/* may have in-inode data */
 	EXT4_STATE_EXT_PRECACHED,	/* extents have been precached */
@@ -2565,8 +2564,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 			     struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
 		   struct buffer_head *bh_result, int create);
-int ext4_dio_get_block(struct inode *inode, sector_t iblock,
-		       struct buffer_head *bh_result, int create);
 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 			   struct buffer_head *bh, int create);
 int ext4_walk_page_buffers(handle_t *handle,
...
fs/ext4/extents.c
@@ -1753,16 +1753,9 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 	 */
 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
 		return 0;
-	/*
-	 * The check for IO to unwritten extent is somewhat racy as we
-	 * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
-	 * dropping i_data_sem. But reserved blocks should save us in that
-	 * case.
-	 */
 	if (ext4_ext_is_unwritten(ex1) &&
-	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
-	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
-	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
+	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
 		return 0;
 #ifdef AGGRESSIVE_TEST
 	if (ext1_ee_len >= 4)
...
fs/ext4/file.c
@@ -29,6 +29,7 @@
 #include <linux/pagevec.h>
 #include <linux/uio.h>
 #include <linux/mman.h>
+#include <linux/backing-dev.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -155,13 +156,6 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static void ext4_unwritten_wait(struct inode *inode)
-{
-	wait_queue_head_t *wq = ext4_ioend_wq(inode);
-
-	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
-}
-
 /*
  * This tests whether the IO in question is block-aligned or not.
  * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
@@ -214,13 +208,13 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t ret;
 
+	if (unlikely(IS_IMMUTABLE(inode)))
+		return -EPERM;
+
 	ret = generic_write_checks(iocb, from);
 	if (ret <= 0)
 		return ret;
 
-	if (unlikely(IS_IMMUTABLE(inode)))
-		return -EPERM;
-
 	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
@@ -232,9 +226,42 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
 			return -EFBIG;
 		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
 	}
+
+	ret = file_modified(iocb->ki_filp);
+	if (ret)
+		return ret;
+
 	return iov_iter_count(from);
 }
 
+static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
+					struct iov_iter *from)
+{
+	ssize_t ret;
+	struct inode *inode = file_inode(iocb->ki_filp);
+
+	if (iocb->ki_flags & IOCB_NOWAIT)
+		return -EOPNOTSUPP;
+
+	inode_lock(inode);
+	ret = ext4_write_checks(iocb, from);
+	if (ret <= 0)
+		goto out;
+
+	current->backing_dev_info = inode_to_bdi(inode);
+	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+	current->backing_dev_info = NULL;
+
+out:
+	inode_unlock(inode);
+	if (likely(ret > 0)) {
+		iocb->ki_pos += ret;
+		ret = generic_write_sync(iocb, ret);
+	}
+
+	return ret;
+}
+
 static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
 					   ssize_t written, size_t count)
 {
@@ -316,6 +343,139 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
 	return written;
 }
 
+static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+				 int error, unsigned int flags)
+{
+	loff_t offset = iocb->ki_pos;
+	struct inode *inode = file_inode(iocb->ki_filp);
+
+	if (error)
+		return error;
+
+	if (size && flags & IOMAP_DIO_UNWRITTEN)
+		return ext4_convert_unwritten_extents(NULL, inode,
+						      offset, size);
+
+	return 0;
+}
+
+static const struct iomap_dio_ops ext4_dio_write_ops = {
+	.end_io = ext4_dio_write_end_io,
+};
+
+static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	ssize_t ret;
+	size_t count;
+	loff_t offset;
+	handle_t *handle;
+	struct inode *inode = file_inode(iocb->ki_filp);
+	bool extend = false, overwrite = false, unaligned_aio = false;
+
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		if (!inode_trylock(inode))
+			return -EAGAIN;
+	} else {
+		inode_lock(inode);
+	}
+
+	if (!ext4_dio_supported(inode)) {
+		inode_unlock(inode);
+		/*
+		 * Fallback to buffered I/O if the inode does not support
+		 * direct I/O.
+		 */
+		return ext4_buffered_write_iter(iocb, from);
+	}
+
+	ret = ext4_write_checks(iocb, from);
+	if (ret <= 0) {
+		inode_unlock(inode);
+		return ret;
+	}
+
+	/*
+	 * Unaligned asynchronous direct I/O must be serialized among each
+	 * other as the zeroing of partial blocks of two competing unaligned
+	 * asynchronous direct I/O writes can result in data corruption.
+	 */
+	offset = iocb->ki_pos;
+	count = iov_iter_count(from);
+	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+	    !is_sync_kiocb(iocb) && ext4_unaligned_aio(inode, from, offset)) {
+		unaligned_aio = true;
+		inode_dio_wait(inode);
+	}
+
+	/*
+	 * Determine whether the I/O will overwrite allocated and initialized
+	 * blocks. If so, check to see whether it is possible to take the
+	 * dioread_nolock path.
+	 */
+	if (!unaligned_aio && ext4_overwrite_io(inode, offset, count) &&
+	    ext4_should_dioread_nolock(inode)) {
+		overwrite = true;
+		downgrade_write(&inode->i_rwsem);
+	}
+
+	if (offset + count > EXT4_I(inode)->i_disksize) {
+		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			goto out;
+		}
+
+		ret = ext4_orphan_add(handle, inode);
+		if (ret) {
+			ext4_journal_stop(handle);
+			goto out;
+		}
+
+		extend = true;
+		ext4_journal_stop(handle);
+	}
+
+	ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &ext4_dio_write_ops,
+			   is_sync_kiocb(iocb) || unaligned_aio || extend);
+
+	if (extend)
+		ret = ext4_handle_inode_extension(inode, offset, ret, count);
+
+out:
+	if (overwrite)
+		inode_unlock_shared(inode);
+	else
+		inode_unlock(inode);
+
+	if (ret >= 0 && iov_iter_count(from)) {
+		ssize_t err;
+		loff_t endbyte;
+
+		offset = iocb->ki_pos;
+		err = ext4_buffered_write_iter(iocb, from);
+		if (err < 0)
+			return err;
+
+		/*
+		 * We need to ensure that the pages within the page cache for
+		 * the range covered by this I/O are written to disk and
+		 * invalidated. This is in an attempt to preserve the expected
+		 * direct I/O semantics in the case we fallback to buffered I/O
+		 * to complete off the I/O request.
+		 */
+		ret += err;
+		endbyte = offset + err - 1;
+		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
+						   offset, endbyte);
+		if (!err)
+			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
+						 offset >> PAGE_SHIFT,
+						 endbyte >> PAGE_SHIFT);
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_FS_DAX
 static ssize_t
 ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -332,15 +492,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			return -EAGAIN;
 		inode_lock(inode);
 	}
+
 	ret = ext4_write_checks(iocb, from);
 	if (ret <= 0)
 		goto out;
-	ret = file_remove_privs(iocb->ki_filp);
-	if (ret)
-		goto out;
-	ret = file_update_time(iocb->ki_filp);
-	if (ret)
-		goto out;
 
 	offset = iocb->ki_pos;
 	count = iov_iter_count(from);
@@ -378,10 +533,6 @@ static ssize_t
 ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
-	int o_direct = iocb->ki_flags & IOCB_DIRECT;
-	int unaligned_aio = 0;
-	int overwrite = 0;
-	ssize_t ret;
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
@@ -390,59 +541,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (IS_DAX(inode))
 		return ext4_dax_write_iter(iocb, from);
 #endif
+	if (iocb->ki_flags & IOCB_DIRECT)
+		return ext4_dio_write_iter(iocb, from);
 
-	if (!inode_trylock(inode)) {
-		if (iocb->ki_flags & IOCB_NOWAIT)
-			return -EAGAIN;
-		inode_lock(inode);
-	}
-
-	ret = ext4_write_checks(iocb, from);
-	if (ret <= 0)
-		goto out;
-
-	/*
-	 * Unaligned direct AIO must be serialized among each other as zeroing
-	 * of partial blocks of two competing unaligned AIOs can result in data
-	 * corruption.
-	 */
-	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
-	    !is_sync_kiocb(iocb) &&
-	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
-		unaligned_aio = 1;
-		ext4_unwritten_wait(inode);
-	}
-
-	iocb->private = &overwrite;
-	/* Check whether we do a DIO overwrite or not */
-	if (o_direct && !unaligned_aio) {
-		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
-			if (ext4_should_dioread_nolock(inode))
-				overwrite = 1;
-		} else if (iocb->ki_flags & IOCB_NOWAIT) {
-			ret = -EAGAIN;
-			goto out;
-		}
-	}
-
-	ret = __generic_file_write_iter(iocb, from);
-	/*
-	 * Unaligned direct AIO must be the only IO in flight. Otherwise
-	 * overlapping aligned IO after unaligned might result in data
-	 * corruption.
-	 */
-	if (ret == -EIOCBQUEUED && unaligned_aio)
-		ext4_unwritten_wait(inode);
-	inode_unlock(inode);
-
-	if (ret > 0)
-		ret = generic_write_sync(iocb, ret);
-
-	return ret;
-out:
-	inode_unlock(inode);
-	return ret;
+	return ext4_buffered_write_iter(iocb, from);
 }
 
 #ifdef CONFIG_FS_DAX
...
(The fs/ext4/inode.c portion of this diff is collapsed and not shown.)