Commit 12735f88 authored by Jan Kara, committed by Theodore Ts'o

ext4: pre-zero allocated blocks for DAX IO

Currently ext4 treats DAX IO the same way as direct IO: it allocates
unwritten extents before the IO is done and converts them to written
extents afterwards. However, this way DAX IO can race with a page fault
to the same area:

ext4_ext_direct_IO()				dax_fault()
  dax_io()
    get_block() - allocates unwritten extent
    copy_from_iter_pmem()
						  get_block() - converts
						    unwritten block to
						    written and zeroes it
						    out
  ext4_convert_unwritten_extents()

So data written with DAX IO gets lost. Similarly, dax_new_buf() called
from dax_io() can overwrite data that has already been written to the
block via mmap.
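
For illustration, a minimal userspace sketch of the racing pattern, an
allocating O_DIRECT write racing with a store through a shared mapping of
the same block, could look as follows. The mount path and block size are
assumptions; the program only exercises the access pattern and is not a
reliable reproducer, nor part of this patch:

/* gcc -O2 -pthread dax_race_sketch.c -o dax_race_sketch */
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define BLK 4096	/* assumed filesystem block size */

static int fd;

/* Allocating O_DIRECT write into a hole; on DAX this goes through dax_io() */
static void *dio_writer(void *arg)
{
	void *buf;

	(void)arg;
	if (posix_memalign(&buf, BLK, BLK))
		return NULL;
	memset(buf, 'D', BLK);
	if (pwrite(fd, buf, BLK, 0) != BLK)
		perror("pwrite");
	free(buf);
	return NULL;
}

/* Store through a shared mapping of the same block; triggers a DAX fault */
static void *mmap_writer(void *arg)
{
	char *p;

	(void)arg;
	p = mmap(NULL, BLK, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	p[0] = 'M';
	munmap(p, BLK);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	/* /mnt/dax is an assumed DAX-capable ext4 mount (-o dax) */
	fd = open("/mnt/dax/testfile", O_RDWR | O_CREAT | O_TRUNC | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* extend i_size so block 0 is a hole inside the file */
	if (ftruncate(fd, BLK))
		perror("ftruncate");

	pthread_create(&t1, NULL, dio_writer, NULL);
	pthread_create(&t2, NULL, mmap_writer, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	close(fd);
	return 0;
}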

Fix the problem by using pre-zeroed blocks for DAX IO the same way we
use them for DAX mmap. The downside of this solution is that every
allocating write writes each block twice (once with zeros, once with
data). Fixing the race with locking would also be possible, but we would
need to lock out page faults for the whole range written to by DAX IO,
and that is not easy to do without locking out faults for the whole
file, which seems too aggressive.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 914f82a3
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2527,7 +2527,7 @@ struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
 struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 			     struct buffer_head *bh_result, int create);
-int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
-			    struct buffer_head *bh_result, int create);
+int ext4_dax_get_block(struct inode *inode, sector_t iblock,
+		       struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
 		   struct buffer_head *bh_result, int create);
@@ -3334,6 +3334,13 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
 	}
 }
 
+static inline bool ext4_aligned_io(struct inode *inode, loff_t off, loff_t len)
+{
+	int blksize = 1 << inode->i_blkbits;
+
+	return IS_ALIGNED(off, blksize) && IS_ALIGNED(len, blksize);
+}
+
 #endif	/* __KERNEL__ */
 
 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
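
A standalone userspace sketch of the check that ext4_aligned_io() performs,
useful for seeing which (offset, length) pairs can take the zeroing-free
path: an IO counts as aligned when both values are multiples of the
filesystem block size. IS_ALIGNED is redefined locally since the kernel
header is not available here, and the 4096-byte block size is an
assumption for illustration:

#include <stdbool.h>
#include <stdio.h>

/* power-of-two alignment test, mirroring the kernel's IS_ALIGNED() */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* same logic as ext4_aligned_io(), with i_blkbits passed in explicitly */
static bool aligned_io(unsigned int blkbits, long long off, long long len)
{
	long long blksize = 1LL << blkbits;

	return IS_ALIGNED(off, blksize) && IS_ALIGNED(len, blksize);
}

int main(void)
{
	/* 4096-byte blocks, i.e. i_blkbits == 12 */
	printf("%d\n", aligned_io(12, 8192, 4096));	/* 1: block-aligned IO */
	printf("%d\n", aligned_io(12, 8192, 1000));	/* 0: partial block    */
	printf("%d\n", aligned_io(12, 100, 4096));	/* 0: unaligned offset */
	return 0;
}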
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -207,7 +207,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (IS_ERR(handle))
 		result = VM_FAULT_SIGBUS;
 	else
-		result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);
+		result = __dax_fault(vma, vmf, ext4_dax_get_block, NULL);
 
 	if (write) {
 		if (!IS_ERR(handle))
@@ -243,7 +243,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
 		result = VM_FAULT_SIGBUS;
 	else
 		result = __dax_pmd_fault(vma, addr, pmd, flags,
-				ext4_dax_mmap_get_block, NULL);
+				ext4_dax_get_block, NULL);
 
 	if (write) {
 		if (!IS_ERR(handle))
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3229,13 +3229,17 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 #ifdef CONFIG_FS_DAX
-int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
-			    struct buffer_head *bh_result, int create)
+/*
+ * Get block function for DAX IO and mmap faults. It takes care of converting
+ * unwritten extents to written ones and initializes new / converted blocks
+ * to zeros.
+ */
+int ext4_dax_get_block(struct inode *inode, sector_t iblock,
+		       struct buffer_head *bh_result, int create)
 {
 	int ret;
 
-	ext4_debug("ext4_dax_mmap_get_block: inode %lu, create flag %d\n",
-		   inode->i_ino, create);
+	ext4_debug("inode %lu, create flag %d\n", inode->i_ino, create);
 	if (!create)
 		return _ext4_get_block(inode, iblock, bh_result, 0);
 
@@ -3247,9 +3251,9 @@ int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
 
 	if (buffer_unwritten(bh_result)) {
 		/*
-		 * We are protected by i_mmap_sem so we know block cannot go
-		 * away from under us even though we dropped i_data_sem.
-		 * Convert extent to written and write zeros there.
+		 * We are protected by i_mmap_sem or i_mutex so we know block
+		 * cannot go away from under us even though we dropped
+		 * i_data_sem. Convert extent to written and write zeros there.
 		 */
 		ret = ext4_get_block_trans(inode, iblock, bh_result,
 					   EXT4_GET_BLOCKS_CONVERT |
@@ -3264,6 +3268,14 @@ int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
 	clear_buffer_new(bh_result);
 	return 0;
 }
+#else
+/* Just define empty function, it will never get called. */
+int ext4_dax_get_block(struct inode *inode, sector_t iblock,
+		       struct buffer_head *bh_result, int create)
+{
+	BUG();
+	return 0;
+}
 #endif
 
 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
@@ -3385,7 +3397,19 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter,
 	iocb->private = NULL;
 	if (overwrite)
 		get_block_func = ext4_dio_get_block_overwrite;
-	else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+	else if (IS_DAX(inode)) {
+		/*
+		 * We can avoid zeroing for aligned DAX writes beyond EOF. Other
+		 * writes need zeroing either because they can race with page
+		 * faults or because they use partial blocks.
+		 */
+		if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
+		    ext4_aligned_io(inode, offset, count))
+			get_block_func = ext4_dio_get_block;
+		else
+			get_block_func = ext4_dax_get_block;
+		dio_flags = DIO_LOCKING;
+	} else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
 		   round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
 		get_block_func = ext4_dio_get_block;
 		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
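
To make the new branch easier to follow, here is a standalone sketch of the
resulting get_block selection for allocating DIO/DAX writes. The enum and
helper names are invented for illustration; in the kernel the choice is made
inline with function pointers, and the final fall-through stands in for the
remaining unwritten-extent DIO cases that this hunk does not show:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-ins for the get_block callbacks used by ext4 */
enum get_block_choice {
	DIO_GET_BLOCK_OVERWRITE,	/* ext4_dio_get_block_overwrite */
	DIO_GET_BLOCK,			/* ext4_dio_get_block */
	DAX_GET_BLOCK,			/* ext4_dax_get_block (pre-zeroed blocks) */
	DIO_GET_BLOCK_UNWRITTEN		/* remaining unwritten-extent DIO paths */
};

static enum get_block_choice pick_get_block(bool overwrite, bool is_dax,
					    bool extent_mapped,
					    bool write_beyond_eof, bool aligned)
{
	if (overwrite)
		return DIO_GET_BLOCK_OVERWRITE;
	if (is_dax) {
		/*
		 * Aligned DAX writes beyond EOF cannot race with page faults
		 * and touch no partial blocks, so they may skip pre-zeroing.
		 */
		if (write_beyond_eof && aligned)
			return DIO_GET_BLOCK;
		return DAX_GET_BLOCK;
	}
	if (!extent_mapped || write_beyond_eof)
		return DIO_GET_BLOCK;
	return DIO_GET_BLOCK_UNWRITTEN;
}

int main(void)
{
	/* allocating DAX write inside i_size -> pre-zeroing path */
	printf("%d\n", pick_get_block(false, true, true, false, true));
	/* aligned DAX append beyond EOF -> plain DIO get_block */
	printf("%d\n", pick_get_block(false, true, true, true, true));
	return 0;
}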
@@ -3400,7 +3424,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter,
 	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
 #endif
 	if (IS_DAX(inode)) {
-		dio_flags &= ~DIO_SKIP_HOLES;
 		ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
 				ext4_end_io_dio, dio_flags);
 	} else