Commit 2492a606 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Darrick J. Wong

xfs: initialize iomap->flags in xfs_bmbt_to_iomap

Currently we don't overwrite the flags field in the iomap in
xfs_bmbt_to_iomap.  This works fine with 0-initialized iomaps on stack,
but is harmful once we want to be able to reuse an iomap in the
writeback code.  Replace the shared parameter with a set of initial
flags and thus ensure the flags field is always reinitialized.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 7684e2c4
...@@ -54,7 +54,7 @@ xfs_bmbt_to_iomap( ...@@ -54,7 +54,7 @@ xfs_bmbt_to_iomap(
struct xfs_inode *ip, struct xfs_inode *ip,
struct iomap *iomap, struct iomap *iomap,
struct xfs_bmbt_irec *imap, struct xfs_bmbt_irec *imap,
bool shared) u16 flags)
{ {
struct xfs_mount *mp = ip->i_mount; struct xfs_mount *mp = ip->i_mount;
...@@ -79,12 +79,11 @@ xfs_bmbt_to_iomap( ...@@ -79,12 +79,11 @@ xfs_bmbt_to_iomap(
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip)); iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip)); iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
iomap->flags = flags;
if (xfs_ipincount(ip) && if (xfs_ipincount(ip) &&
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
iomap->flags |= IOMAP_F_DIRTY; iomap->flags |= IOMAP_F_DIRTY;
if (shared)
iomap->flags |= IOMAP_F_SHARED;
return 0; return 0;
} }
...@@ -540,6 +539,7 @@ xfs_file_iomap_begin_delay( ...@@ -540,6 +539,7 @@ xfs_file_iomap_begin_delay(
struct xfs_iext_cursor icur, ccur; struct xfs_iext_cursor icur, ccur;
xfs_fsblock_t prealloc_blocks = 0; xfs_fsblock_t prealloc_blocks = 0;
bool eof = false, cow_eof = false, shared = false; bool eof = false, cow_eof = false, shared = false;
u16 iomap_flags = 0;
int whichfork = XFS_DATA_FORK; int whichfork = XFS_DATA_FORK;
int error = 0; int error = 0;
...@@ -707,7 +707,7 @@ xfs_file_iomap_begin_delay( ...@@ -707,7 +707,7 @@ xfs_file_iomap_begin_delay(
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail. * them out if the write happens to fail.
*/ */
iomap->flags |= IOMAP_F_NEW; iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, whichfork, trace_xfs_iomap_alloc(ip, offset, count, whichfork,
whichfork == XFS_DATA_FORK ? &imap : &cmap); whichfork == XFS_DATA_FORK ? &imap : &cmap);
done: done:
...@@ -715,14 +715,17 @@ xfs_file_iomap_begin_delay( ...@@ -715,14 +715,17 @@ xfs_file_iomap_begin_delay(
if (imap.br_startoff > offset_fsb) { if (imap.br_startoff > offset_fsb) {
xfs_trim_extent(&cmap, offset_fsb, xfs_trim_extent(&cmap, offset_fsb,
imap.br_startoff - offset_fsb); imap.br_startoff - offset_fsb);
error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); error = xfs_bmbt_to_iomap(ip, iomap, &cmap,
IOMAP_F_SHARED);
goto out_unlock; goto out_unlock;
} }
/* ensure we only report blocks we have a reservation for */ /* ensure we only report blocks we have a reservation for */
xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount); xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
shared = true; shared = true;
} }
error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared); if (shared)
iomap_flags |= IOMAP_F_SHARED;
error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_unlock: out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error; return error;
...@@ -930,6 +933,7 @@ xfs_file_iomap_begin( ...@@ -930,6 +933,7 @@ xfs_file_iomap_begin(
xfs_fileoff_t offset_fsb, end_fsb; xfs_fileoff_t offset_fsb, end_fsb;
int nimaps = 1, error = 0; int nimaps = 1, error = 0;
bool shared = false; bool shared = false;
u16 iomap_flags = 0;
unsigned lockmode; unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
...@@ -1045,7 +1049,7 @@ xfs_file_iomap_begin( ...@@ -1045,7 +1049,7 @@ xfs_file_iomap_begin(
if (error) if (error)
return error; return error;
iomap->flags |= IOMAP_F_NEW; iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap); trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
out_finish: out_finish:
...@@ -1055,8 +1059,10 @@ xfs_file_iomap_begin( ...@@ -1055,8 +1059,10 @@ xfs_file_iomap_begin(
* there is no other metadata changes pending or have been made here. * there is no other metadata changes pending or have been made here.
*/ */
if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode)) if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
iomap->flags |= IOMAP_F_DIRTY; iomap_flags |= IOMAP_F_DIRTY;
return xfs_bmbt_to_iomap(ip, iomap, &imap, shared); if (shared)
iomap_flags |= IOMAP_F_SHARED;
return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_found: out_found:
ASSERT(nimaps); ASSERT(nimaps);
...@@ -1200,7 +1206,7 @@ xfs_seek_iomap_begin( ...@@ -1200,7 +1206,7 @@ xfs_seek_iomap_begin(
if (data_fsb < cow_fsb + cmap.br_blockcount) if (data_fsb < cow_fsb + cmap.br_blockcount)
end_fsb = min(end_fsb, data_fsb); end_fsb = min(end_fsb, data_fsb);
xfs_trim_extent(&cmap, offset_fsb, end_fsb); xfs_trim_extent(&cmap, offset_fsb, end_fsb);
error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
/* /*
* This is a COW extent, so we must probe the page cache * This is a COW extent, so we must probe the page cache
* because there could be dirty page cache being backed * because there could be dirty page cache being backed
...@@ -1222,7 +1228,7 @@ xfs_seek_iomap_begin( ...@@ -1222,7 +1228,7 @@ xfs_seek_iomap_begin(
imap.br_state = XFS_EXT_NORM; imap.br_state = XFS_EXT_NORM;
done: done:
xfs_trim_extent(&imap, offset_fsb, end_fsb); xfs_trim_extent(&imap, offset_fsb, end_fsb);
error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock: out_unlock:
xfs_iunlock(ip, lockmode); xfs_iunlock(ip, lockmode);
return error; return error;
...@@ -1268,7 +1274,7 @@ xfs_xattr_iomap_begin( ...@@ -1268,7 +1274,7 @@ xfs_xattr_iomap_begin(
if (error) if (error)
return error; return error;
ASSERT(nimaps); ASSERT(nimaps);
return xfs_bmbt_to_iomap(ip, iomap, &imap, false); return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
} }
const struct iomap_ops xfs_xattr_iomap_ops = { const struct iomap_ops xfs_xattr_iomap_ops = {
......
...@@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, ...@@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool); int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
struct xfs_bmbt_irec *, bool shared); struct xfs_bmbt_irec *, u16);
xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize); xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
static inline xfs_filblks_t static inline xfs_filblks_t
......
...@@ -178,7 +178,7 @@ xfs_fs_map_blocks( ...@@ -178,7 +178,7 @@ xfs_fs_map_blocks(
} }
xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_iunlock(ip, XFS_IOLOCK_EXCL);
error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
*device_generation = mp->m_generation; *device_generation = mp->m_generation;
return error; return error;
out_unlock: out_unlock:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment