Commit 4be4a00f authored by Christoph Hellwig, committed by Felix Blyakher

xfs: a couple getbmap cleanups

 - reshuffle various conditionals for data vs attr fork to make the code
   more readable
 - do fine-grained goto-based error handling (see the sketch after the commit metadata below)
 - exit early from conditionals instead of keeping a long else branch around
 - allow kmem_alloc to fail
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
Reviewed-by: Felix Blyakher <felixb@sgi.com>
Signed-off-by: Felix Blyakher <felixb@sgi.com>
parent 2ac00af7
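The cleanups listed above all serve one shape: take resources in a fixed order, exit a conditional as soon as the answer is known, and on failure jump to the unwind label that releases only what has already been acquired, which is also what makes it safe to let the allocation fail rather than sleep forever. Below is a minimal, self-contained userspace sketch of that shape, not XFS code: getbmap_sketch(), fill_maps(), the two mutexes and struct map are invented stand-ins, and malloc() stands in for kmem_alloc(..., KM_MAYFAIL).

/*
 * Illustrative sketch only -- the names below are made up and are not
 * kernel or XFS APIs.  It mirrors the unwind-label style the patch
 * moves xfs_getbmap() to: acquire, then on error jump to the label
 * that undoes exactly what has been done so far.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t maplock = PTHREAD_MUTEX_INITIALIZER;

struct map { long start, len; };

/* Stand-in for the xfs_bmapi() calls that fill the map array. */
static int fill_maps(struct map *map, int n)
{
	for (int i = 0; i < n; i++)
		map[i] = (struct map){ .start = i * 8, .len = 8 };
	return 0;
}

static int getbmap_sketch(int subnex)
{
	struct map *map;
	int error;

	pthread_mutex_lock(&iolock);
	pthread_mutex_lock(&maplock);

	/* The allocation is allowed to fail; unwind instead of sleeping. */
	error = ENOMEM;
	map = malloc(subnex * sizeof(*map));
	if (!map)
		goto out_unlock_maplock;

	error = fill_maps(map, subnex);
	if (error)
		goto out_free_map;	/* early exit, no long else branch */

	printf("first extent: %ld+%ld\n", map[0].start, map[0].len);
	error = 0;

out_free_map:
	free(map);
out_unlock_maplock:
	pthread_mutex_unlock(&maplock);
	pthread_mutex_unlock(&iolock);
	return error;
}

int main(void)
{
	return getbmap_sketch(16);
}

In the patch itself the same shape appears as the out_free_map, out_unlock_ilock and out_unlock_iolock labels, with kmem_alloc(..., KM_MAYFAIL) playing the role malloc() plays here. The full diff follows.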
@@ -5880,7 +5880,7 @@ xfs_getbmap(
 	void			*arg)		/* formatter arg */
 {
 	__int64_t		bmvend;		/* last block requested */
-	int			error;		/* return value */
+	int			error = 0;	/* return value */
 	__int64_t		fixlen;		/* length for -1 case */
 	int			i;		/* extent number */
 	int			lock;		/* lock state */
@@ -5899,30 +5899,8 @@ xfs_getbmap(
 	mp = ip->i_mount;
 	iflags = bmv->bmv_iflags;
 	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
 
-	/* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
-	 * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ
-	 * bit is set for the file, generate a read event in order
-	 * that the DMAPI application may do its thing before we return
-	 * the extents. Usually this means restoring user file data to
-	 * regions of the file that look like holes.
-	 *
-	 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
-	 * BMV_IF_NO_DMAPI_READ so that read events are generated.
-	 * If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
-	 * could misinterpret holes in a DMAPI file as true holes,
-	 * when in fact they may represent offline user data.
-	 */
-	if ((iflags & BMV_IF_NO_DMAPI_READ) == 0 &&
-	    DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
-	    whichfork == XFS_DATA_FORK) {
-		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
-		if (error)
-			return XFS_ERROR(error);
-	}
-
 	if (whichfork == XFS_ATTR_FORK) {
 		if (XFS_IFORK_Q(ip)) {
 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
@@ -5936,11 +5914,37 @@ xfs_getbmap(
 					ip->i_mount);
 			return XFS_ERROR(EFSCORRUPTED);
 		}
-	} else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+
+		prealloced = 0;
+		fixlen = 1LL << 32;
+	} else {
+		/*
+		 * If the BMV_IF_NO_DMAPI_READ interface bit specified, do
+		 * not generate a DMAPI read event. Otherwise, if the
+		 * DM_EVENT_READ bit is set for the file, generate a read
+		 * event in order that the DMAPI application may do its thing
+		 * before we return the extents. Usually this means restoring
+		 * user file data to regions of the file that look like holes.
+		 *
+		 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
+		 * BMV_IF_NO_DMAPI_READ so that read events are generated.
+		 * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
+		 * could misinterpret holes in a DMAPI file as true holes,
+		 * when in fact they may represent offline user data.
+		 */
+		if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
+		    !(iflags & BMV_IF_NO_DMAPI_READ)) {
+			error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
+					      0, 0, 0, NULL);
+			if (error)
+				return XFS_ERROR(error);
+		}
+
+		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
 			return XFS_ERROR(EINVAL);
-	if (whichfork == XFS_DATA_FORK) {
+
 		if (xfs_get_extsz_hint(ip) ||
 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
 			prealloced = 1;
@@ -5949,42 +5953,34 @@ xfs_getbmap(
 			prealloced = 0;
 			fixlen = ip->i_size;
 		}
-	} else {
-		prealloced = 0;
-		fixlen = 1LL << 32;
 	}
 
 	if (bmv->bmv_length == -1) {
 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
-		bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
-					(__int64_t)0);
-	} else if (bmv->bmv_length < 0)
-		return XFS_ERROR(EINVAL);
-	if (bmv->bmv_length == 0) {
+		bmv->bmv_length =
+			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
+	} else if (bmv->bmv_length == 0) {
 		bmv->bmv_entries = 0;
 		return 0;
+	} else if (bmv->bmv_length < 0) {
+		return XFS_ERROR(EINVAL);
 	}
 
 	nex = bmv->bmv_count - 1;
 	if (nex <= 0)
 		return XFS_ERROR(EINVAL);
 	bmvend = bmv->bmv_offset + bmv->bmv_length;
 
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
-	if (((iflags & BMV_IF_DELALLOC) == 0) &&
-	    (whichfork == XFS_DATA_FORK) &&
-	    (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) {
-		/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
-		error = xfs_flush_pages(ip, (xfs_off_t)0,
-					-1, 0, FI_REMAPF);
-		if (error) {
-			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-			return error;
+	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
+		if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
+			error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
+			if (error)
+				goto out_unlock_iolock;
 		}
-	}
 
-	ASSERT(whichfork == XFS_ATTR_FORK || (iflags & BMV_IF_DELALLOC) ||
-	       ip->i_delayed_blks == 0);
+		ASSERT(ip->i_delayed_blks == 0);
+	}
 
 	lock = xfs_ilock_map_shared(ip);
@@ -5995,23 +5991,25 @@ xfs_getbmap(
 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
 
-	bmapi_flags = xfs_bmapi_aflag(whichfork) |
-			((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE);
+	bmapi_flags = xfs_bmapi_aflag(whichfork);
+	if (!(iflags & BMV_IF_PREALLOC))
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
 
 	/*
 	 * Allocate enough space to handle "subnex" maps at a time.
 	 */
+	error = ENOMEM;
 	subnex = 16;
-	map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
+	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL);
+	if (!map)
+		goto out_unlock_ilock;
 
 	bmv->bmv_entries = 0;
 
-	if ((XFS_IFORK_NEXTENTS(ip, whichfork) == 0)) {
-		if (((iflags & BMV_IF_DELALLOC) == 0) ||
-		    whichfork == XFS_ATTR_FORK) {
-			error = 0;
-			goto unlock_and_return;
-		}
+	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
+	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
+		error = 0;
+		goto out_free_map;
 	}
 
 	nexleft = nex;
@@ -6023,10 +6021,12 @@ xfs_getbmap(
 				  bmapi_flags, NULL, 0, map, &nmap,
 				  NULL, NULL);
 		if (error)
-			goto unlock_and_return;
+			goto out_free_map;
 		ASSERT(nmap <= subnex);
 
 		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+			int full = 0;	/* user array is full */
+
 			out.bmv_oflags = 0;
 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
 				out.bmv_oflags |= BMV_OF_PREALLOC;
@@ -6041,36 +6041,32 @@ xfs_getbmap(
 			    whichfork == XFS_ATTR_FORK) {
 				/* came to the end of attribute fork */
 				out.bmv_oflags |= BMV_OF_LAST;
-				goto unlock_and_return;
-			} else {
-				int full = 0;	/* user array is full */
-
-				if (!xfs_getbmapx_fix_eof_hole(ip, &out,
-						prealloced, bmvend,
-						map[i].br_startblock)) {
-					goto unlock_and_return;
-				}
-
-				/* format results & advance arg */
-				error = formatter(&arg, &out, &full);
-				if (error || full)
-					goto unlock_and_return;
-				nexleft--;
-				bmv->bmv_offset =
-					out.bmv_offset + out.bmv_length;
-				bmv->bmv_length = MAX((__int64_t)0,
-					(__int64_t)(bmvend - bmv->bmv_offset));
-				bmv->bmv_entries++;
-			}
+				goto out_free_map;
+			}
+
+			if (!xfs_getbmapx_fix_eof_hole(ip, &out, prealloced,
+					bmvend, map[i].br_startblock))
+				goto out_free_map;
+
+			/* format results & advance arg */
+			error = formatter(&arg, &out, &full);
+			if (error || full)
+				goto out_free_map;
+			nexleft--;
+			bmv->bmv_offset =
+				out.bmv_offset + out.bmv_length;
+			bmv->bmv_length =
+				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+			bmv->bmv_entries++;
 		}
 	} while (nmap && nexleft && bmv->bmv_length);
 
- unlock_and_return:
+ out_free_map:
+	kmem_free(map);
+ out_unlock_ilock:
 	xfs_iunlock_map_shared(ip, lock);
+ out_unlock_iolock:
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
-	kmem_free(map);
-
 	return error;
 }