Commit 7eeabbd4 authored by Dave Chinner

Merge branch 'xfs-misc-fixes-for-4.5' into for-next

parents 609adfc2 a841b64d
@@ -535,6 +535,7 @@ xfs_agfl_write_verify(
 }
 
 const struct xfs_buf_ops xfs_agfl_buf_ops = {
+	.name = "xfs_agfl",
 	.verify_read = xfs_agfl_read_verify,
 	.verify_write = xfs_agfl_write_verify,
 };
@@ -1926,7 +1927,7 @@ xfs_alloc_space_available(
  * Decide whether to use this allocation group for this allocation.
  * If so, fix up the btree freelist's size.
  */
-STATIC int			/* error */
+int				/* error */
 xfs_alloc_fix_freelist(
 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
 	int			flags)	/* XFS_ALLOC_FLAG_... */
@@ -2339,6 +2340,7 @@ xfs_agf_write_verify(
 }
 
 const struct xfs_buf_ops xfs_agf_buf_ops = {
+	.name = "xfs_agf",
 	.verify_read = xfs_agf_read_verify,
 	.verify_write = xfs_agf_write_verify,
 };
......
@@ -235,5 +235,6 @@ xfs_alloc_get_rec(
 int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
 			xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
+int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
 
 #endif	/* __XFS_ALLOC_H__ */
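
xfs_alloc_fix_freelist() loses its STATIC and gains a prototype here so callers outside xfs_alloc.c can top up an AG's free list before allocating or freeing. A rough, hypothetical call site, not part of this commit; the argument setup is an assumption and only mp/tp/agno/flags are shown:

/* hypothetical caller, not part of this commit */
static int
example_fix_freelist(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_alloc_arg	args;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	args.agno = agno;

	/* flags of 0 is an assumption; XFS_ALLOC_FLAG_* modifiers also exist */
	return xfs_alloc_fix_freelist(&args, 0);
}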
@@ -293,14 +293,7 @@ xfs_allocbt_verify(
 	level = be16_to_cpu(block->bb_level);
 	switch (block->bb_magic) {
 	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
-		if (!xfs_sb_version_hascrc(&mp->m_sb))
-			return false;
-		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
-			return false;
-		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
-			return false;
-		if (pag &&
-		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
+		if (!xfs_btree_sblock_v5hdr_verify(bp))
 			return false;
 		/* fall through */
 	case cpu_to_be32(XFS_ABTB_MAGIC):
@@ -311,14 +304,7 @@ xfs_allocbt_verify(
 			return false;
 		break;
 	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
-		if (!xfs_sb_version_hascrc(&mp->m_sb))
-			return false;
-		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
-			return false;
-		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
-			return false;
-		if (pag &&
-		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
+		if (!xfs_btree_sblock_v5hdr_verify(bp))
 			return false;
 		/* fall through */
 	case cpu_to_be32(XFS_ABTC_MAGIC):
@@ -332,21 +318,7 @@ xfs_allocbt_verify(
 		return false;
 	}
 
-	/* numrecs verification */
-	if (be16_to_cpu(block->bb_numrecs) > mp->m_alloc_mxr[level != 0])
-		return false;
-
-	/* sibling pointer verification */
-	if (!block->bb_u.s.bb_leftsib ||
-	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
-	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
-		return false;
-	if (!block->bb_u.s.bb_rightsib ||
-	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
-	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
-		return false;
-
-	return true;
+	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
 }
 
 static void
@@ -379,6 +351,7 @@ xfs_allocbt_write_verify(
 }
 
 const struct xfs_buf_ops xfs_allocbt_buf_ops = {
+	.name = "xfs_allocbt",
 	.verify_read = xfs_allocbt_read_verify,
 	.verify_write = xfs_allocbt_write_verify,
 };
......
@@ -328,6 +328,7 @@ xfs_attr3_leaf_read_verify(
 }
 
 const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
+	.name = "xfs_attr3_leaf",
 	.verify_read = xfs_attr3_leaf_read_verify,
 	.verify_write = xfs_attr3_leaf_write_verify,
 };
......
@@ -201,6 +201,7 @@ xfs_attr3_rmt_write_verify(
 }
 
 const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
+	.name = "xfs_attr3_rmt",
 	.verify_read = xfs_attr3_rmt_read_verify,
 	.verify_write = xfs_attr3_rmt_write_verify,
 };
......
@@ -32,13 +32,13 @@ int
 xfs_bitmap_empty(uint *map, uint size)
 {
 	uint i;
-	uint ret = 0;
 
 	for (i = 0; i < size; i++) {
-		ret |= map[i];
+		if (map[i] != 0)
+			return 0;
 	}
 
-	return (ret == 0);
+	return 1;
 }
 
 /*
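
The rewrite keeps the same contract, 1 when every word is zero and 0 otherwise, but bails out at the first set bit instead of OR-ing the whole map. A stand-alone comparison of the two versions (not kernel code):

#include <stdio.h>

typedef unsigned int uint;

static int bitmap_empty_old(uint *map, uint size)
{
	uint i, ret = 0;

	for (i = 0; i < size; i++)
		ret |= map[i];
	return ret == 0;
}

static int bitmap_empty_new(uint *map, uint size)
{
	uint i;

	for (i = 0; i < size; i++) {
		if (map[i] != 0)
			return 0;
	}
	return 1;
}

int main(void)
{
	uint a[4] = { 0, 0, 0, 0 };
	uint b[4] = { 0, 0, 4, 0 };

	printf("%d %d\n", bitmap_empty_old(a, 4), bitmap_empty_new(a, 4));	/* 1 1 */
	printf("%d %d\n", bitmap_empty_old(b, 4), bitmap_empty_new(b, 4));	/* 0 0 */
	return 0;
}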
......
@@ -1723,10 +1723,11 @@ xfs_bmap_add_extent_delay_real(
 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
 	xfs_filblks_t		temp2=0;/* value for da_new calculations */
 	int			tmp_rval;	/* partial logging flags */
+	int			whichfork = XFS_DATA_FORK;
 	struct xfs_mount	*mp;
 
-	mp = bma->tp ? bma->tp->t_mountp : NULL;
-	ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
+	mp = bma->ip->i_mount;
+	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 
 	ASSERT(bma->idx >= 0);
 	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
@@ -1785,7 +1786,7 @@ xfs_bmap_add_extent_delay_real(
 	 * Don't set contiguous if the combined extent would be too large.
 	 * Also check for all-three-contiguous being too large.
 	 */
-	if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+	if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
@@ -2016,10 +2017,10 @@ xfs_bmap_add_extent_delay_real(
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 
-		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 					bma->firstblock, bma->flist,
-					&bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
+					&bma->cur, 1, &tmp_rval, whichfork);
 			rval |= tmp_rval;
 			if (error)
 				goto done;
@@ -2100,10 +2101,10 @@ xfs_bmap_add_extent_delay_real(
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 
-		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 				bma->firstblock, bma->flist, &bma->cur, 1,
-				&tmp_rval, XFS_DATA_FORK);
+				&tmp_rval, whichfork);
 			rval |= tmp_rval;
 			if (error)
 				goto done;
@@ -2169,10 +2170,10 @@ xfs_bmap_add_extent_delay_real(
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 
-		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 					bma->firstblock, bma->flist, &bma->cur,
-					1, &tmp_rval, XFS_DATA_FORK);
+					1, &tmp_rval, whichfork);
 			rval |= tmp_rval;
 			if (error)
 				goto done;
@@ -2215,13 +2216,13 @@ xfs_bmap_add_extent_delay_real(
 	}
 
 	/* convert to a btree if necessary */
-	if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
 		int	tmp_logflags;	/* partial log flag return val */
 
 		ASSERT(bma->cur == NULL);
 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 				bma->firstblock, bma->flist, &bma->cur,
-				da_old > 0, &tmp_logflags, XFS_DATA_FORK);
+				da_old > 0, &tmp_logflags, whichfork);
 		bma->logflags |= tmp_logflags;
 		if (error)
 			goto done;
@@ -2242,7 +2243,7 @@ xfs_bmap_add_extent_delay_real(
 	if (bma->cur)
 		bma->cur->bc_private.b.allocated = 0;
 
-	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
+	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
 done:
 	bma->logflags |= rval;
 	return error;
@@ -2939,7 +2940,7 @@ xfs_bmap_add_extent_hole_real(
 	int			state;	/* state bits, accessed thru macros */
 	struct xfs_mount	*mp;
 
-	mp = bma->tp ? bma->tp->t_mountp : NULL;
+	mp = bma->ip->i_mount;
 	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 
 	ASSERT(bma->idx >= 0);
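
The xfs_bmap.c changes are mechanical: derive mp from the inode rather than the possibly NULL transaction, and thread a whichfork local through instead of hard-coding XFS_DATA_FORK. A hypothetical helper, not part of the commit, just to show the fork-agnostic pattern:

/* hypothetical helper, not in the commit: count in-core extents per fork */
static int
example_fork_extent_count(
	struct xfs_inode	*ip,
	int			whichfork)	/* XFS_DATA_FORK or XFS_ATTR_FORK */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
}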
......
@@ -720,6 +720,7 @@ xfs_bmbt_write_verify(
 }
 
 const struct xfs_buf_ops xfs_bmbt_buf_ops = {
+	.name = "xfs_bmbt",
 	.verify_read = xfs_bmbt_read_verify,
 	.verify_write = xfs_bmbt_write_verify,
 };
......
@@ -4080,3 +4080,61 @@ xfs_btree_change_owner(
 	return 0;
 }
+
+/**
+ * xfs_btree_sblock_v5hdr_verify() -- verify the v5 fields of a short-format
+ *				      btree block
+ *
+ * @bp: buffer containing the btree block
+ * @max_recs: pointer to the m_*_mxr max records field in the xfs mount
+ * @pag_max_level: pointer to the per-ag max level field
+ */
+bool
+xfs_btree_sblock_v5hdr_verify(
+	struct xfs_buf		*bp)
+{
+	struct xfs_mount	*mp = bp->b_target->bt_mount;
+	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
+	struct xfs_perag	*pag = bp->b_pag;
+
+	if (!xfs_sb_version_hascrc(&mp->m_sb))
+		return false;
+	if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
+		return false;
+	if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
+		return false;
+	if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
+		return false;
+	return true;
+}
+
+/**
+ * xfs_btree_sblock_verify() -- verify a short-format btree block
+ *
+ * @bp: buffer containing the btree block
+ * @max_recs: maximum records allowed in this btree node
+ */
+bool
+xfs_btree_sblock_verify(
+	struct xfs_buf		*bp,
+	unsigned int		max_recs)
+{
+	struct xfs_mount	*mp = bp->b_target->bt_mount;
+	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
+
+	/* numrecs verification */
+	if (be16_to_cpu(block->bb_numrecs) > max_recs)
+		return false;
+
+	/* sibling pointer verification */
+	if (!block->bb_u.s.bb_leftsib ||
+	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
+	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
+		return false;
+	if (!block->bb_u.s.bb_rightsib ||
+	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
+	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
+		return false;
+
+	return true;
+}
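
To make the refactor concrete, here is a sketch of what a short-format btree verifier looks like once it is built on these two helpers, mirroring the xfs_allocbt_verify()/xfs_inobt_verify() conversions in this merge; "xfs_foobt" and m_foobt_mxr are placeholders, not real symbols:

static bool
xfs_foobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	unsigned int		level = be16_to_cpu(block->bb_level);

	/* v5 filesystems carry the extra uuid/blkno/owner header fields */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	/* common numrecs and sibling pointer checks */
	return xfs_btree_sblock_verify(bp, mp->m_foobt_mxr[level != 0]);
}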
@@ -472,4 +472,7 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
 #define XFS_BTREE_TRACE_ARGR(c, r)
 #define XFS_BTREE_TRACE_CURSOR(c, t)
 
+bool xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
+bool xfs_btree_sblock_verify(struct xfs_buf *bp, unsigned int max_recs);
+
 #endif	/* __XFS_BTREE_H__ */
@@ -245,6 +245,7 @@ xfs_da3_node_read_verify(
 }
 
 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
+	.name = "xfs_da3_node",
 	.verify_read = xfs_da3_node_read_verify,
 	.verify_write = xfs_da3_node_write_verify,
 };
......
@@ -123,6 +123,7 @@ xfs_dir3_block_write_verify(
 }
 
 const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
+	.name = "xfs_dir3_block",
 	.verify_read = xfs_dir3_block_read_verify,
 	.verify_write = xfs_dir3_block_write_verify,
 };
......
@@ -305,11 +305,13 @@ xfs_dir3_data_write_verify(
 }
 
 const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
+	.name = "xfs_dir3_data",
 	.verify_read = xfs_dir3_data_read_verify,
 	.verify_write = xfs_dir3_data_write_verify,
 };
 
 static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
+	.name = "xfs_dir3_data_reada",
 	.verify_read = xfs_dir3_data_reada_verify,
 	.verify_write = xfs_dir3_data_write_verify,
 };
......
@@ -245,11 +245,13 @@ xfs_dir3_leafn_write_verify(
 }
 
 const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
+	.name = "xfs_dir3_leaf1",
 	.verify_read = xfs_dir3_leaf1_read_verify,
 	.verify_write = xfs_dir3_leaf1_write_verify,
 };
 
 const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
+	.name = "xfs_dir3_leafn",
 	.verify_read = xfs_dir3_leafn_read_verify,
 	.verify_write = xfs_dir3_leafn_write_verify,
 };
......
@@ -150,6 +150,7 @@ xfs_dir3_free_write_verify(
 }
 
 const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
+	.name = "xfs_dir3_free",
 	.verify_read = xfs_dir3_free_read_verify,
 	.verify_write = xfs_dir3_free_write_verify,
 };
......
@@ -282,6 +282,7 @@ xfs_dquot_buf_write_verify(
 }
 
 const struct xfs_buf_ops xfs_dquot_buf_ops = {
+	.name = "xfs_dquot",
 	.verify_read = xfs_dquot_buf_read_verify,
 	.verify_write = xfs_dquot_buf_write_verify,
 };
......
@@ -786,7 +786,7 @@ typedef struct xfs_agfl {
 	__be64		agfl_lsn;
 	__be32		agfl_crc;
 	__be32		agfl_bno[];	/* actually XFS_AGFL_SIZE(mp) */
-} xfs_agfl_t;
+} __attribute__((packed)) xfs_agfl_t;
 
 #define XFS_AGFL_CRC_OFF	offsetof(struct xfs_agfl, agfl_crc)
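
For background (my summary, not part of the commit message): the AGFL header is 36 bytes on disk (4 + 4 + 16 + 8 + 4), but on 64-bit builds the 8-byte agfl_lsn field gives the unpacked struct 8-byte alignment and 4 bytes of tail padding, so sizeof() disagrees between 32-bit and 64-bit and with the on-disk layout; packing removes that padding. A stand-alone sketch of the effect, using a simplified mock of the struct rather than the kernel types:

#include <stdint.h>
#include <stdio.h>

struct agfl_unpacked {
	uint32_t	magicnum;
	uint32_t	seqno;
	uint8_t		uuid[16];
	uint64_t	lsn;
	uint32_t	crc;
};

struct agfl_packed {
	uint32_t	magicnum;
	uint32_t	seqno;
	uint8_t		uuid[16];
	uint64_t	lsn;
	uint32_t	crc;
} __attribute__((packed));

int main(void)
{
	/* typically prints "unpacked 40 packed 36" on x86-64 */
	printf("unpacked %zu packed %zu\n",
	       sizeof(struct agfl_unpacked), sizeof(struct agfl_packed));
	return 0;
}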
......
@@ -2572,6 +2572,7 @@ xfs_agi_write_verify(
 }
 
 const struct xfs_buf_ops xfs_agi_buf_ops = {
+	.name = "xfs_agi",
 	.verify_read = xfs_agi_read_verify,
 	.verify_write = xfs_agi_write_verify,
 };
......
@@ -221,7 +221,6 @@ xfs_inobt_verify(
 {
 	struct xfs_mount	*mp = bp->b_target->bt_mount;
 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
-	struct xfs_perag	*pag = bp->b_pag;
 	unsigned int		level;
 
 	/*
@@ -237,14 +236,7 @@ xfs_inobt_verify(
 	switch (block->bb_magic) {
 	case cpu_to_be32(XFS_IBT_CRC_MAGIC):
 	case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
-		if (!xfs_sb_version_hascrc(&mp->m_sb))
-			return false;
-		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
-			return false;
-		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
-			return false;
-		if (pag &&
-		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
+		if (!xfs_btree_sblock_v5hdr_verify(bp))
 			return false;
 		/* fall through */
 	case cpu_to_be32(XFS_IBT_MAGIC):
@@ -254,24 +246,12 @@ xfs_inobt_verify(
 		return 0;
 	}
 
-	/* numrecs and level verification */
+	/* level verification */
 	level = be16_to_cpu(block->bb_level);
 	if (level >= mp->m_in_maxlevels)
 		return false;
-	if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[level != 0])
-		return false;
 
-	/* sibling pointer verification */
-	if (!block->bb_u.s.bb_leftsib ||
-	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
-	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
-		return false;
-	if (!block->bb_u.s.bb_rightsib ||
-	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
-	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
-		return false;
-
-	return true;
+	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
 }
 
 static void
@@ -304,6 +284,7 @@ xfs_inobt_write_verify(
 }
 
 const struct xfs_buf_ops xfs_inobt_buf_ops = {
+	.name = "xfs_inobt",
 	.verify_read = xfs_inobt_read_verify,
 	.verify_write = xfs_inobt_write_verify,
 };
......
@@ -132,11 +132,13 @@ xfs_inode_buf_write_verify(
 }
 
 const struct xfs_buf_ops xfs_inode_buf_ops = {
+	.name = "xfs_inode",
 	.verify_read = xfs_inode_buf_read_verify,
 	.verify_write = xfs_inode_buf_write_verify,
 };
 
 const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
+	.name = "xxfs_inode_ra",
 	.verify_read = xfs_inode_buf_readahead_verify,
 	.verify_write = xfs_inode_buf_write_verify,
 };
......
@@ -679,11 +679,13 @@ xfs_sb_write_verify(
 }
 
 const struct xfs_buf_ops xfs_sb_buf_ops = {
+	.name = "xfs_sb",
 	.verify_read = xfs_sb_read_verify,
 	.verify_write = xfs_sb_write_verify,
 };
 
 const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
+	.name = "xfs_sb_quiet",
 	.verify_read = xfs_sb_quiet_read_verify,
 	.verify_write = xfs_sb_write_verify,
 };
......
@@ -168,6 +168,7 @@ xfs_symlink_write_verify(
 }
 
 const struct xfs_buf_ops xfs_symlink_buf_ops = {
+	.name = "xfs_symlink",
 	.verify_read = xfs_symlink_read_verify,
 	.verify_write = xfs_symlink_write_verify,
 };
......
@@ -1045,7 +1045,7 @@ xfs_buf_ioend_work(
 	xfs_buf_ioend(bp);
 }
 
-void
+static void
 xfs_buf_ioend_async(
 	struct xfs_buf	*bp)
 {
......
@@ -132,6 +132,7 @@ struct xfs_buf_map {
 	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
 
 struct xfs_buf_ops {
+	char *name;
 	void (*verify_read)(struct xfs_buf *);
 	void (*verify_write)(struct xfs_buf *);
 };
......
@@ -164,9 +164,9 @@ xfs_verifier_error(
 {
 	struct xfs_mount *mp = bp->b_target->bt_mount;
 
-	xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx",
+	xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
 		  bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
-		  __return_address, bp->b_bn);
+		  __return_address, bp->b_ops->name, bp->b_bn);
 
 	xfs_alert(mp, "Unmount and run xfs_repair");
......
@@ -2068,12 +2068,14 @@ xlog_print_tic_res(
 	    "QM_DQCLUSTER",
 	    "QM_QINOCREATE",
 	    "QM_QUOTAOFF_END",
-	    "SB_UNIT",
 	    "FSYNC_TS",
 	    "GROWFSRT_ALLOC",
 	    "GROWFSRT_ZERO",
 	    "GROWFSRT_FREE",
-	    "SWAPEXT"
+	    "SWAPEXT",
+	    "CHECKPOINT",
+	    "ICREATE",
+	    "CREATE_TMPFILE"
 	};
 
 	xfs_warn(mp, "xlog_write: reservation summary:");
......
@@ -137,7 +137,7 @@ static const match_table_t tokens = {
 };
 
-STATIC unsigned long
+STATIC int
 suffix_kstrtoint(char *s, unsigned int base, int *res)
 {
 	int	last, shift_left_factor = 0, _res;
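
The return-type change matters because suffix_kstrtoint() reports failure as a negative errno while the parsed value comes back through *res; with an unsigned long return type, a negative-error check at a call site could never fire. A stand-alone illustration of that pitfall (not the kernel function):

#include <stdio.h>

/* -22 stands in for -EINVAL; the unsigned return type hides the sign */
static unsigned long parse_broken(void) { return -22; }
static int parse_fixed(void) { return -22; }

int main(void)
{
	if (parse_broken() < 0)			/* always false */
		printf("broken: error caught\n");
	if (parse_fixed() < 0)			/* fires as expected */
		printf("fixed: error caught\n");
	return 0;
}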
......
@@ -572,12 +572,16 @@ xfs_quota_warn(
 	struct xfs_dquot	*dqp,
 	int			type)
 {
-	/* no warnings for project quotas - we just return ENOSPC later */
+	enum quota_type qtype;
+
 	if (dqp->dq_flags & XFS_DQ_PROJ)
-		return;
-	quota_send_warning(make_kqid(&init_user_ns,
-				     (dqp->dq_flags & XFS_DQ_USER) ?
-				     USRQUOTA : GRPQUOTA,
+		qtype = PRJQUOTA;
+	else if (dqp->dq_flags & XFS_DQ_USER)
+		qtype = USRQUOTA;
+	else
+		qtype = GRPQUOTA;
+
+	quota_send_warning(make_kqid(&init_user_ns, qtype,
 				     be32_to_cpu(dqp->q_core.d_id)),
 			   mp->m_super->s_dev, type);
 }
......