Commit 87d9d609 authored by Darrick J. Wong

xfs: quota scrub should use bmapbtd scrubber

Replace the quota scrubber's open-coded data fork scrubber with a
redirected call to the bmapbtd scrubber.  This strengthens the quota
scrub to include all the cross-referencing that it does.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 8bc763c2
......@@ -44,6 +44,8 @@
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
......@@ -787,3 +789,58 @@ xfs_scrub_buffer_recheck(
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
}
/*
 * Scrub the attr/data forks of a metadata inode. The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 *
 * Returns 0 (possibly with XFS_SCRUB_OFLAG_CORRUPT set in sc->sm->sm_flags)
 * or an error code from the fork scrubber / shared-extent check.
 */
int
xfs_scrub_metadata_inode_forks(
struct xfs_scrub_context *sc)
{
__u32 smtype;
bool shared;
int error;
/* Nothing to do if an earlier check already flagged corruption. */
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return 0;
/* Metadata inodes don't live on the rt device. */
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* They should never participate in reflink. */
if (xfs_is_reflink_inode(sc->ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* They also should never have extended attributes. */
if (xfs_inode_hasattr(sc->ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/*
 * Invoke the data fork scrubber.  Temporarily redirect sm_type to
 * XFS_SCRUB_TYPE_BMBTD so xfs_scrub_bmap_data() checks the data fork
 * of this inode, then restore the caller's original scrub type.
 */
smtype = sc->sm->sm_type;
sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
error = xfs_scrub_bmap_data(sc);
sc->sm->sm_type = smtype;
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
/*
 * Look for incorrect shared blocks.  Even on a reflink filesystem,
 * a metadata inode sharing any extent is treated as corruption.
 */
if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&shared);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error))
return error;
if (shared)
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
}
return error;
}
......@@ -155,4 +155,6 @@ static inline bool xfs_scrub_skip_xref(struct xfs_scrub_metadata *sm)
XFS_SCRUB_OFLAG_XCORRUPT);
}
int xfs_scrub_metadata_inode_forks(struct xfs_scrub_context *sc);
#endif /* __XFS_SCRUB_COMMON_H__ */
......@@ -206,65 +206,62 @@ xfs_scrub_quota_item(
return 0;
}
/*
 * NOTE(review): this span is a unified-diff hunk whose +/- markers were
 * stripped during extraction.  Lines from the removed code (the old
 * xfs_scrub_quota() prologue and its xfs_bmapi_read()-based extent walk)
 * are interleaved with the added code (the new xfs_scrub_quota_data_fork()
 * and its for_each_xfs_iext() walk).  The code below is preserved verbatim;
 * the interleaving is annotated rather than repaired.
 */
/* Scrub all of a quota type's items. */
int
xfs_scrub_quota(
/* Check the quota's data fork. */
STATIC int
xfs_scrub_quota_data_fork(
struct xfs_scrub_context *sc)
{
/* Locals: old and new declaration sets are mixed here. */
struct xfs_bmbt_irec irec = { 0 };
struct xfs_scrub_quota_info sqi;
struct xfs_mount *mp = sc->mp;
struct xfs_quotainfo *qi = mp->m_quotainfo;
struct xfs_iext_cursor icur;
struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
struct xfs_ifork *ifp;
xfs_fileoff_t max_dqid_off;
xfs_fileoff_t off = 0;
uint dqtype;
int nimaps;
int error = 0;
dqtype = xfs_scrub_quota_to_dqtype(sc);
/* Invoke the fork scrubber. */
error = xfs_scrub_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
/* Look for problem extents. */
/* removed check: the rt-device test now lives in the fork scrubber */
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
goto out;
}
/* Check for data fork problems that apply only to quota files. */
/* Highest file offset that any valid dquot id can map to. */
max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
/* Old loop head (removed) and new iext iteration (added) follow. */
while (1) {
ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) {
if (xfs_scrub_should_terminate(sc, &error))
break;
/* Removed xfs_bmapi_read()-based walk of the data fork: */
off = irec.br_startoff + irec.br_blockcount;
nimaps = 1;
error = xfs_bmapi_read(sc->ip, off, -1, &irec, &nimaps,
XFS_BMAPI_ENTIRE);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, off,
&error))
goto out;
if (!nimaps)
break;
if (irec.br_startblock == HOLESTARTBLOCK)
continue;
/* Check the extent record doesn't point to crap. */
if (irec.br_startblock + irec.br_blockcount <=
irec.br_startblock)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff);
if (!xfs_verify_fsbno(mp, irec.br_startblock) ||
!xfs_verify_fsbno(mp, irec.br_startblock +
irec.br_blockcount - 1))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff);
/*
 * delalloc extents or blocks mapped above the highest
 * quota id shouldn't happen.
 * (The stray "Unwritten extents..." wording fused here came
 * from the removed version of this comment.)
 */
if (isnullstartblock(irec.br_startblock) ||
irec.br_startoff > max_dqid_off ||
/* removed form of the range check and its corrupt call: */
irec.br_startoff + irec.br_blockcount > max_dqid_off + 1)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
/* added form: flag at br_startoff and stop walking */
irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff);
break;
}
}
return error;
}
/* Scrub all of a quota type's items. */
int
xfs_scrub_quota(
struct xfs_scrub_context *sc)
{
struct xfs_scrub_quota_info sqi;
struct xfs_mount *mp = sc->mp;
struct xfs_quotainfo *qi = mp->m_quotainfo;
uint dqtype;
int error = 0;
dqtype = xfs_scrub_quota_to_dqtype(sc);
/* Look for problem extents. */
error = xfs_scrub_quota_data_fork(sc);
if (error)
goto out;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment