Commit 65b99a08 authored by Brian Foster, committed by Dave Chinner

xfs: refactor unmount record detection into helper

Once the mount sequence has identified the head and tail blocks of the
physical log, the record at the head of the log is located and examined
for an unmount record to determine if the log is clean. This currently
occurs after torn write verification of the head region of the log.

This must ultimately be separated from torn write verification and may
need to be called again if the log head is walked back due to a torn
write (to determine whether the new head record is an unmount record).
Separate this logic into a new helper function. This patch does not
change behavior.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 82ff6cc2
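
Before the diff itself, a minimal standalone sketch of the block arithmetic the new helper performs when locating a candidate unmount record. This is not part of the commit: XLOG_HEADER_CYCLE_SIZE (32k) and the 512-byte basic block size are real XFS constants, but every input value below is made up for illustration, and plain userspace C stands in for the kernel environment.

#include <stdio.h>

/* Real XFS constant: a v2 log record header covers 32k per header block. */
#define XLOG_HEADER_CYCLE_SIZE  (32 * 1024)
/* Basic blocks are 512 bytes; BTOBB() rounds a byte count up to blocks. */
#define BBSHIFT                 9
#define BTOBB(bytes)            (((bytes) + (1 << BBSHIFT) - 1) >> BBSHIFT)

int main(void)
{
        int h_size = 64 * 1024;         /* made-up record header size */
        int h_len = 2048;               /* made-up record payload length */
        long long rhead_blk = 4090;     /* made-up head record block */
        long long log_bbsize = 4096;    /* made-up physical log size (2MB) */
        int hblks;
        long long after_umount_blk;

        /* Round the header size up to whole header blocks, as the v2
         * branch of xlog_check_unmount_rec() does. */
        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                hblks++;

        /* The block just past the candidate unmount record, wrapped
         * around the physical log like do_mod() in the kernel code. */
        after_umount_blk = (rhead_blk + hblks + BTOBB(h_len)) % log_bbsize;

        /* Prints: hblks=2 after_umount_blk=0 */
        printf("hblks=%d after_umount_blk=%lld\n", hblks, after_umount_blk);
        return 0;
}

With these values the record header spans two basic blocks and the block after the record wraps back to the start of the physical log, which is exactly the wraparound case the do_mod() calls in the diff below guard against.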
@@ -1198,6 +1198,84 @@ xlog_verify_head(
         return error;
 }
 
+/*
+ * Check whether the head of the log points to an unmount record. In other
+ * words, determine whether the log is clean. If so, update the in-core state
+ * appropriately.
+ */
+static int
+xlog_check_unmount_rec(
+        struct xlog             *log,
+        xfs_daddr_t             *head_blk,
+        xfs_daddr_t             *tail_blk,
+        struct xlog_rec_header  *rhead,
+        xfs_daddr_t             rhead_blk,
+        struct xfs_buf          *bp,
+        bool                    *clean)
+{
+        struct xlog_op_header   *op_head;
+        xfs_daddr_t             umount_data_blk;
+        xfs_daddr_t             after_umount_blk;
+        int                     hblks;
+        int                     error;
+        char                    *offset;
+
+        *clean = false;
+
+        /*
+         * Look for unmount record. If we find it, then we know there was a
+         * clean unmount. Since 'i' could be the last block in the physical
+         * log, we convert to a log block before comparing to the head_blk.
+         *
+         * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
+         * below. We won't want to clear the unmount record if there is one, so
+         * we pass the lsn of the unmount record rather than the block after it.
+         */
+        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+                int     h_size = be32_to_cpu(rhead->h_size);
+                int     h_version = be32_to_cpu(rhead->h_version);
+
+                if ((h_version & XLOG_VERSION_2) &&
+                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
+                                hblks++;
+                } else {
+                        hblks = 1;
+                }
+        } else {
+                hblks = 1;
+        }
+        after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
+        after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
+        if (*head_blk == after_umount_blk &&
+            be32_to_cpu(rhead->h_num_logops) == 1) {
+                umount_data_blk = rhead_blk + hblks;
+                umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
+                error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+                if (error)
+                        return error;
+
+                op_head = (struct xlog_op_header *)offset;
+                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
+                        /*
+                         * Set tail and last sync so that newly written log
+                         * records will point recovery to after the current
+                         * unmount record.
+                         */
+                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
+                                        log->l_curr_cycle, after_umount_blk);
+                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+                                        log->l_curr_cycle, after_umount_blk);
+                        *tail_blk = after_umount_blk;
+
+                        *clean = true;
+                }
+        }
+
+        return 0;
+}
+
 /*
  * Find the sync block number or the tail of the log.
  *
@@ -1221,16 +1299,13 @@ xlog_find_tail(
         xfs_daddr_t             *tail_blk)
 {
         xlog_rec_header_t       *rhead;
-        xlog_op_header_t        *op_head;
         char                    *offset = NULL;
         xfs_buf_t               *bp;
         int                     error;
-        xfs_daddr_t             umount_data_blk;
-        xfs_daddr_t             after_umount_blk;
        xfs_daddr_t             rhead_blk;
         xfs_lsn_t               tail_lsn;
-        int                     hblks;
         bool                    wrapped = false;
+        bool                    clean = false;
 
         /*
          * Find previous log record
@@ -1301,66 +1376,24 @@ xlog_find_tail(
                         BBTOB(log->l_curr_block));
         xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
                         BBTOB(log->l_curr_block));
+        tail_lsn = atomic64_read(&log->l_tail_lsn);
 
         /*
-         * Look for unmount record. If we find it, then we know there
-         * was a clean unmount. Since 'i' could be the last block in
-         * the physical log, we convert to a log block before comparing
-         * to the head_blk.
-         *
-         * Save the current tail lsn to use to pass to
-         * xlog_clear_stale_blocks() below. We won't want to clear the
-         * unmount record if there is one, so we pass the lsn of the
-         * unmount record rather than the block after it.
+         * Look for an unmount record at the head of the log. This sets the log
+         * state to determine whether recovery is necessary.
          */
-        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
-                int     h_size = be32_to_cpu(rhead->h_size);
-                int     h_version = be32_to_cpu(rhead->h_version);
-
-                if ((h_version & XLOG_VERSION_2) &&
-                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
-                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
-                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
-                                hblks++;
-                } else {
-                        hblks = 1;
-                }
-        } else {
-                hblks = 1;
-        }
-        after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
-        after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
-        tail_lsn = atomic64_read(&log->l_tail_lsn);
-        if (*head_blk == after_umount_blk &&
-            be32_to_cpu(rhead->h_num_logops) == 1) {
-                umount_data_blk = rhead_blk + hblks;
-                umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
-                error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
-                if (error)
-                        goto done;
-
-                op_head = (xlog_op_header_t *)offset;
-                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
-                        /*
-                         * Set tail and last sync so that newly written
-                         * log records will point recovery to after the
-                         * current unmount record.
-                         */
-                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
-                                        log->l_curr_cycle, after_umount_blk);
-                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
-                                        log->l_curr_cycle, after_umount_blk);
-                        *tail_blk = after_umount_blk;
-
-                        /*
-                         * Note that the unmount was clean. If the unmount
-                         * was not clean, we need to know this to rebuild the
-                         * superblock counters from the perag headers if we
-                         * have a filesystem using non-persistent counters.
-                         */
-                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
-                }
-        }
+        error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
+                                       rhead_blk, bp, &clean);
+        if (error)
+                goto done;
+
+        /*
+         * Note that the unmount was clean. If the unmount was not clean, we
+         * need to know this to rebuild the superblock counters from the perag
+         * headers if we have a filesystem using non-persistent counters.
+         */
+        if (clean)
+                log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
 
         /*
          * Make sure that there are no blocks in front of the head
......
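
A closing sketch of the contract the refactor creates, per the commit message's note that the helper may need to be called again when torn write verification walks the head back. The struct and the check_unmount_rec() stub below are hypothetical stand-ins written for this page, not the kernel API; only the in/out shape (head block in, tail block and clean flag out) mirrors xlog_check_unmount_rec().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct xlog; the real kernel type is not
 * reproduced here. */
struct xlog { long long logBBsize; };

/* Hypothetical stub mirroring the shape of xlog_check_unmount_rec():
 * report whether the record at head_blk is an unmount record and, if
 * so, where the new tail should point. */
static int check_unmount_rec(struct xlog *log, long long head_blk,
                             long long *tail_blk, bool *clean)
{
        (void)log;
        /* Pretend only the record at block 100 is a clean unmount. */
        *clean = (head_blk == 100);
        if (*clean)
                *tail_blk = 102;        /* block after the unmount record */
        return 0;
}

int main(void)
{
        struct xlog log = { 4096 };
        long long head_blk = 100, tail_blk = 0;
        bool clean = false;

        /* First pass: the head as found by the tail search. */
        if (check_unmount_rec(&log, head_blk, &tail_blk, &clean))
                return 1;
        printf("head %lld: clean=%d tail=%lld\n", head_blk, clean, tail_blk);

        /* If torn write verification later walks the head back, the same
         * helper is simply invoked again on the new head block. */
        head_blk = 96;
        if (check_unmount_rec(&log, head_blk, &tail_blk, &clean))
                return 1;
        printf("head %lld: clean=%d tail=%lld\n", head_blk, clean, tail_blk);
        return 0;
}

Because the detection is now a pure question ("is the record at this head an unmount record?") with its state updates behind out-parameters, re-running it after a head walk-back costs one call rather than a second copy of the open-coded logic.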