Commit 3e4da466 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Darrick J. Wong

xfs: unwind the try_again loop in xfs_log_force

Instead split out a __xfs_log_force_lsn helper that gets called again
with the already_slept flag set to true in case we had to sleep.

This prepares for aio_fsync support.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 93806299
...@@ -3395,41 +3395,17 @@ xfs_log_force( ...@@ -3395,41 +3395,17 @@ xfs_log_force(
return -EIO; return -EIO;
} }
/* static int
* Force the in-core log to disk for a specific LSN. __xfs_log_force_lsn(
*
* Find in-core log with lsn.
* If it is in the DIRTY state, just return.
* If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
* state and go to sleep or return.
* If it is in any other state, go to sleep or return.
*
* Synchronous forces are implemented with a wait queue. All callers trying
* to force a given lsn to disk must wait on the queue attached to the
* specific in-core log. When given in-core log finally completes its write
* to disk, that thread will wake up all threads waiting on the queue.
*/
int
xfs_log_force_lsn(
struct xfs_mount *mp, struct xfs_mount *mp,
xfs_lsn_t lsn, xfs_lsn_t lsn,
uint flags, uint flags,
int *log_flushed) int *log_flushed,
bool already_slept)
{ {
struct xlog *log = mp->m_log; struct xlog *log = mp->m_log;
struct xlog_in_core *iclog; struct xlog_in_core *iclog;
int already_slept = 0;
ASSERT(lsn != 0);
XFS_STATS_INC(mp, xs_log_force);
trace_xfs_log_force(mp, lsn, _RET_IP_);
lsn = xlog_cil_force_lsn(log, lsn);
if (lsn == NULLCOMMITLSN)
return 0;
try_again:
spin_lock(&log->l_icloglock); spin_lock(&log->l_icloglock);
iclog = log->l_iclog; iclog = log->l_iclog;
if (iclog->ic_state & XLOG_STATE_IOERROR) if (iclog->ic_state & XLOG_STATE_IOERROR)
...@@ -3469,8 +3445,7 @@ xfs_log_force_lsn( ...@@ -3469,8 +3445,7 @@ xfs_log_force_lsn(
xlog_wait(&iclog->ic_prev->ic_write_wait, xlog_wait(&iclog->ic_prev->ic_write_wait,
&log->l_icloglock); &log->l_icloglock);
already_slept = 1; return -EAGAIN;
goto try_again;
} }
atomic_inc(&iclog->ic_refcnt); atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0); xlog_state_switch_iclogs(log, iclog, 0);
...@@ -3503,6 +3478,43 @@ xfs_log_force_lsn( ...@@ -3503,6 +3478,43 @@ xfs_log_force_lsn(
return -EIO; return -EIO;
} }
/*
 * Force the in-core log to disk for a specific LSN.
 *
 * Find in-core log with lsn.
 * If it is in the DIRTY state, just return.
 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
 * state and go to sleep or return.
 * If it is in any other state, go to sleep or return.
 *
 * Synchronous forces are implemented with a wait queue. All callers trying
 * to force a given lsn to disk must wait on the queue attached to the
 * specific in-core log. When given in-core log finally completes its write
 * to disk, that thread will wake up all threads waiting on the queue.
 */
int
xfs_log_force_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn,
	uint			flags,
	int			*log_flushed)
{
	int			error;

	ASSERT(lsn != 0);

	XFS_STATS_INC(mp, xs_log_force);
	trace_xfs_log_force(mp, lsn, _RET_IP_);

	/*
	 * Push the CIL first; a NULLCOMMITLSN result means everything up to
	 * the requested lsn is already on disk, so there is nothing to force.
	 */
	lsn = xlog_cil_force_lsn(mp->m_log, lsn);
	if (lsn == NULLCOMMITLSN)
		return 0;

	/*
	 * The helper returns -EAGAIN after it had to wait for a previous
	 * iclog write to complete; retry exactly once with already_slept
	 * set so it does not sleep a second time.
	 */
	error = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
	if (error == -EAGAIN)
		error = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
	return error;
}
/* /*
* Called when we want to mark the current iclog as being ready to sync to * Called when we want to mark the current iclog as being ready to sync to
* disk. * disk.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment