Commit 1c304625 authored by Christoph Hellwig, committed by Ben Myers

xfs: allow assigning the tail lsn with the AIL lock held

Provide a variant of xlog_assign_tail_lsn that has the AIL lock already
held.  By doing so we do an additional atomic_read + atomic_set under
the lock, which comes down to two instructions.

Switch xfs_trans_ail_update_bulk and xfs_trans_ail_delete_bulk to the
new version to reduce the number of lock roundtrips, and prepare for
a new addition that would require a third lock roundtrip in
xfs_trans_ail_delete_bulk.  This addition is also the reason for
slightly rearranging the conditionals and relying on xfs_log_space_wake
for checking that the filesystem has been shut down internally.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent 32ce90a4
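
Before the diff itself, here is a minimal user-space sketch of the locked/unlocked split this commit introduces, using C11 atomics and a pthread mutex in place of the AIL spin lock. Every name in it (struct ail, assign_tail_lsn_locked, min_lsn, ...) is an illustrative stand-in rather than the kernel's, and the AIL minimum is reduced to a plain scalar field instead of a log-item list:

/*
 * Sketch only: models the xlog_assign_tail_lsn_locked() /
 * xlog_assign_tail_lsn() pair with hypothetical user-space names.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lsn_t;

struct ail {
	pthread_mutex_t	lock;		/* stands in for ailp->xa_lock */
	lsn_t		min_lsn;	/* LSN of the AIL head, 0 if empty */
	_Atomic lsn_t	last_sync_lsn;	/* stands in for log->l_last_sync_lsn */
	_Atomic lsn_t	tail_lsn;	/* stands in for log->l_tail_lsn */
};

/* The "_locked" variant: the caller must already hold ail->lock. */
static lsn_t assign_tail_lsn_locked(struct ail *ail)
{
	lsn_t tail = ail->min_lsn;

	/* Fall back to the last committed LSN when the AIL is empty. */
	if (!tail)
		tail = atomic_load(&ail->last_sync_lsn);
	atomic_store(&ail->tail_lsn, tail);
	return tail;
}

/* Convenience wrapper that does the lock roundtrip itself. */
static lsn_t assign_tail_lsn(struct ail *ail)
{
	lsn_t tail;

	pthread_mutex_lock(&ail->lock);
	tail = assign_tail_lsn_locked(ail);
	pthread_mutex_unlock(&ail->lock);
	return tail;
}

int main(void)
{
	struct ail ail = { .lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_store(&ail.last_sync_lsn, 42);
	printf("tail lsn: %llu\n",
	       (unsigned long long)assign_tail_lsn(&ail));
	return 0;
}

Callers that already hold the lock call the _locked variant directly and save an unlock/lock pair; everyone else goes through the wrapper, exactly as in the diff below.
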
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -916,27 +916,42 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(
+xlog_assign_tail_lsn_locked(
 	struct xfs_mount	*mp)
 {
-	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
+	struct xfs_log_item	*lip;
+	xfs_lsn_t		tail_lsn;
+
+	assert_spin_locked(&mp->m_ail->xa_lock);
 
 	/*
 	 * To make sure we always have a valid LSN for the log tail we keep
 	 * track of the last LSN which was committed in log->l_last_sync_lsn,
-	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
-	 *
-	 * If the AIL has been emptied we also need to wake any process
-	 * waiting for this condition.
+	 * and use that when the AIL was empty.
 	 */
-	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
-	if (!tail_lsn)
+	lip = xfs_ail_min(mp->m_ail);
+	if (lip)
+		tail_lsn = lip->li_lsn;
+	else
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
 
+xfs_lsn_t
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
+{
+	xfs_lsn_t		tail_lsn;
+
+	spin_lock(&mp->m_ail->xa_lock);
+	tail_lsn = xlog_assign_tail_lsn_locked(mp);
+	spin_unlock(&mp->m_ail->xa_lock);
+
+	return tail_lsn;
+}
+
 /*
  * Return the space in the log between the tail and the head.  The head
  * is passed in the cycle/bytes formal parms.  In the special case where
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -152,6 +152,7 @@ int	  xfs_log_mount(struct xfs_mount	*mp,
 			int		num_bblocks);
 int	  xfs_log_mount_finish(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void	  xfs_log_space_wake(struct xfs_mount *mp);
 int	  xfs_log_notify(struct xfs_mount	*mp,
 			 struct xlog_in_core	*iclog,
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -79,7 +79,7 @@ xfs_ail_check(
  * Return a pointer to the first item in the AIL.  If the AIL is empty, then
  * return NULL.
  */
-static xfs_log_item_t *
+xfs_log_item_t *
 xfs_ail_min(
 	struct xfs_ail  *ailp)
 {
@@ -667,11 +667,15 @@ xfs_trans_ail_update_bulk(
 
 	if (!list_empty(&tmp))
 		xfs_ail_splice(ailp, cur, &tmp, lsn);
-	spin_unlock(&ailp->xa_lock);
 
-	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-		xlog_assign_tail_lsn(ailp->xa_mount);
+	if (mlip_changed) {
+		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+			xlog_assign_tail_lsn_locked(ailp->xa_mount);
+		spin_unlock(&ailp->xa_lock);
+
 		xfs_log_space_wake(ailp->xa_mount);
+	} else {
+		spin_unlock(&ailp->xa_lock);
 	}
 }
 
@@ -729,11 +733,15 @@ xfs_trans_ail_delete_bulk(
 		if (mlip == lip)
 			mlip_changed = 1;
 	}
-	spin_unlock(&ailp->xa_lock);
 
-	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-		xlog_assign_tail_lsn(ailp->xa_mount);
+	if (mlip_changed) {
+		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+			xlog_assign_tail_lsn_locked(ailp->xa_mount);
+		spin_unlock(&ailp->xa_lock);
+
 		xfs_log_space_wake(ailp->xa_mount);
+	} else {
+		spin_unlock(&ailp->xa_lock);
 	}
 }
 
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -102,6 +102,7 @@ xfs_trans_ail_delete(
 
 void			xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_ail_push_all(struct xfs_ail *);
+struct xfs_log_item	*xfs_ail_min(struct xfs_ail  *ailp);
 xfs_lsn_t		xfs_ail_min_lsn(struct xfs_ail *ailp);
 struct xfs_log_item *	xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
 					struct xfs_trans_ail_cursor *cur,
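
The rearranged conditionals in xfs_trans_ail_update_bulk and xfs_trans_ail_delete_bulk above share one shape: recompute the tail LSN while the lock is still held, drop the lock exactly once on every path, and wake log-space waiters only after it is dropped (the shutdown check can move inside the conditional because xfs_log_space_wake handles shutdown internally). Continuing the user-space sketch from above, with wake_log_space_waiters() as a hypothetical stand-in for xfs_log_space_wake() and a plain flag in place of XFS_FORCED_SHUTDOWN():

/* Appended to the struct ail sketch above; sketch names, not the kernel's. */
static void wake_log_space_waiters(struct ail *ail)
{
	(void)ail;	/* a real implementation would wake blocked writers */
}

static void ail_bulk_done(struct ail *ail, int min_item_changed, int shut_down)
{
	if (min_item_changed) {
		/* Recompute the tail while we still hold the lock ... */
		if (!shut_down)
			assign_tail_lsn_locked(ail);
		pthread_mutex_unlock(&ail->lock);

		/* ... and wake waiters only after dropping it. */
		wake_log_space_waiters(ail);
	} else {
		pthread_mutex_unlock(&ail->lock);
	}
}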