Commit 17b38471 authored by Christoph Hellwig, committed by Alex Elder

xfs: force the log if we encounter pinned buffers in .iop_pushbuf

We need to check for pinned buffers even in .iop_pushbuf, given that inode
items flush into the same buffers that may be pinned directly, because
operations on the unlinked inode list operate directly on those buffers.
To do this, add a return value to .iop_pushbuf that tells the AIL push code
about the pinned buffer, and use the existing log force mechanisms to unpin it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Stefan Priebe <s.priebe@profihost.ag>
Tested-by: Stefan Priebe <s.priebe@profihost.ag>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent bc6e588a
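
As a reader's aid (not part of the patch): the behavioural change can be summarised as a small, self-contained C model of the new .iop_pushbuf contract and of the caller-side handling added to xfs_ail_worker() in the diff below. The struct layout, demo_pushbuf(), ail_push_one() and main() are illustrative stand-ins only; just the boolean return contract and the stuck/flush_log handling mirror the actual patch.

#include <stdbool.h>
#include <stdio.h>

struct log_item;

/* Simplified stand-in for xfs_item_ops; only this callback changes in the patch. */
struct item_ops {
        /*
         * New contract: return true when the backing buffer could be handed
         * to writeback, false when it was found pinned, in which case the
         * AIL push should fall back to a log force to unpin it.
         */
        bool (*iop_pushbuf)(struct log_item *lip);
};

struct log_item {
        const struct item_ops *ops;
        bool buffer_pinned;     /* models xfs_buf_ispinned() on the backing buffer */
};

/* Models the XFS_ITEM_PUSHBUF case added to xfs_ail_worker() below. */
static void ail_push_one(struct log_item *lip, int *stuck, int *flush_log)
{
        if (!lip->ops->iop_pushbuf(lip)) {
                (*stuck)++;
                *flush_log = 1; /* the worker later issues a log force */
        }
        /* else: the real code records ailp->xa_last_pushed_lsn = lsn here */
}

/* Mirrors the new "if (xfs_buf_ispinned(bp)) ret = false;" logic in the item code. */
static bool demo_pushbuf(struct log_item *lip)
{
        return !lip->buffer_pinned;
}

int main(void)
{
        const struct item_ops ops = { .iop_pushbuf = demo_pushbuf };
        struct log_item pinned_item = { .ops = &ops, .buffer_pinned = true };
        int stuck = 0, flush_log = 0;

        ail_push_one(&pinned_item, &stuck, &flush_log);
        printf("stuck=%d flush_log=%d\n", stuck, flush_log); /* prints: stuck=1 flush_log=1 */
        return 0;
}

When the model's iop_pushbuf reports a pinned buffer, the caller marks the item stuck and sets flush_log; in the kernel this is what lets the existing log force path unpin the buffer, as the commit message describes.
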
...@@ -629,7 +629,7 @@ xfs_buf_item_push( ...@@ -629,7 +629,7 @@ xfs_buf_item_push(
* the xfsbufd to get this buffer written. We have to unlock the buffer * the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too. * to allow the xfsbufd to write it, too.
*/ */
STATIC void STATIC bool
xfs_buf_item_pushbuf( xfs_buf_item_pushbuf(
struct xfs_log_item *lip) struct xfs_log_item *lip)
{ {
...@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf( ...@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
xfs_buf_delwri_promote(bp); xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp); xfs_buf_relse(bp);
return true;
} }
STATIC void STATIC void
...@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait( ...@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
* search the buffer cache can be a time consuming thing, and AIL lock is a * search the buffer cache can be a time consuming thing, and AIL lock is a
* spinlock. * spinlock.
*/ */
STATIC void STATIC bool
xfs_qm_dquot_logitem_pushbuf( xfs_qm_dquot_logitem_pushbuf(
struct xfs_log_item *lip) struct xfs_log_item *lip)
{ {
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
struct xfs_dquot *dqp = qlip->qli_dquot; struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_buf *bp; struct xfs_buf *bp;
bool ret = true;
ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(XFS_DQ_IS_LOCKED(dqp));
...@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf( ...@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
if (completion_done(&dqp->q_flush) || if (completion_done(&dqp->q_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) { !(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp); xfs_dqunlock(dqp);
return; return true;
} }
bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp); xfs_dqunlock(dqp);
if (!bp) if (!bp)
return; return true;
if (XFS_BUF_ISDELAYWRITE(bp)) if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp); xfs_buf_delwri_promote(bp);
if (xfs_buf_ispinned(bp))
ret = false;
xfs_buf_relse(bp); xfs_buf_relse(bp);
return ret;
} }
/* /*
...@@ -708,13 +708,14 @@ xfs_inode_item_committed( ...@@ -708,13 +708,14 @@ xfs_inode_item_committed(
* marked delayed write. If that's the case, we'll promote it and that will * marked delayed write. If that's the case, we'll promote it and that will
* allow the caller to write the buffer by triggering the xfsbufd to run. * allow the caller to write the buffer by triggering the xfsbufd to run.
*/ */
STATIC void STATIC bool
xfs_inode_item_pushbuf( xfs_inode_item_pushbuf(
struct xfs_log_item *lip) struct xfs_log_item *lip)
{ {
struct xfs_inode_log_item *iip = INODE_ITEM(lip); struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode; struct xfs_inode *ip = iip->ili_inode;
struct xfs_buf *bp; struct xfs_buf *bp;
bool ret = true;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
...@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf( ...@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf(
if (completion_done(&ip->i_flush) || if (completion_done(&ip->i_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) { !(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
return; return true;
} }
bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
...@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf( ...@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf(
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!bp) if (!bp)
return; return true;
if (XFS_BUF_ISDELAYWRITE(bp)) if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp); xfs_buf_delwri_promote(bp);
if (xfs_buf_ispinned(bp))
ret = false;
xfs_buf_relse(bp); xfs_buf_relse(bp);
return ret;
} }
/* /*
...@@ -350,7 +350,7 @@ typedef struct xfs_item_ops { ...@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
void (*iop_unlock)(xfs_log_item_t *); void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
void (*iop_push)(xfs_log_item_t *); void (*iop_push)(xfs_log_item_t *);
void (*iop_pushbuf)(xfs_log_item_t *); bool (*iop_pushbuf)(xfs_log_item_t *);
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
} xfs_item_ops_t; } xfs_item_ops_t;
...@@ -427,8 +427,13 @@ xfs_ail_worker( ...@@ -427,8 +427,13 @@ xfs_ail_worker(
case XFS_ITEM_PUSHBUF: case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf); XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
if (!IOP_PUSHBUF(lip)) {
stuck++;
flush_log = 1;
} else {
ailp->xa_last_pushed_lsn = lsn; ailp->xa_last_pushed_lsn = lsn;
}
push_xfsbufd = 1; push_xfsbufd = 1;
break; break;