Commit 3536b61e authored by Dave Chinner's avatar Dave Chinner Committed by Darrick J. Wong

xfs: unwind log item error flagging

When a buffer I/O error occurs, we want to mark all
the log items attached to the buffer as failed. Open code
the error handling loop so that we can modify the flagging for the
different types of objects directly and independently of each other.

This also allows us to remove the ->iop_error method from the log
item operations.
Signed-off-by: default avatarDave Chinner <dchinner@redhat.com>
Reviewed-by: default avatarBrian Foster <bfoster@redhat.com>
Reviewed-by: default avatarDarrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: default avatarDarrick J. Wong <darrick.wong@oracle.com>
parent 428947e9
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include "xfs_bit.h" #include "xfs_bit.h"
#include "xfs_mount.h" #include "xfs_mount.h"
#include "xfs_trans.h" #include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h" #include "xfs_buf_item.h"
#include "xfs_inode.h" #include "xfs_inode.h"
#include "xfs_inode_item.h" #include "xfs_inode_item.h"
...@@ -955,37 +956,6 @@ xfs_buf_item_relse( ...@@ -955,37 +956,6 @@ xfs_buf_item_relse(
xfs_buf_item_free(bip); xfs_buf_item_free(bip);
} }
/*
* Invoke the error state callback for each log item affected by the failed I/O.
*
* If a metadata buffer write fails with a non-permanent error, the buffer is
* eventually resubmitted and so the completion callbacks are not run. The error
* state may need to be propagated to the log items attached to the buffer,
* however, so the next AIL push of the item knows hot to handle it correctly.
*/
STATIC void
xfs_buf_do_callbacks_fail(
struct xfs_buf *bp)
{
struct xfs_ail *ailp = bp->b_mount->m_ail;
struct xfs_log_item *lip;
/*
* Buffer log item errors are handled directly by xfs_buf_item_push()
* and xfs_buf_iodone_callback_error, and they have no IO error
* callbacks. Check only for items in b_li_list.
*/
if (list_empty(&bp->b_li_list))
return;
spin_lock(&ailp->ail_lock);
list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_ops->iop_error)
lip->li_ops->iop_error(lip, bp);
}
spin_unlock(&ailp->ail_lock);
}
/* /*
* Decide if we're going to retry the write after a failure, and prepare * Decide if we're going to retry the write after a failure, and prepare
* the buffer for retrying the write. * the buffer for retrying the write.
...@@ -1165,6 +1135,7 @@ xfs_buf_inode_iodone( ...@@ -1165,6 +1135,7 @@ xfs_buf_inode_iodone(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
if (bp->b_error) { if (bp->b_error) {
struct xfs_log_item *lip;
int ret = xfs_buf_iodone_error(bp); int ret = xfs_buf_iodone_error(bp);
if (ret == XBF_IOERROR_FINISH) if (ret == XBF_IOERROR_FINISH)
...@@ -1172,7 +1143,11 @@ xfs_buf_inode_iodone( ...@@ -1172,7 +1143,11 @@ xfs_buf_inode_iodone(
if (ret == XBF_IOERROR_DONE) if (ret == XBF_IOERROR_DONE)
return; return;
ASSERT(ret == XBF_IOERROR_FAIL); ASSERT(ret == XBF_IOERROR_FAIL);
xfs_buf_do_callbacks_fail(bp); spin_lock(&bp->b_mount->m_ail->ail_lock);
list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
xfs_set_li_failed(lip, bp);
}
spin_unlock(&bp->b_mount->m_ail->ail_lock);
xfs_buf_ioerror(bp, 0); xfs_buf_ioerror(bp, 0);
xfs_buf_relse(bp); xfs_buf_relse(bp);
return; return;
...@@ -1193,6 +1168,7 @@ xfs_buf_dquot_iodone( ...@@ -1193,6 +1168,7 @@ xfs_buf_dquot_iodone(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
if (bp->b_error) { if (bp->b_error) {
struct xfs_log_item *lip;
int ret = xfs_buf_iodone_error(bp); int ret = xfs_buf_iodone_error(bp);
if (ret == XBF_IOERROR_FINISH) if (ret == XBF_IOERROR_FINISH)
...@@ -1200,7 +1176,11 @@ xfs_buf_dquot_iodone( ...@@ -1200,7 +1176,11 @@ xfs_buf_dquot_iodone(
if (ret == XBF_IOERROR_DONE) if (ret == XBF_IOERROR_DONE)
return; return;
ASSERT(ret == XBF_IOERROR_FAIL); ASSERT(ret == XBF_IOERROR_FAIL);
xfs_buf_do_callbacks_fail(bp); spin_lock(&bp->b_mount->m_ail->ail_lock);
list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
xfs_set_li_failed(lip, bp);
}
spin_unlock(&bp->b_mount->m_ail->ail_lock);
xfs_buf_ioerror(bp, 0); xfs_buf_ioerror(bp, 0);
xfs_buf_relse(bp); xfs_buf_relse(bp);
return; return;
...@@ -1232,7 +1212,7 @@ xfs_buf_iodone( ...@@ -1232,7 +1212,7 @@ xfs_buf_iodone(
if (ret == XBF_IOERROR_DONE) if (ret == XBF_IOERROR_DONE)
return; return;
ASSERT(ret == XBF_IOERROR_FAIL); ASSERT(ret == XBF_IOERROR_FAIL);
xfs_buf_do_callbacks_fail(bp); ASSERT(list_empty(&bp->b_li_list));
xfs_buf_ioerror(bp, 0); xfs_buf_ioerror(bp, 0);
xfs_buf_relse(bp); xfs_buf_relse(bp);
return; return;
......
...@@ -113,23 +113,6 @@ xfs_qm_dqunpin_wait( ...@@ -113,23 +113,6 @@ xfs_qm_dqunpin_wait(
wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
} }
/*
* Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
* have been failed during writeback
*
* this informs the AIL that the dquot is already flush locked on the next push,
* and acquires a hold on the buffer to ensure that it isn't reclaimed before
* dirty data makes it to disk.
*/
STATIC void
xfs_dquot_item_error(
struct xfs_log_item *lip,
struct xfs_buf *bp)
{
ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush));
xfs_set_li_failed(lip, bp);
}
STATIC uint STATIC uint
xfs_qm_dquot_logitem_push( xfs_qm_dquot_logitem_push(
struct xfs_log_item *lip, struct xfs_log_item *lip,
...@@ -216,7 +199,6 @@ static const struct xfs_item_ops xfs_dquot_item_ops = { ...@@ -216,7 +199,6 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
.iop_release = xfs_qm_dquot_logitem_release, .iop_release = xfs_qm_dquot_logitem_release,
.iop_committing = xfs_qm_dquot_logitem_committing, .iop_committing = xfs_qm_dquot_logitem_committing,
.iop_push = xfs_qm_dquot_logitem_push, .iop_push = xfs_qm_dquot_logitem_push,
.iop_error = xfs_dquot_item_error
}; };
/* /*
......
...@@ -464,23 +464,6 @@ xfs_inode_item_unpin( ...@@ -464,23 +464,6 @@ xfs_inode_item_unpin(
wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT); wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
} }
/*
* Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
* have been failed during writeback
*
* This informs the AIL that the inode is already flush locked on the next push,
* and acquires a hold on the buffer to ensure that it isn't reclaimed before
* dirty data makes it to disk.
*/
STATIC void
xfs_inode_item_error(
struct xfs_log_item *lip,
struct xfs_buf *bp)
{
ASSERT(xfs_isiflocked(INODE_ITEM(lip)->ili_inode));
xfs_set_li_failed(lip, bp);
}
STATIC uint STATIC uint
xfs_inode_item_push( xfs_inode_item_push(
struct xfs_log_item *lip, struct xfs_log_item *lip,
...@@ -619,7 +602,6 @@ static const struct xfs_item_ops xfs_inode_item_ops = { ...@@ -619,7 +602,6 @@ static const struct xfs_item_ops xfs_inode_item_ops = {
.iop_committed = xfs_inode_item_committed, .iop_committed = xfs_inode_item_committed,
.iop_push = xfs_inode_item_push, .iop_push = xfs_inode_item_push,
.iop_committing = xfs_inode_item_committing, .iop_committing = xfs_inode_item_committing,
.iop_error = xfs_inode_item_error
}; };
......
...@@ -74,7 +74,6 @@ struct xfs_item_ops { ...@@ -74,7 +74,6 @@ struct xfs_item_ops {
void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t commit_lsn); void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t commit_lsn);
void (*iop_release)(struct xfs_log_item *); void (*iop_release)(struct xfs_log_item *);
xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t); xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
void (*iop_error)(struct xfs_log_item *, xfs_buf_t *);
int (*iop_recover)(struct xfs_log_item *lip, struct xfs_trans *tp); int (*iop_recover)(struct xfs_log_item *lip, struct xfs_trans *tp);
bool (*iop_match)(struct xfs_log_item *item, uint64_t id); bool (*iop_match)(struct xfs_log_item *item, uint64_t id);
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment