Commit 82ff27bc authored by Brian Foster, committed by Darrick J. Wong

xfs: automatic dfops buffer relogging

Buffers that are held across deferred operations are explicitly
joined to the dfops structure to ensure appropriate relogging.
While buffers are currently joined explicitly, we can detect the
conditions that require relogging at dfops finish time by inspecting
the transaction item list for held buffers.

Replace the xfs_defer_bjoin() infrastructure with such detection and
automatic relogging of held buffers. This eliminates the need for
the per-dfops buffer list, replaced by an on-stack variant in
xfs_defer_trans_roll().

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 488c919a
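
To make the scheme described above concrete, here is a minimal, self-contained user-space sketch of the idea; it is not the kernel code itself. At roll time the transaction's item list is scanned, buffer items carrying a hold flag are dirtied and collected into a small on-stack array, and that array is then used to rejoin and re-hold the buffers in the new transaction. All struct, flag, and limit names below (log_item, buf_log_item, BLI_HOLD, NR_HELD_BUFS, trans_roll) are simplified stand-ins for their XFS counterparts (xfs_log_item, xfs_buf_log_item, XFS_BLI_HOLD, XFS_DEFER_OPS_NR_BUFS, xfs_defer_trans_roll).

/*
 * User-space analogue of automatic held-buffer relogging: instead of callers
 * registering held buffers in a side array (the old xfs_defer_bjoin() model),
 * the "roll" step walks the transaction's item list and picks out any buffer
 * item carrying a hold flag.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_HELD_BUFS	2		/* stand-in for XFS_DEFER_OPS_NR_BUFS */
#define ITEM_BUF	1		/* stand-in for XFS_LI_BUF */
#define BLI_HOLD	(1u << 0)	/* stand-in for XFS_BLI_HOLD */

struct log_item {
	int			type;
	struct log_item		*next;	/* transaction item list linkage */
};

struct buf {
	const char		*name;
	int			relogged;	/* times "dirtied" across rolls */
};

struct buf_log_item {
	struct log_item		item;	/* embedded, like bli_item */
	unsigned int		flags;
	struct buf		*buf;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk the item list, relog held buffers, and collect them for rejoin. */
static int trans_roll(struct log_item *items)
{
	struct buf *held[NR_HELD_BUFS];
	int nheld = 0;

	for (struct log_item *lip = items; lip; lip = lip->next) {
		if (lip->type != ITEM_BUF)
			continue;
		struct buf_log_item *bli =
			container_of(lip, struct buf_log_item, item);
		if (!(bli->flags & BLI_HOLD))
			continue;
		if (nheld >= NR_HELD_BUFS)	/* bounded on-stack list */
			return -1;
		bli->buf->relogged++;		/* "dirty" it before the roll */
		held[nheld++] = bli->buf;
	}

	/* ... commit the old transaction, start the new one ... */

	/* Rejoin and re-hold the collected buffers in the new transaction. */
	for (int i = 0; i < nheld; i++)
		printf("rejoined and held %s (relogged %d times)\n",
		       held[i]->name, held[i]->relogged);
	return 0;
}

int main(void)
{
	struct buf a = { .name = "leaf" }, b = { .name = "dquot" };
	struct buf_log_item bli_a = { .item = { .type = ITEM_BUF },
				      .flags = BLI_HOLD, .buf = &a };
	struct buf_log_item bli_b = { .item = { .type = ITEM_BUF },
				      .flags = 0, .buf = &b };

	bli_a.item.next = &bli_b.item;	/* two items joined to the transaction */
	if (trans_roll(&bli_a.item) != 0)
		return 1;
	return 0;
}

The design point mirrored here is that the transaction item list already records every joined buffer, so the held/not-held decision can be made there and the old side array in struct xfs_defer_ops becomes redundant; callers only need xfs_trans_bhold().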
@@ -320,7 +320,6 @@ xfs_attr_set(
 		 * buffer and run into problems with the write verifier.
 		 */
 		xfs_trans_bhold(args.trans, leaf_bp);
-		xfs_defer_bjoin(args.trans->t_dfops, leaf_bp);
 		xfs_defer_ijoin(args.trans->t_dfops, dp);
 		error = xfs_defer_finish(&args.trans);
 		if (error)
@@ -14,6 +14,7 @@
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_trans.h"
+#include "xfs_buf_item.h"
 #include "xfs_trace.h"

 /*
@@ -228,6 +229,10 @@ xfs_defer_trans_roll(
 	struct xfs_trans	**tp)
 {
 	struct xfs_defer_ops	*dop = (*tp)->t_dfops;
+	struct xfs_buf_log_item	*bli;
+	struct xfs_log_item	*lip;
+	struct xfs_buf		*bplist[XFS_DEFER_OPS_NR_BUFS];
+	int			bpcount = 0;
 	int			i;
 	int			error;
@@ -235,9 +240,24 @@ xfs_defer_trans_roll(
 	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
 		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

-	/* Hold the (previously bjoin'd) buffer locked across the roll. */
-	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
-		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+	list_for_each_entry(lip, &(*tp)->t_items, li_trans) {
+		switch (lip->li_type) {
+		case XFS_LI_BUF:
+			bli = container_of(lip, struct xfs_buf_log_item,
+					   bli_item);
+			if (bli->bli_flags & XFS_BLI_HOLD) {
+				if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
+					ASSERT(0);
+					return -EFSCORRUPTED;
+				}
+				xfs_trans_dirty_buf(*tp, bli->bli_buf);
+				bplist[bpcount++] = bli->bli_buf;
+			}
+			break;
+		default:
+			break;
+		}
+	}

 	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);
@@ -255,9 +275,9 @@ xfs_defer_trans_roll(
 		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

 	/* Rejoin the buffers and dirty them so the log moves forward. */
-	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
-		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
-		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+	for (i = 0; i < bpcount; i++) {
+		xfs_trans_bjoin(*tp, bplist[i]);
+		xfs_trans_bhold(*tp, bplist[i]);
 	}

 	return error;
@@ -295,30 +315,6 @@ xfs_defer_ijoin(
 	return -EFSCORRUPTED;
 }

-/*
- * Add this buffer to the deferred op. Each joined buffer is relogged
- * each time we roll the transaction.
- */
-int
-xfs_defer_bjoin(
-	struct xfs_defer_ops		*dop,
-	struct xfs_buf			*bp)
-{
-	int				i;
-
-	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
-		if (dop->dop_bufs[i] == bp)
-			return 0;
-		else if (dop->dop_bufs[i] == NULL) {
-			dop->dop_bufs[i] = bp;
-			return 0;
-		}
-	}
-
-	ASSERT(0);
-	return -EFSCORRUPTED;
-}
-
 /*
  * Reset an already used dfops after finish.
  */
@@ -331,7 +327,6 @@ xfs_defer_reset(
 	ASSERT(!xfs_defer_has_unfinished_work(dop));

 	memset(dop->dop_inodes, 0, sizeof(dop->dop_inodes));
-	memset(dop->dop_bufs, 0, sizeof(dop->dop_bufs));

 	/*
 	 * Low mode state transfers across transaction rolls to mirror dfops
@@ -594,7 +589,6 @@ xfs_defer_move(
 	list_splice_init(&src->dop_pending, &dst->dop_pending);
 	memcpy(dst->dop_inodes, src->dop_inodes, sizeof(dst->dop_inodes));
-	memcpy(dst->dop_bufs, src->dop_bufs, sizeof(dst->dop_bufs));

 	/*
 	 * Low free space mode was historically controlled by a dfops field.
@@ -43,7 +43,6 @@ void xfs_defer_cancel(struct xfs_trans *);
 void xfs_defer_init(struct xfs_trans *tp, struct xfs_defer_ops *dop);
 bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
 int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
-int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
 void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);

 /* Description of a deferred type. */
@@ -362,7 +362,6 @@ xfs_dquot_disk_alloc(
 	 * manually or by committing the transaction.
 	 */
 	xfs_trans_bhold(tp, bp);
-	error = xfs_defer_bjoin(tp->t_dfops, bp);
 	if (error) {
 		xfs_trans_bhold_release(tp, bp);
 		xfs_trans_brelse(tp, bp);
@@ -100,7 +100,6 @@ struct xfs_defer_ops {

 	/* relog these with each roll */
 	struct xfs_inode	*dop_inodes[XFS_DEFER_OPS_NR_INODES];
-	struct xfs_buf		*dop_bufs[XFS_DEFER_OPS_NR_BUFS];
 };

 /*