Commit ff4ab5e0 authored by Darrick J. Wong

xfs: fix an incore inode UAF in xfs_bui_recover

In xfs_bui_item_recover, there exists a use-after-free bug with regards
to the inode that is involved in the bmap replay operation.  If the
mapping operation does not complete, we call xfs_bmap_unmap_extent to
create a deferred op to finish the unmapping work, and we retain a
pointer to the incore inode.

Unfortunately, the very next thing we do is commit the transaction and
drop the inode.  If reclaim tears down the inode before we try to finish
the defer ops, we dereference garbage and blow up.  Therefore, create a
way to join inodes to the defer ops freezer so that we can maintain the
xfs_inode reference until we're done with the inode.

Note: This imposes the requirement that there be enough memory to keep
every incore inode in memory throughout recovery.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 64a3f331
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include "xfs_inode.h" #include "xfs_inode.h"
#include "xfs_inode_item.h" #include "xfs_inode_item.h"
#include "xfs_trace.h" #include "xfs_trace.h"
#include "xfs_icache.h"
/* /*
* Deferred Operations in XFS * Deferred Operations in XFS
...@@ -553,10 +554,14 @@ xfs_defer_move( ...@@ -553,10 +554,14 @@ xfs_defer_move(
* deferred ops state is transferred to the capture structure and the * deferred ops state is transferred to the capture structure and the
* transaction is then ready for the caller to commit it. If there are no * transaction is then ready for the caller to commit it. If there are no
* intent items to capture, this function returns NULL. * intent items to capture, this function returns NULL.
*
* If capture_ip is not NULL, the capture structure will obtain an extra
* reference to the inode.
*/ */
static struct xfs_defer_capture * static struct xfs_defer_capture *
xfs_defer_ops_capture( xfs_defer_ops_capture(
struct xfs_trans *tp) struct xfs_trans *tp,
struct xfs_inode *capture_ip)
{ {
struct xfs_defer_capture *dfc; struct xfs_defer_capture *dfc;
...@@ -582,6 +587,15 @@ xfs_defer_ops_capture( ...@@ -582,6 +587,15 @@ xfs_defer_ops_capture(
/* Preserve the log reservation size. */ /* Preserve the log reservation size. */
dfc->dfc_logres = tp->t_log_res; dfc->dfc_logres = tp->t_log_res;
/*
* Grab an extra reference to this inode and attach it to the capture
* structure.
*/
if (capture_ip) {
ihold(VFS_I(capture_ip));
dfc->dfc_capture_ip = capture_ip;
}
return dfc; return dfc;
} }
...@@ -592,24 +606,33 @@ xfs_defer_ops_release( ...@@ -592,24 +606,33 @@ xfs_defer_ops_release(
struct xfs_defer_capture *dfc) struct xfs_defer_capture *dfc)
{ {
xfs_defer_cancel_list(mp, &dfc->dfc_dfops); xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
if (dfc->dfc_capture_ip)
xfs_irele(dfc->dfc_capture_ip);
kmem_free(dfc); kmem_free(dfc);
} }
/* /*
* Capture any deferred ops and commit the transaction. This is the last step * Capture any deferred ops and commit the transaction. This is the last step
* needed to finish a log intent item that we recovered from the log. * needed to finish a log intent item that we recovered from the log. If any
* of the deferred ops operate on an inode, the caller must pass in that inode
* so that the reference can be transferred to the capture structure. The
* caller must hold ILOCK_EXCL on the inode, and must unlock it before calling
* xfs_defer_ops_continue.
*/ */
int int
xfs_defer_ops_capture_and_commit( xfs_defer_ops_capture_and_commit(
struct xfs_trans *tp, struct xfs_trans *tp,
struct xfs_inode *capture_ip,
struct list_head *capture_list) struct list_head *capture_list)
{ {
struct xfs_mount *mp = tp->t_mountp; struct xfs_mount *mp = tp->t_mountp;
struct xfs_defer_capture *dfc; struct xfs_defer_capture *dfc;
int error; int error;
ASSERT(!capture_ip || xfs_isilocked(capture_ip, XFS_ILOCK_EXCL));
/* If we don't capture anything, commit transaction and exit. */ /* If we don't capture anything, commit transaction and exit. */
dfc = xfs_defer_ops_capture(tp); dfc = xfs_defer_ops_capture(tp, capture_ip);
if (!dfc) if (!dfc)
return xfs_trans_commit(tp); return xfs_trans_commit(tp);
...@@ -626,16 +649,26 @@ xfs_defer_ops_capture_and_commit( ...@@ -626,16 +649,26 @@ xfs_defer_ops_capture_and_commit(
/* /*
* Attach a chain of captured deferred ops to a new transaction and free the * Attach a chain of captured deferred ops to a new transaction and free the
* capture structure. * capture structure. If an inode was captured, it will be passed back to the
* caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
* The caller now owns the inode reference.
*/ */
void void
xfs_defer_ops_continue( xfs_defer_ops_continue(
struct xfs_defer_capture *dfc, struct xfs_defer_capture *dfc,
struct xfs_trans *tp) struct xfs_trans *tp,
struct xfs_inode **captured_ipp)
{ {
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY)); ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
/* Lock and join the captured inode to the new transaction. */
if (dfc->dfc_capture_ip) {
xfs_ilock(dfc->dfc_capture_ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, dfc->dfc_capture_ip, 0);
}
*captured_ipp = dfc->dfc_capture_ip;
/* Move captured dfops chain and state to the transaction. */ /* Move captured dfops chain and state to the transaction. */
list_splice_init(&dfc->dfc_dfops, &tp->t_dfops); list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
tp->t_flags |= dfc->dfc_tpflags; tp->t_flags |= dfc->dfc_tpflags;
......
...@@ -82,6 +82,12 @@ struct xfs_defer_capture { ...@@ -82,6 +82,12 @@ struct xfs_defer_capture {
/* Log reservation saved from the transaction. */ /* Log reservation saved from the transaction. */
unsigned int dfc_logres; unsigned int dfc_logres;
/*
* An inode reference that must be maintained to complete the deferred
* work.
*/
struct xfs_inode *dfc_capture_ip;
}; };
/* /*
...@@ -89,8 +95,9 @@ struct xfs_defer_capture { ...@@ -89,8 +95,9 @@ struct xfs_defer_capture {
* This doesn't normally happen except log recovery. * This doesn't normally happen except log recovery.
*/ */
int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp, int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp,
struct list_head *capture_list); struct xfs_inode *capture_ip, struct list_head *capture_list);
void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp); void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp,
struct xfs_inode **captured_ipp);
void xfs_defer_ops_release(struct xfs_mount *mp, struct xfs_defer_capture *d); void xfs_defer_ops_release(struct xfs_mount *mp, struct xfs_defer_capture *d);
#endif /* __XFS_DEFER_H__ */ #endif /* __XFS_DEFER_H__ */
...@@ -513,8 +513,11 @@ xfs_bui_item_recover( ...@@ -513,8 +513,11 @@ xfs_bui_item_recover(
xfs_bmap_unmap_extent(tp, ip, &irec); xfs_bmap_unmap_extent(tp, ip, &irec);
} }
/* Commit transaction, which frees the transaction. */ /*
error = xfs_defer_ops_capture_and_commit(tp, capture_list); * Commit transaction, which frees the transaction and saves the inode
* for later replay activities.
*/
error = xfs_defer_ops_capture_and_commit(tp, ip, capture_list);
if (error) if (error)
goto err_unlock; goto err_unlock;
......
...@@ -627,7 +627,7 @@ xfs_efi_item_recover( ...@@ -627,7 +627,7 @@ xfs_efi_item_recover(
} }
return xfs_defer_ops_capture_and_commit(tp, capture_list); return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
abort_error: abort_error:
xfs_trans_cancel(tp); xfs_trans_cancel(tp);
......
...@@ -2439,6 +2439,7 @@ xlog_finish_defer_ops( ...@@ -2439,6 +2439,7 @@ xlog_finish_defer_ops(
{ {
struct xfs_defer_capture *dfc, *next; struct xfs_defer_capture *dfc, *next;
struct xfs_trans *tp; struct xfs_trans *tp;
struct xfs_inode *ip;
int error = 0; int error = 0;
list_for_each_entry_safe(dfc, next, capture_list, dfc_list) { list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
...@@ -2464,9 +2465,13 @@ xlog_finish_defer_ops( ...@@ -2464,9 +2465,13 @@ xlog_finish_defer_ops(
* from recovering a single intent item. * from recovering a single intent item.
*/ */
list_del_init(&dfc->dfc_list); list_del_init(&dfc->dfc_list);
xfs_defer_ops_continue(dfc, tp); xfs_defer_ops_continue(dfc, tp, &ip);
error = xfs_trans_commit(tp); error = xfs_trans_commit(tp);
if (ip) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_irele(ip);
}
if (error) if (error)
return error; return error;
} }
......
...@@ -544,7 +544,7 @@ xfs_cui_item_recover( ...@@ -544,7 +544,7 @@ xfs_cui_item_recover(
} }
xfs_refcount_finish_one_cleanup(tp, rcur, error); xfs_refcount_finish_one_cleanup(tp, rcur, error);
return xfs_defer_ops_capture_and_commit(tp, capture_list); return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
abort_error: abort_error:
xfs_refcount_finish_one_cleanup(tp, rcur, error); xfs_refcount_finish_one_cleanup(tp, rcur, error);
......
...@@ -567,7 +567,7 @@ xfs_rui_item_recover( ...@@ -567,7 +567,7 @@ xfs_rui_item_recover(
} }
xfs_rmap_finish_one_cleanup(tp, rcur, error); xfs_rmap_finish_one_cleanup(tp, rcur, error);
return xfs_defer_ops_capture_and_commit(tp, capture_list); return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
abort_error: abort_error:
xfs_rmap_finish_one_cleanup(tp, rcur, error); xfs_rmap_finish_one_cleanup(tp, rcur, error);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment