Commit fce667c5 authored by Linus Torvalds

Merge tag 'for-linus-v3.5-rc7' of git://oss.sgi.com/xfs/xfs

Pull xfs regression fixes from Ben Myers:
 - Really fix a cursor leak in xfs_alloc_ag_vextent_near
 - Fix a performance regression related to doing allocation in
   workqueues
 - Prevent recursion in xfs_buf_iorequest which is causing stack
   overflows
 - Don't call xfs_bdstrat_cb in xfs_buf_iodone callbacks

* tag 'for-linus-v3.5-rc7' of git://oss.sgi.com/xfs/xfs:
  xfs: do not call xfs_bdstrat_cb in xfs_buf_iodone_callbacks
  xfs: prevent recursion in xfs_buf_iorequest
  xfs: don't defer metadata allocation to the workqueue
  xfs: really fix the cursor leak in xfs_alloc_ag_vextent_near
parents 3e997130 1632dcc9
fs/xfs/xfs_alloc.c
@@ -1074,13 +1074,13 @@ xfs_alloc_ag_vextent_near(
 	 * If we couldn't get anything, give up.
 	 */
 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
 		if (!forced++) {
 			trace_xfs_alloc_near_busy(args);
 			xfs_log_force(args->mp, XFS_LOG_SYNC);
 			goto restart;
 		}
-
-		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 		trace_xfs_alloc_size_neither(args);
 		args->agbno = NULLAGBLOCK;
 		return 0;
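The by-size cursor is set up again after the restart label, so it has to be released before the goto restart as well as on the final failure path; the hunk above moves the xfs_btree_del_cursor() call up so both exits are covered. As a rough standalone illustration of that leak pattern (generic names, deliberately not XFS code):

/*
 * Illustrative sketch only: a resource allocated at the retry label must be
 * freed before retrying, not just before giving up.
 */
#include <stdio.h>
#include <stdlib.h>

struct cursor { int unused; };			/* stand-in for a btree cursor */

static struct cursor *cursor_alloc(void) { return malloc(sizeof(struct cursor)); }
static void cursor_free(struct cursor *cur) { free(cur); }

/* Pretend lookup: fails on the first pass, succeeds once forced != 0. */
static int lookup(struct cursor *cur, int forced) { (void)cur; return forced != 0; }

static int find_extent(void)
{
	int forced = 0;
	struct cursor *cnt_cur;

restart:
	cnt_cur = cursor_alloc();		/* a fresh cursor on every restart */

	if (!lookup(cnt_cur, forced)) {
		/*
		 * Free before *both* exits; skipping this before the goto
		 * (the original bug) leaked one cursor per retry.
		 */
		cursor_free(cnt_cur);
		if (!forced++)
			goto restart;		/* retry, e.g. after a log force */
		return -1;			/* give up */
	}

	cursor_free(cnt_cur);
	return 0;
}

int main(void)
{
	printf("find_extent() = %d\n", find_extent());
	return 0;
}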
@@ -2434,13 +2434,22 @@ xfs_alloc_vextent_worker(
 	current_restore_flags_nested(&pflags, PF_FSTRANS);
 }
 
-int				/* error */
+/*
+ * Data allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Metadata
+ * requests, OTOH, are generally from low stack usage paths, so avoid the
+ * context switch overhead here.
+ */
+int
 xfs_alloc_vextent(
-	xfs_alloc_arg_t	*args)	/* allocation argument structure */
+	struct xfs_alloc_arg	*args)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 
+	if (!args->userdata)
+		return __xfs_alloc_vextent(args);
+
 	args->done = &done;
 	INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
 	queue_work(xfs_alloc_wq, &args->work);
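To make the control flow above concrete: data allocations park the caller on an on-stack completion and run the actual work from a workqueue, where it gets a fresh, deep stack, while metadata allocations now call the allocation routine directly. Below is a minimal kernel-module-style sketch of that pattern; everything named demo_* is hypothetical, only the workqueue and completion APIs are real, and this is not the XFS implementation.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

/* Hypothetical argument block, mirroring the done/work fields used above. */
struct demo_args {
	int			userdata;	/* nonzero: data allocation */
	int			result;
	struct completion	*done;
	struct work_struct	work;
};

static struct workqueue_struct *demo_wq;

/* The actual work, run on the worker thread's (fresh) stack. */
static int demo_do_alloc(struct demo_args *args)
{
	return 0;				/* pretend success */
}

static void demo_worker(struct work_struct *work)
{
	struct demo_args *args = container_of(work, struct demo_args, work);

	args->result = demo_do_alloc(args);
	complete(args->done);
}

/* Data requests are pushed to the workqueue; metadata runs inline. */
static int demo_alloc(struct demo_args *args)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!args->userdata)
		return demo_do_alloc(args);

	args->done = &done;
	INIT_WORK_ONSTACK(&args->work, demo_worker);
	queue_work(demo_wq, &args->work);
	wait_for_completion(&done);
	return args->result;
}

static int __init demo_init(void)
{
	struct demo_args args = { .userdata = 1 };

	demo_wq = alloc_workqueue("demo_alloc", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	pr_info("demo_alloc() = %d\n", demo_alloc(&args));
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The trade-off is the one the new comment states: a context switch per request is acceptable for deep-stacked data allocations but pure overhead for the already-shallow metadata paths.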
fs/xfs/xfs_buf.c
@@ -989,27 +989,6 @@ xfs_buf_ioerror_alert(
 		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
 }
 
-int
-xfs_bwrite(
-	struct xfs_buf		*bp)
-{
-	int			error;
-
-	ASSERT(xfs_buf_islocked(bp));
-
-	bp->b_flags |= XBF_WRITE;
-	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
-
-	xfs_bdstrat_cb(bp);
-
-	error = xfs_buf_iowait(bp);
-	if (error) {
-		xfs_force_shutdown(bp->b_target->bt_mount,
-				   SHUTDOWN_META_IO_ERROR);
-	}
-	return error;
-}
-
 /*
  * Called when we want to stop a buffer from getting written or read.
  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@@ -1079,14 +1058,7 @@ xfs_bioerror_relse(
 	return EIO;
 }
 
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
+STATIC int
 xfs_bdstrat_cb(
 	struct xfs_buf	*bp)
 {
@@ -1107,6 +1079,27 @@ xfs_bdstrat_cb(
 	return 0;
 }
 
+int
+xfs_bwrite(
+	struct xfs_buf		*bp)
+{
+	int			error;
+
+	ASSERT(xfs_buf_islocked(bp));
+
+	bp->b_flags |= XBF_WRITE;
+	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+
+	xfs_bdstrat_cb(bp);
+
+	error = xfs_buf_iowait(bp);
+	if (error) {
+		xfs_force_shutdown(bp->b_target->bt_mount,
+				   SHUTDOWN_META_IO_ERROR);
+	}
+	return error;
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data from going to disk in case
  * we are shutting down the filesystem. Typically user data goes thru this
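The truncated comment above describes the strategy wrapper whose job is to keep I/O from reaching the disk once the filesystem is being forcibly shut down; the comment removed earlier in this diff made the same point about catching buffers "after prematurely unpinning it to forcibly shutdown the filesystem". As a tiny, hypothetical sketch of that guard pattern, with invented types and no relation to the real XFS buffer cache:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins for a buffer and its mount. */
struct mount { bool forced_shutdown; };
struct buf   { struct mount *mount; int error; bool done; };

static void buf_ioend(struct buf *bp) { bp->done = true; }	/* wake waiters */
static void submit_io(struct buf *bp) { buf_ioend(bp); }	/* pretend I/O */

/*
 * Strategy wrapper: if the filesystem is already shutting down, do not
 * issue the I/O at all; mark the buffer failed and complete it so that
 * waiters are woken and no further writes reach the device.
 */
static int strat_wrapper(struct buf *bp)
{
	if (bp->mount->forced_shutdown) {
		bp->error = EIO;
		buf_ioend(bp);
		return EIO;
	}

	submit_io(bp);
	return 0;
}

int main(void)
{
	struct mount m = { .forced_shutdown = true };
	struct buf b = { .mount = &m };

	printf("strat_wrapper() = %d (error = %d)\n", strat_wrapper(&b), b.error);
	return 0;
}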
@@ -1243,7 +1236,7 @@ xfs_buf_iorequest(
 	 */
 	atomic_set(&bp->b_io_remaining, 1);
 	_xfs_buf_ioapply(bp);
-	_xfs_buf_ioend(bp, 0);
+	_xfs_buf_ioend(bp, 1);
 
 	xfs_buf_rele(bp);
 }
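Context for the one-character change above: the second argument of _xfs_buf_ioend() asks for the completion processing to be scheduled rather than run inline. Completing inline from the submission path lets an iodone callback that resubmits the buffer re-enter xfs_buf_iorequest on the same stack, which is the recursion and stack overflow the merge text mentions; handing the completion to a workqueue flattens that into a loop. A hypothetical, self-contained sketch of the idea (invented names, not the XFS buffer cache):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical buffer whose completion callback may resubmit it. */
struct buf {
	int retries_left;
	void (*iodone)(struct buf *);
	struct buf *next;			/* deferred-completion list link */
};

static struct buf *completion_queue;

static void submit(struct buf *bp, bool defer_completion);

/* Completion callback: keeps resubmitting while retries remain. */
static void iodone_retry(struct buf *bp)
{
	if (--bp->retries_left > 0)
		submit(bp, true);
}

static void submit(struct buf *bp, bool defer_completion)
{
	/* ...pretend the I/O is issued here and completes immediately... */
	if (defer_completion) {
		bp->next = completion_queue;	/* queue it for the "worker" */
		completion_queue = bp;
	} else {
		bp->iodone(bp);			/* inline: submit -> iodone -> submit -> ... */
	}
}

/* Stand-in for the workqueue worker: drains completions on a flat stack. */
static void run_completions(void)
{
	while (completion_queue) {
		struct buf *bp = completion_queue;

		completion_queue = bp->next;
		bp->iodone(bp);
	}
}

int main(void)
{
	struct buf b = { .retries_left = 100000, .iodone = iodone_retry, .next = NULL };

	submit(&b, true);	/* deferred: stack depth stays bounded however often it retries */
	run_completions();
	printf("done, retries_left = %d\n", b.retries_left);
	return 0;
}

The last two hunks below are the related cleanup: the only external caller of xfs_bdstrat_cb(), xfs_buf_iodone_callbacks(), resubmits via xfs_buf_iorequest() instead, so the helper can become STATIC and its declaration drops out of xfs_buf.h.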
fs/xfs/xfs_buf.h
@@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 extern int xfs_bwrite(struct xfs_buf *bp);
 
 extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
 
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
fs/xfs/xfs_buf_item.c
@@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
 	if (!XFS_BUF_ISSTALE(bp)) {
 		bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
-		xfs_bdstrat_cb(bp);
+		xfs_buf_iorequest(bp);
 	} else {
 		xfs_buf_relse(bp);
 	}