Commit 0030807c authored by Christoph Hellwig, committed by Alex Elder

xfs: revert to using a kthread for AIL pushing

Currently we have a few issues with the way the workqueue code is used to
implement AIL pushing:

 - it accidentally uses the same workqueue as the syncer action, and thus
   can be prevented from running if there are enough sync actions active
   in the system.
 - it doesn't use the HIGHPRI flag to queue at the head of the queue of
   work items

At this point I'm not confident enough in getting all the workqueue flags and
tweaks right to provide a perfectly reliable execution context for AIL
pushing, which is the most important piece in XFS to make forward progress
when the log fills.

Revert back to use a kthread per filesystem which fixes all the above issues
at the cost of having a task struct and stack around for each mounted
filesystem.  In addition this also gives us much better ways to diagnose
any issues involving hung AIL pushing and removes a small amount of code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Stefan Priebe <s.priebe@profihost.ag>
Tested-by: Stefan Priebe <s.priebe@profihost.ag>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent 17b38471
...@@ -68,6 +68,8 @@ ...@@ -68,6 +68,8 @@
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/list_sort.h> #include <linux/list_sort.h>
#include <asm/page.h> #include <asm/page.h>
......
...@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void) ...@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void)
*/ */
xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
if (!xfs_syncd_wq) if (!xfs_syncd_wq)
goto out; return -ENOMEM;
xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
if (!xfs_ail_wq)
goto out_destroy_syncd;
return 0; return 0;
out_destroy_syncd:
destroy_workqueue(xfs_syncd_wq);
out:
return -ENOMEM;
} }
STATIC void STATIC void
xfs_destroy_workqueues(void) xfs_destroy_workqueues(void)
{ {
destroy_workqueue(xfs_ail_wq);
destroy_workqueue(xfs_syncd_wq); destroy_workqueue(xfs_syncd_wq);
} }
......
...@@ -28,8 +28,6 @@ ...@@ -28,8 +28,6 @@
#include "xfs_trans_priv.h" #include "xfs_trans_priv.h"
#include "xfs_error.h" #include "xfs_error.h"
struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
#ifdef DEBUG #ifdef DEBUG
/* /*
* Check that the list is sorted as it should be. * Check that the list is sorted as it should be.
...@@ -356,16 +354,10 @@ xfs_ail_delete( ...@@ -356,16 +354,10 @@ xfs_ail_delete(
xfs_trans_ail_cursor_clear(ailp, lip); xfs_trans_ail_cursor_clear(ailp, lip);
} }
/* static long
* xfs_ail_worker does the work of pushing on the AIL. It will requeue itself xfsaild_push(
* to run at a later time if there is more work to do to complete the push. struct xfs_ail *ailp)
*/
STATIC void
xfs_ail_worker(
struct work_struct *work)
{ {
struct xfs_ail *ailp = container_of(to_delayed_work(work),
struct xfs_ail, xa_work);
xfs_mount_t *mp = ailp->xa_mount; xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor cur; struct xfs_ail_cursor cur;
xfs_log_item_t *lip; xfs_log_item_t *lip;
...@@ -505,20 +497,6 @@ xfs_ail_worker( ...@@ -505,20 +497,6 @@ xfs_ail_worker(
/* We're past our target or empty, so idle */ /* We're past our target or empty, so idle */
ailp->xa_last_pushed_lsn = 0; ailp->xa_last_pushed_lsn = 0;
/*
* We clear the XFS_AIL_PUSHING_BIT first before checking
* whether the target has changed. If the target has changed,
* this pushes the requeue race directly onto the result of the
* atomic test/set bit, so we are guaranteed that either the
* the pusher that changed the target or ourselves will requeue
* the work (but not both).
*/
clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
smp_rmb();
if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
return;
tout = 50; tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) { } else if (XFS_LSN_CMP(lsn, target) >= 0) {
/* /*
...@@ -541,9 +519,30 @@ xfs_ail_worker( ...@@ -541,9 +519,30 @@ xfs_ail_worker(
tout = 20; tout = 20;
} }
/* There is more to do, requeue us. */ return tout;
queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, }
msecs_to_jiffies(tout));
static int
xfsaild(
void *data)
{
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
while (!kthread_should_stop()) {
if (tout && tout <= 20)
__set_current_state(TASK_KILLABLE);
else
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(tout ?
msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
try_to_freeze();
tout = xfsaild_push(ailp);
}
return 0;
} }
/* /*
...@@ -578,8 +577,9 @@ xfs_ail_push( ...@@ -578,8 +577,9 @@ xfs_ail_push(
*/ */
smp_wmb(); smp_wmb();
xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) smp_wmb();
queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
wake_up_process(ailp->xa_task);
} }
/* /*
...@@ -817,9 +817,18 @@ xfs_trans_ail_init( ...@@ -817,9 +817,18 @@ xfs_trans_ail_init(
INIT_LIST_HEAD(&ailp->xa_ail); INIT_LIST_HEAD(&ailp->xa_ail);
INIT_LIST_HEAD(&ailp->xa_cursors); INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock); spin_lock_init(&ailp->xa_lock);
INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
ailp->xa_mount->m_fsname);
if (IS_ERR(ailp->xa_task))
goto out_free_ailp;
mp->m_ail = ailp; mp->m_ail = ailp;
return 0; return 0;
out_free_ailp:
kmem_free(ailp);
return ENOMEM;
} }
void void
...@@ -828,6 +837,6 @@ xfs_trans_ail_destroy( ...@@ -828,6 +837,6 @@ xfs_trans_ail_destroy(
{ {
struct xfs_ail *ailp = mp->m_ail; struct xfs_ail *ailp = mp->m_ail;
cancel_delayed_work_sync(&ailp->xa_work); kthread_stop(ailp->xa_task);
kmem_free(ailp); kmem_free(ailp);
} }
...@@ -64,23 +64,17 @@ struct xfs_ail_cursor { ...@@ -64,23 +64,17 @@ struct xfs_ail_cursor {
*/ */
struct xfs_ail { struct xfs_ail {
struct xfs_mount *xa_mount; struct xfs_mount *xa_mount;
struct task_struct *xa_task;
struct list_head xa_ail; struct list_head xa_ail;
xfs_lsn_t xa_target; xfs_lsn_t xa_target;
struct list_head xa_cursors; struct list_head xa_cursors;
spinlock_t xa_lock; spinlock_t xa_lock;
struct delayed_work xa_work;
xfs_lsn_t xa_last_pushed_lsn; xfs_lsn_t xa_last_pushed_lsn;
unsigned long xa_flags;
}; };
#define XFS_AIL_PUSHING_BIT 0
/* /*
* From xfs_trans_ail.c * From xfs_trans_ail.c
*/ */
extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur, struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items, int nr_items, struct xfs_log_item **log_items, int nr_items,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.