Commit cb357bf3 authored by Darrick J. Wong

xfs: implement per-inode writeback completion queues

When scheduling writeback of dirty file data in the page cache, XFS uses
IO completion workqueue items to ensure that filesystem metadata is
updated only after the write completes successfully.  This is essential
for converting unwritten extents to real extents at the right time and
for performing COW remappings.

Unfortunately, XFS queues each IO completion work item to an unbounded
workqueue, which means that the kernel can spawn dozens of threads to
try to handle the items quickly.  These threads need to take the ILOCK
to update file metadata, so a large number of work items targeting a
single file results in heavy, inefficient ILOCK contention.

Worse yet, the writeback completion threads get stuck waiting for the
ILOCK while holding transaction reservations, which can use up all
available log reservation space.  When that happens, metadata updates to
other parts of the filesystem grind to a halt, even though the
filesystem could otherwise have handled them.

Even worse, if one of the stalled threads is in the middle of a
defer-ops finish, holds the same ILOCK, and is trying to obtain more log
reservation after exhausting its permanent reservation, we now have an
ABBA deadlock: writeback completion holds a transaction reservation and
wants the ILOCK, while the other thread holds the ILOCK and wants a
transaction reservation.

Therefore, we create a per-inode writeback IO completion queue and work
item.  When writeback finishes, it can add the ioend to the per-inode
queue and let the inode's single work item process that queue.  This
dramatically cuts down on the number of kworkers and on ILOCK contention
in the system, and it seems to have eliminated an occasional deadlock I
was seeing while running generic/476.
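
The queueing pattern itself is small.  Here is a condensed, hypothetical
sketch of it (the names demo_inode, demo_ioend, queue_ioend and
process_ioends are illustrative only; the real code in the patch below
lives in xfs_end_bio and xfs_end_io): the completion path appends the
ioend to a per-inode list under an irq-safe spinlock and queues the
inode's single work item only when the list goes from empty to
non-empty, while the worker splices the whole list out under the lock
and then processes the entries with the lock dropped.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_ioend {
	struct list_head	io_list;
	/* ... offset, size, unwritten/COW state ... */
};

struct demo_inode {
	spinlock_t		ioend_lock;
	struct list_head	ioend_list;
	struct work_struct	ioend_work;
};

/* bio completion path: append the ioend, kick the single per-inode worker */
static void queue_ioend(struct demo_inode *ip, struct demo_ioend *ioend,
			struct workqueue_struct *wq)
{
	unsigned long	flags;

	spin_lock_irqsave(&ip->ioend_lock, flags);
	if (list_empty(&ip->ioend_list))
		queue_work(wq, &ip->ioend_work);
	list_add_tail(&ioend->io_list, &ip->ioend_list);
	spin_unlock_irqrestore(&ip->ioend_lock, flags);
}

/* worker: splice the pending list out under the lock, then complete each ioend */
static void process_ioends(struct work_struct *work)
{
	struct demo_inode	*ip = container_of(work, struct demo_inode,
						   ioend_work);
	struct demo_ioend	*ioend, *next;
	LIST_HEAD(completion_list);
	unsigned long		flags;

	spin_lock_irqsave(&ip->ioend_lock, flags);
	list_splice_init(&ip->ioend_list, &completion_list);
	spin_unlock_irqrestore(&ip->ioend_lock, flags);

	list_for_each_entry_safe(ioend, next, &completion_list, io_list) {
		list_del_init(&ioend->io_list);
		/* complete the ioend: unwritten conversion, COW remap, size update */
	}
}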

Testing with a program that simulates a heavy random-write workload to a
single file demonstrates that the number of kworkers drops from
approximately 120 threads per file to 1, without dramatically changing
write bandwidth or pagecache access latency.

Note that we leave the xfs-conv workqueue's max_active alone because we
still want to be able to run ioend processing for as many inodes as the
system can handle.
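
As a hedged illustration of that point (not the actual xfs_mount setup
code; the helper name and error handling here are made up), allocating
the workqueue with max_active == 0 keeps the default concurrency limit,
so parallelism across inodes is preserved and the per-inode
serialization comes solely from each inode owning one work_struct:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *conv_wq;

/* Hypothetical setup helper; "xfs-conv/%s" mirrors the workqueue named above. */
static int demo_alloc_conv_wq(const char *fsname)
{
	conv_wq = alloc_workqueue("xfs-conv/%s",
				  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, fsname);
	return conv_wq ? 0 : -ENOMEM;
}
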
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 4fb7951f

--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -234,11 +234,9 @@ xfs_setfilesize_ioend(
  * IO write completion.
  */
 STATIC void
-xfs_end_io(
-	struct work_struct	*work)
+xfs_end_ioend(
+	struct xfs_ioend	*ioend)
 {
-	struct xfs_ioend	*ioend =
-		container_of(work, struct xfs_ioend, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
@@ -278,19 +276,49 @@ xfs_end_io(
 	xfs_destroy_ioend(ioend, error);
 }
 
+/* Finish all pending io completions. */
+void
+xfs_end_io(
+	struct work_struct	*work)
+{
+	struct xfs_inode	*ip;
+	struct xfs_ioend	*ioend;
+	struct list_head	completion_list;
+	unsigned long		flags;
+
+	ip = container_of(work, struct xfs_inode, i_ioend_work);
+
+	spin_lock_irqsave(&ip->i_ioend_lock, flags);
+	list_replace_init(&ip->i_ioend_list, &completion_list);
+	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
+
+	while (!list_empty(&completion_list)) {
+		ioend = list_first_entry(&completion_list, struct xfs_ioend,
+				io_list);
+		list_del_init(&ioend->io_list);
+		xfs_end_ioend(ioend);
+	}
+}
+
 STATIC void
 xfs_end_bio(
 	struct bio		*bio)
 {
 	struct xfs_ioend	*ioend = bio->bi_private;
-	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	unsigned long		flags;
 
 	if (ioend->io_fork == XFS_COW_FORK ||
-	    ioend->io_state == XFS_EXT_UNWRITTEN)
-		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
-	else if (ioend->io_append_trans)
-		queue_work(mp->m_data_workqueue, &ioend->io_work);
-	else
+	    ioend->io_state == XFS_EXT_UNWRITTEN ||
+	    ioend->io_append_trans != NULL) {
+		spin_lock_irqsave(&ip->i_ioend_lock, flags);
+		if (list_empty(&ip->i_ioend_list))
+			WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
+						 &ip->i_ioend_work));
+		list_add_tail(&ioend->io_list, &ip->i_ioend_list);
+		spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
+	} else
 		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 }
 
@@ -594,7 +622,6 @@ xfs_alloc_ioend(
 	ioend->io_inode = inode;
 	ioend->io_size = 0;
 	ioend->io_offset = offset;
-	INIT_WORK(&ioend->io_work, xfs_end_io);
 	ioend->io_append_trans = NULL;
 	ioend->io_bio = bio;
 	return ioend;
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -18,7 +18,6 @@ struct xfs_ioend {
 	struct inode		*io_inode;	/* file being written to */
 	size_t			io_size;	/* size of the extent */
 	xfs_off_t		io_offset;	/* offset in the file */
-	struct work_struct	io_work;	/* xfsdatad work queue */
 	struct xfs_trans	*io_append_trans;/* xact. for size update */
 	struct bio		*io_bio;	/* bio being built */
 	struct bio		io_inline_bio;	/* MUST BE LAST! */
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -72,6 +72,9 @@ xfs_inode_alloc(
 	memset(&ip->i_d, 0, sizeof(ip->i_d));
 	ip->i_sick = 0;
 	ip->i_checked = 0;
+	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
+	INIT_LIST_HEAD(&ip->i_ioend_list);
+	spin_lock_init(&ip->i_ioend_lock);
 
 	return ip;
 }
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -65,6 +65,11 @@ typedef struct xfs_inode {
 
 	/* VFS inode */
 	struct inode		i_vnode;	/* embedded VFS inode */
+
+	/* pending io completions */
+	spinlock_t		i_ioend_lock;
+	struct work_struct	i_ioend_work;
+	struct list_head	i_ioend_list;
 } xfs_inode_t;
 
 /* Convert from vfs inode to xfs inode */
@@ -511,4 +516,6 @@ bool xfs_inode_verify_forks(struct xfs_inode *ip);
 int xfs_iunlink_init(struct xfs_perag *pag);
 void xfs_iunlink_destroy(struct xfs_perag *pag);
 
+void xfs_end_io(struct work_struct *work);
+
 #endif	/* __XFS_INODE_H__ */