Commit 0cfe4bdf authored by Christian Brauner

Merge series 'Fixes and cleanups to fs-writeback' of https://lore.kernel.org/r/20240228091958.288260-1-shikemeng@huaweicloud.com

Pull writeback fixes and cleanups from Kemeng Shi:

This contains several fixes and cleanups for the writeback code.

* series 'Fixes and cleanups to fs-writeback' of https://lore.kernel.org/r/20240228091958.288260-1-shikemeng@huaweicloud.com: (6 commits)
  fs/writeback: remove unnecessary return in writeback_inodes_sb
  fs/writeback: correct comment of __wakeup_flusher_threads_bdi
  fs/writeback: only calculate dirtied_before when b_io is empty
  fs/writeback: remove unused parameter wb of finish_writeback_work
  fs/writeback: bail out if there is no more inodes for IO and queued once
  fs/writeback: avoid to writeback non-expired inode in kupdate writeback
Signed-off-by: Christian Brauner <brauner@kernel.org>
parents fc253215 6a1ee871
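Most of this series revolves around the kupdate expiry cutoff: an inode only qualifies for kupdate writeback if it was dirtied before jiffies - msecs_to_jiffies(dirty_expire_interval * 10), which is what the new dirtied_before parameter of requeue_inode() enforces via inode_dirtied_after() in the diff below. The following is a minimal userspace sketch of that comparison only, not kernel code: the tick counter, msecs_to_ticks(), dirtied_after() and the sample values are illustrative stand-ins for jiffies, msecs_to_jiffies(), inode_dirtied_after() and the real sysctl.

/*
 * Simplified, userspace model of the kupdate expiry check.
 * "ticks" stands in for the kernel's jiffies counter; all helper
 * names below are illustrative, not the in-kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define HZ 100                                       /* assumed tick rate */
static unsigned long dirty_expire_centisecs = 3000;  /* 30s, like the sysctl */

static unsigned long msecs_to_ticks(unsigned long ms)
{
	return ms * HZ / 1000;
}

/* Mirrors the idea of inode_dirtied_after(): true if dirtied after t. */
static bool dirtied_after(unsigned long dirtied_when, unsigned long t)
{
	return (long)(dirtied_when - t) > 0;
}

int main(void)
{
	unsigned long ticks = 100000;                  /* "now" */
	unsigned long dirtied_before = ticks -
		msecs_to_ticks(dirty_expire_centisecs * 10);

	unsigned long old_inode = ticks - msecs_to_ticks(60000); /* 60s ago */
	unsigned long new_inode = ticks - msecs_to_ticks(5000);  /*  5s ago */

	/* Only the expired (old) inode should be picked up by kupdate. */
	printf("old inode expired: %d\n", !dirtied_after(old_inode, dirtied_before));
	printf("new inode expired: %d\n", !dirtied_after(new_inode, dirtied_before));
	return 0;
}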
@@ -166,8 +166,7 @@ static void wb_wakeup_delayed(struct bdi_writeback *wb)
 	spin_unlock_irq(&wb->work_lock);
 }
 
-static void finish_writeback_work(struct bdi_writeback *wb,
-				  struct wb_writeback_work *work)
+static void finish_writeback_work(struct wb_writeback_work *work)
 {
 	struct wb_completion *done = work->done;
@@ -196,7 +195,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
 		list_add_tail(&work->list, &wb->work_list);
 		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 	} else
-		finish_writeback_work(wb, work);
+		finish_writeback_work(work);
 
 	spin_unlock_irq(&wb->work_lock);
 }
@@ -1561,7 +1560,8 @@ static void inode_sleep_on_writeback(struct inode *inode)
  * thread's back can have unexpected consequences.
  */
 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
-			  struct writeback_control *wbc)
+			  struct writeback_control *wbc,
+			  unsigned long dirtied_before)
 {
 	if (inode->i_state & I_FREEING)
 		return;
@@ -1594,7 +1594,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 		 * We didn't write back all the pages. nfs_writepages()
 		 * sometimes bales out without doing anything.
 		 */
-		if (wbc->nr_to_write <= 0) {
+		if (wbc->nr_to_write <= 0 &&
+		    !inode_dirtied_after(inode, dirtied_before)) {
 			/* Slice used up. Queue for next turn. */
 			requeue_io(inode, wb);
 		} else {
@@ -1862,6 +1863,11 @@ static long writeback_sb_inodes(struct super_block *sb,
 	unsigned long start_time = jiffies;
 	long write_chunk;
 	long total_wrote = 0;  /* count both pages and inodes */
+	unsigned long dirtied_before = jiffies;
+
+	if (work->for_kupdate)
+		dirtied_before = jiffies -
+			msecs_to_jiffies(dirty_expire_interval * 10);
 
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
@@ -1967,7 +1973,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY_ALL))
 			total_wrote++;
-		requeue_inode(inode, tmp_wb, &wbc);
+		requeue_inode(inode, tmp_wb, &wbc, dirtied_before);
 		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
@@ -2069,6 +2075,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 	struct inode *inode;
 	long progress;
 	struct blk_plug plug;
+	bool queued = false;
 
 	blk_start_plug(&plug);
 	for (;;) {
@@ -2098,21 +2105,24 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		spin_lock(&wb->list_lock);
 
-		/*
-		 * Kupdate and background works are special and we want to
-		 * include all inodes that need writing. Livelock avoidance is
-		 * handled by these works yielding to any other work so we are
-		 * safe.
-		 */
-		if (work->for_kupdate) {
-			dirtied_before = jiffies -
-				msecs_to_jiffies(dirty_expire_interval * 10);
-		} else if (work->for_background)
-			dirtied_before = jiffies;
-
 		trace_writeback_start(wb, work);
-		if (list_empty(&wb->b_io))
+		if (list_empty(&wb->b_io)) {
+			/*
+			 * Kupdate and background works are special and we want
+			 * to include all inodes that need writing. Livelock
+			 * avoidance is handled by these works yielding to any
+			 * other work so we are safe.
+			 */
+			if (work->for_kupdate) {
+				dirtied_before = jiffies -
+					msecs_to_jiffies(dirty_expire_interval *
+							 10);
+			} else if (work->for_background)
+				dirtied_before = jiffies;
+
 			queue_io(wb, work, dirtied_before);
+			queued = true;
+		}
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -2127,7 +2137,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * mean the overall work is done. So we keep looping as long
 		 * as made some progress on cleaning pages or inodes.
 		 */
-		if (progress) {
+		if (progress || !queued) {
 			spin_unlock(&wb->list_lock);
 			continue;
 		}
@@ -2262,7 +2272,7 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 	while ((work = get_next_work_item(wb)) != NULL) {
 		trace_writeback_exec(wb, work);
 		wrote += wb_writeback(wb, work);
-		finish_writeback_work(wb, work);
+		finish_writeback_work(work);
 	}
 
 	/*
@@ -2322,8 +2332,7 @@ void wb_workfn(struct work_struct *work)
 }
 
 /*
- * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
- * write back the whole world.
+ * Start writeback of all dirty pages on this bdi.
  */
 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
 					 enum wb_reason reason)
@@ -2726,7 +2735,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
  */
 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
+	writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
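A note on the queued flag added to wb_writeback() above: inodes are now only queued when b_io is empty, and the loop only keeps retrying while it either made progress or has not yet queued anything itself; once it has queued once and makes no progress, it bails out rather than spinning. Below is a rough userspace model of that control flow only, with every writeback internal reduced to an illustrative stub; none of these names are the kernel's, and the real loop also waits on inodes under writeback before giving up.

/*
 * Rough control-flow model of the "bail out if there are no more inodes
 * for IO and we queued once" fix. All helpers are illustrative stubs.
 */
#include <stdbool.h>
#include <stdio.h>

static int pending = 3;   /* pretend number of dirty inodes */

static bool b_io_empty(void) { return pending == 0; }

static long write_some(void)
{
	if (pending == 0)
		return 0;
	pending--;
	return 1;          /* wrote one inode's worth of pages */
}

static long wb_writeback_model(void)
{
	long wrote = 0;
	bool queued = false;

	for (;;) {
		if (b_io_empty()) {
			/* the kernel's queue_io() would refill b_io here */
			queued = true;
		}

		long progress = write_some();
		wrote += progress;

		/*
		 * Keep looping while we make progress, or while we have not
		 * queued anything ourselves yet. Once we have queued and
		 * still make no progress, there is nothing left to do, so
		 * bail out instead of retrying forever.
		 */
		if (progress || !queued)
			continue;
		break;
	}
	return wrote;
}

int main(void)
{
	printf("wrote %ld\n", wb_writeback_model());
	return 0;
}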