Commit 5b9cce4c authored by Tejun Heo, committed by Jens Axboe

writeback: Generalize and expose wb_completion

wb_completion is used to track writeback completions.  We want to use
it from the memcg side for foreign inode flushes.  This patch updates it
to remember the target waitq instead of assuming bdi->wb_waitq, and
exposes it outside of fs-writeback.c.
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 38b4e09f
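
For context, a minimal sketch of how the reworked completion API fits together, modeled on the bdi_split_work_to_wbs() usage in the diff below; the `wb` variable and the work setup are illustrative placeholders, not code from this commit:

	struct backing_dev_info *bdi = sb->s_bdi;	/* illustrative: any bdi works */
	DEFINE_WB_COMPLETION(done, bdi);		/* cnt = 1, waitq = &bdi->wb_waitq */
	struct wb_writeback_work work = {
		.sync_mode	= WB_SYNC_ALL,
		.done		= &done,		/* wb_queue_work() bumps ->cnt */
	};

	wb_queue_work(wb, &work);	/* finish_writeback_work() drops ->cnt and
					 * wakes *done->waitq once it reaches zero */
	wb_wait_for_completion(&done);	/* put the initial count, then sleep until
					 * every queued work has completed */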
fs/fs-writeback.c:

@@ -36,10 +36,6 @@
  */
 #define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
 
-struct wb_completion {
-	atomic_t		cnt;
-};
-
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
@@ -60,19 +56,6 @@ struct wb_writeback_work {
 	struct wb_completion *done;	/* set if the caller waits */
 };
 
-/*
- * If one wants to wait for one or more wb_writeback_works, each work's
- * ->done should be set to a wb_completion defined using the following
- * macro.  Once all work items are issued with wb_queue_work(), the caller
- * can wait for the completion of all using wb_wait_for_completion().  Work
- * items which are waited upon aren't freed automatically on completion.
- */
-#define DEFINE_WB_COMPLETION_ONSTACK(cmpl)				\
-	struct wb_completion cmpl = {					\
-		.cnt		= ATOMIC_INIT(1),			\
-	}
-
 /*
  * If an inode is constantly having its pages dirtied, but then the
  * updates stop dirtytime_expire_interval seconds in the past, it's
@@ -182,7 +165,7 @@ static void finish_writeback_work(struct bdi_writeback *wb,
 	if (work->auto_free)
 		kfree(work);
 	if (done && atomic_dec_and_test(&done->cnt))
-		wake_up_all(&wb->bdi->wb_waitq);
+		wake_up_all(done->waitq);
 }
 
 static void wb_queue_work(struct bdi_writeback *wb,
@@ -206,20 +189,18 @@ static void wb_queue_work(struct bdi_writeback *wb,
 
 /**
  * wb_wait_for_completion - wait for completion of bdi_writeback_works
- * @bdi: bdi work items were issued to
  * @done: target wb_completion
  *
  * Wait for one or more work items issued to @bdi with their ->done field
- * set to @done, which should have been defined with
- * DEFINE_WB_COMPLETION_ONSTACK().  This function returns after all such
- * work items are completed.  Work items which are waited upon aren't freed
+ * set to @done, which should have been initialized with
+ * DEFINE_WB_COMPLETION().  This function returns after all such work items
+ * are completed.  Work items which are waited upon aren't freed
  * automatically on completion.
  */
-static void wb_wait_for_completion(struct backing_dev_info *bdi,
-				   struct wb_completion *done)
+void wb_wait_for_completion(struct wb_completion *done)
 {
 	atomic_dec(&done->cnt);		/* put down the initial count */
-	wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
+	wait_event(*done->waitq, !atomic_read(&done->cnt));
 }
 
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -854,7 +835,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 restart:
 	rcu_read_lock();
 	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
-		DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
+		DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 		struct wb_writeback_work fallback_work;
 		struct wb_writeback_work *work;
 		long nr_pages;
@@ -901,7 +882,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 			last_wb = wb;
 
 		rcu_read_unlock();
-		wb_wait_for_completion(bdi, &fallback_work_done);
+		wb_wait_for_completion(&fallback_work_done);
 		goto restart;
 	}
 	rcu_read_unlock();
@@ -2373,7 +2354,8 @@ static void wait_sb_inodes(struct super_block *sb)
 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
 				     enum wb_reason reason, bool skip_if_busy)
 {
-	DEFINE_WB_COMPLETION_ONSTACK(done);
+	struct backing_dev_info *bdi = sb->s_bdi;
+	DEFINE_WB_COMPLETION(done, bdi);
 	struct wb_writeback_work work = {
 		.sb			= sb,
 		.sync_mode		= WB_SYNC_NONE,
@@ -2382,14 +2364,13 @@ static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
 		.nr_pages		= nr,
 		.reason			= reason,
 	};
-	struct backing_dev_info *bdi = sb->s_bdi;
 
 	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
 		return;
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
-	wb_wait_for_completion(bdi, &done);
+	wb_wait_for_completion(&done);
 }
 
 /**
@@ -2451,7 +2432,8 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
  */
 void sync_inodes_sb(struct super_block *sb)
 {
-	DEFINE_WB_COMPLETION_ONSTACK(done);
+	struct backing_dev_info *bdi = sb->s_bdi;
+	DEFINE_WB_COMPLETION(done, bdi);
 	struct wb_writeback_work work = {
 		.sb		= sb,
 		.sync_mode	= WB_SYNC_ALL,
@@ -2461,7 +2443,6 @@ void sync_inodes_sb(struct super_block *sb)
 		.reason		= WB_REASON_SYNC,
 		.for_sync	= 1,
 	};
-	struct backing_dev_info *bdi = sb->s_bdi;
 
 	/*
 	 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
@@ -2475,7 +2456,7 @@ void sync_inodes_sb(struct super_block *sb)
 	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
 	bdi_down_write_wb_switch_rwsem(bdi);
 	bdi_split_work_to_wbs(bdi, &work, false);
-	wb_wait_for_completion(bdi, &done);
+	wb_wait_for_completion(&done);
 	bdi_up_write_wb_switch_rwsem(bdi);
 
 	wait_sb_inodes(sb);
include/linux/backing-dev-defs.h:

@@ -67,6 +67,26 @@ enum wb_reason {
 	WB_REASON_MAX,
 };
 
+struct wb_completion {
+	atomic_t		cnt;
+	wait_queue_head_t	*waitq;
+};
+
+#define __WB_COMPLETION_INIT(_waitq)	\
+	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }
+
+/*
+ * If one wants to wait for one or more wb_writeback_works, each work's
+ * ->done should be set to a wb_completion defined using the following
+ * macro.  Once all work items are issued with wb_queue_work(), the caller
+ * can wait for the completion of all using wb_wait_for_completion().  Work
+ * items which are waited upon aren't freed automatically on completion.
+ */
+#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)
+
+#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
+	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
+
 /*
  * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
  * wb's can operate mostly independently but should share the congested
include/linux/backing-dev.h:

@@ -44,6 +44,8 @@ void wb_start_background_writeback(struct bdi_writeback *wb);
 void wb_workfn(struct work_struct *work);
 void wb_wakeup_delayed(struct bdi_writeback *wb);
 
+void wb_wait_for_completion(struct wb_completion *done);
+
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
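
What the generalization buys is that a completion no longer has to wake on bdi->wb_waitq.  A hypothetical sketch of the intended memcg-side use for foreign inode flushes (the waitq name here is made up; the actual memcg user arrives in a later patch):

	/* hypothetical: a waitq owned by the memcg code, not by any one bdi */
	static DECLARE_WAIT_QUEUE_HEAD(frn_waitq);

	struct wb_completion done = __WB_COMPLETION_INIT(&frn_waitq);

	/* ... issue writeback works against foreign wb's with ->done = &done ... */

	wb_wait_for_completion(&done);	/* sleeps on frn_waitq, not on bdi->wb_waitq */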