Commit 093258e6 authored by David Sterba

btrfs: replace waitqueue_active with cond_wake_up

Use the cond_wake_up()/cond_wake_up_nomb() wrappers and reduce the amount
of low-level detail about the waitqueue management.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 3d3a2e61
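
For context, the two wrappers this commit switches to are small helpers in
include/linux/wait.h. The sketch below is a paraphrase for illustration, not
the verbatim kernel source: cond_wake_up() provides the full memory barrier
itself (via wq_has_sleeper()), while cond_wake_up_nomb() relies on a barrier
already implied by the caller, such as atomic_dec_and_test() or
test_and_set_bit().

/* Paraphrased sketch of the helpers from include/linux/wait.h. */

static inline void cond_wake_up(struct wait_queue_head *wq)
{
        /*
         * wq_has_sleeper() issues a full smp_mb() before checking the
         * waitqueue, so updates done before this call are visible to a
         * task that is about to sleep on the condition.
         */
        if (wq_has_sleeper(wq))
                wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
        /*
         * "No memory barrier" variant: the caller must guarantee the
         * ordering, e.g. by an atomic RMW operation that returns a value
         * (atomic_dec_and_test(), atomic_sub_return(), ...) or by
         * test_and_set_bit() right before this call.
         */
        if (waitqueue_active(wq))
                wake_up(wq);
}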
@@ -990,12 +990,7 @@ static void __free_workspace(int type, struct list_head *workspace,
         btrfs_compress_op[idx]->free_workspace(workspace);
         atomic_dec(total_ws);
 wake:
-        /*
-         * Make sure counter is updated before we wake up waiters.
-         */
-        smp_mb();
-        if (waitqueue_active(ws_wait))
-                wake_up(ws_wait);
+        cond_wake_up(ws_wait);
 }
 
 static void free_workspace(int type, struct list_head *ws)
...
@@ -460,13 +460,10 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 {
         int seq = atomic_inc_return(&delayed_root->items_seq);
 
-        /*
-         * atomic_dec_return implies a barrier for waitqueue_active
-         */
+        /* atomic_dec_return implies a barrier */
         if ((atomic_dec_return(&delayed_root->items) <
-            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
-            waitqueue_active(&delayed_root->wait))
-                wake_up(&delayed_root->wait);
+            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
+                cond_wake_up_nomb(&delayed_root->wait);
 }
 
 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
...
@@ -1009,9 +1009,9 @@ void btrfs_dev_replace_clear_lock_blocking(
         ASSERT(atomic_read(&dev_replace->read_locks) > 0);
         ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
         read_lock(&dev_replace->lock);
-        if (atomic_dec_and_test(&dev_replace->blocking_readers) &&
-            waitqueue_active(&dev_replace->read_lock_wq))
-                wake_up(&dev_replace->read_lock_wq);
+        /* Barrier implied by atomic_dec_and_test */
+        if (atomic_dec_and_test(&dev_replace->blocking_readers))
+                cond_wake_up_nomb(&dev_replace->read_lock_wq);
 }
 
 void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
@@ -1022,9 +1022,7 @@ void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
 void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
 {
         percpu_counter_sub(&fs_info->bio_counter, amount);
-
-        if (waitqueue_active(&fs_info->replace_wait))
-                wake_up(&fs_info->replace_wait);
+        cond_wake_up_nomb(&fs_info->replace_wait);
 }
 
 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
...
@@ -11081,12 +11081,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
 {
         percpu_counter_dec(&root->subv_writers->counter);
-        /*
-         * Make sure counter is updated before we wake up waiters.
-         */
-        smp_mb();
-        if (waitqueue_active(&root->subv_writers->wait))
-                wake_up(&root->subv_writers->wait);
+        cond_wake_up(&root->subv_writers->wait);
 }
 
 int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
...
@@ -1156,13 +1156,10 @@ static noinline void async_cow_submit(struct btrfs_work *work)
         nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
                 PAGE_SHIFT;
 
-        /*
-         * atomic_sub_return implies a barrier for waitqueue_active
-         */
+        /* atomic_sub_return implies a barrier */
         if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
-            5 * SZ_1M &&
-            waitqueue_active(&fs_info->async_submit_wait))
-                wake_up(&fs_info->async_submit_wait);
+            5 * SZ_1M)
+                cond_wake_up_nomb(&fs_info->async_submit_wait);
 
         if (async_cow->inode)
                 submit_compressed_extents(async_cow->inode, async_cow);
...
@@ -66,22 +66,16 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
                 write_lock(&eb->lock);
                 WARN_ON(atomic_read(&eb->spinning_writers));
                 atomic_inc(&eb->spinning_writers);
-                /*
-                 * atomic_dec_and_test implies a barrier for waitqueue_active
-                 */
-                if (atomic_dec_and_test(&eb->blocking_writers) &&
-                    waitqueue_active(&eb->write_lock_wq))
-                        wake_up(&eb->write_lock_wq);
+                /* atomic_dec_and_test implies a barrier */
+                if (atomic_dec_and_test(&eb->blocking_writers))
+                        cond_wake_up_nomb(&eb->write_lock_wq);
         } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                 BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                 read_lock(&eb->lock);
                 atomic_inc(&eb->spinning_readers);
-                /*
-                 * atomic_dec_and_test implies a barrier for waitqueue_active
-                 */
-                if (atomic_dec_and_test(&eb->blocking_readers) &&
-                    waitqueue_active(&eb->read_lock_wq))
-                        wake_up(&eb->read_lock_wq);
+                /* atomic_dec_and_test implies a barrier */
+                if (atomic_dec_and_test(&eb->blocking_readers))
+                        cond_wake_up_nomb(&eb->read_lock_wq);
         }
 }
@@ -221,12 +215,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
         }
         btrfs_assert_tree_read_locked(eb);
         WARN_ON(atomic_read(&eb->blocking_readers) == 0);
-        /*
-         * atomic_dec_and_test implies a barrier for waitqueue_active
-         */
-        if (atomic_dec_and_test(&eb->blocking_readers) &&
-            waitqueue_active(&eb->read_lock_wq))
-                wake_up(&eb->read_lock_wq);
+        /* atomic_dec_and_test implies a barrier */
+        if (atomic_dec_and_test(&eb->blocking_readers))
+                cond_wake_up_nomb(&eb->read_lock_wq);
         atomic_dec(&eb->read_locks);
 }
@@ -275,12 +266,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
         if (blockers) {
                 WARN_ON(atomic_read(&eb->spinning_writers));
                 atomic_dec(&eb->blocking_writers);
-                /*
-                 * Make sure counter is updated before we wake up waiters.
-                 */
+                /* Use the lighter barrier after atomic */
                 smp_mb__after_atomic();
-                if (waitqueue_active(&eb->write_lock_wq))
-                        wake_up(&eb->write_lock_wq);
+                cond_wake_up_nomb(&eb->write_lock_wq);
         } else {
                 WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                 atomic_dec(&eb->spinning_writers);
...
@@ -343,11 +343,8 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
         if (entry->bytes_left == 0) {
                 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
-                /*
-                 * Implicit memory barrier after test_and_set_bit
-                 */
-                if (waitqueue_active(&entry->wait))
-                        wake_up(&entry->wait);
+                /* test_and_set_bit implies a barrier */
+                cond_wake_up_nomb(&entry->wait);
         } else {
                 ret = 1;
         }
@@ -410,11 +407,8 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
         if (entry->bytes_left == 0) {
                 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
-                /*
-                 * Implicit memory barrier after test_and_set_bit
-                 */
-                if (waitqueue_active(&entry->wait))
-                        wake_up(&entry->wait);
+                /* test_and_set_bit implies a barrier */
+                cond_wake_up_nomb(&entry->wait);
         } else {
                 ret = 1;
         }
...
@@ -877,12 +877,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
         atomic_dec(&cur_trans->num_writers);
         extwriter_counter_dec(cur_trans, trans->type);
 
-        /*
-         * Make sure counter is updated before we wake up waiters.
-         */
-        smp_mb();
-        if (waitqueue_active(&cur_trans->writer_wait))
-                wake_up(&cur_trans->writer_wait);
+        cond_wake_up(&cur_trans->writer_wait);
 
         btrfs_put_transaction(cur_trans);
 
         if (current->journal_info == trans)
...
@@ -222,11 +222,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root)
 void btrfs_end_log_trans(struct btrfs_root *root)
 {
         if (atomic_dec_and_test(&root->log_writers)) {
-                /*
-                 * Implicit memory barrier after atomic_dec_and_test
-                 */
-                if (waitqueue_active(&root->log_writer_wait))
-                        wake_up(&root->log_writer_wait);
+                /* atomic_dec_and_test implies a barrier */
+                cond_wake_up_nomb(&root->log_writer_wait);
         }
 }
@@ -2988,11 +2985,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
         mutex_lock(&log_root_tree->log_mutex);
         if (atomic_dec_and_test(&log_root_tree->log_writers)) {
-                /*
-                 * Implicit memory barrier after atomic_dec_and_test
-                 */
-                if (waitqueue_active(&log_root_tree->log_writer_wait))
-                        wake_up(&log_root_tree->log_writer_wait);
+                /* atomic_dec_and_test implies a barrier */
+                cond_wake_up_nomb(&log_root_tree->log_writer_wait);
         }
 
         if (ret) {
@@ -3116,13 +3110,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         mutex_unlock(&log_root_tree->log_mutex);
 
         /*
-         * The barrier before waitqueue_active is needed so all the updates
-         * above are seen by the woken threads. It might not be necessary, but
-         * proving that seems to be hard.
+         * The barrier before waitqueue_active (in cond_wake_up) is needed so
+         * all the updates above are seen by the woken threads. It might not be
+         * necessary, but proving that seems to be hard.
         */
-        smp_mb();
-        if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
-                wake_up(&log_root_tree->log_commit_wait[index2]);
+        cond_wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
         mutex_lock(&root->log_mutex);
         btrfs_remove_all_log_ctxs(root, index1, ret);
@@ -3131,13 +3123,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         mutex_unlock(&root->log_mutex);
 
         /*
-         * The barrier before waitqueue_active is needed so all the updates
-         * above are seen by the woken threads. It might not be necessary, but
-         * proving that seems to be hard.
+         * The barrier before waitqueue_active (in cond_wake_up) is needed so
+         * all the updates above are seen by the woken threads. It might not be
+         * necessary, but proving that seems to be hard.
         */
-        smp_mb();
-        if (waitqueue_active(&root->log_commit_wait[index1]))
-                wake_up(&root->log_commit_wait[index1]);
+        cond_wake_up(&root->log_commit_wait[index1]);
 
         return ret;
 }
...