Commit 843ccf9f authored by David Sterba

btrfs: use assertion helpers for spinning writers

Use the helpers where open coded. On non-debug builds the warnings will
not trigger, extent_buffer::spinning_writers becomes unused and can be
moved to the debug-only section, saving a few bytes.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e4e9fd0f
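
The btrfs_assert_spinning_writers_* helpers used in the hunks below were introduced by the parent commit, e4e9fd0f. As a rough sketch only, reconstructed from the open-coded checks this patch removes rather than copied from that commit: with CONFIG_BTRFS_DEBUG the WARN_ON sanity checks and counter updates remain, without it they collapse to empty stubs, which is what lets extent_buffer::spinning_writers move under the debug-only section of the struct.

/*
 * Sketch of the helpers, added by the parent commit (presumably in
 * fs/btrfs/locking.c, next to their users); reconstructed here from the
 * open-coded checks that this patch removes.
 */
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	/* Nobody else may be spinning on the write lock we just took. */
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	/* Exactly one spinning writer is expected when the lock is dropped. */
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}
#else
/* Non-debug builds never touch the counter, so the member can be compiled out. */
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
#endif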
fs/btrfs/extent_io.c
@@ -4682,7 +4682,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 	atomic_set(&eb->blocking_readers, 0);
 	atomic_set(&eb->blocking_writers, 0);
 	atomic_set(&eb->spinning_readers, 0);
-	atomic_set(&eb->spinning_writers, 0);
 	eb->lock_nested = 0;
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
@@ -4700,6 +4699,10 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 		> MAX_INLINE_EXTENT_BUFFER_SIZE);
 	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
 
+#ifdef CONFIG_BTRFS_DEBUG
+	atomic_set(&eb->spinning_writers, 0);
+#endif
+
 	return eb;
 }
fs/btrfs/extent_io.h
@@ -166,7 +166,6 @@ struct extent_buffer {
 	atomic_t blocking_writers;
 	atomic_t blocking_readers;
 	atomic_t spinning_readers;
-	atomic_t spinning_writers;
 	short lock_nested;
 	/* >= 0 if eb belongs to a log tree, -1 otherwise */
 	short log_index;
@@ -185,6 +184,7 @@ struct extent_buffer {
 	wait_queue_head_t read_lock_wq;
 	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 #ifdef CONFIG_BTRFS_DEBUG
+	atomic_t spinning_writers;
 	struct list_head leak_list;
 #endif
 };
fs/btrfs/locking.c
@@ -64,8 +64,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
 	if (atomic_read(&eb->blocking_writers) == 0) {
-		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
-		atomic_dec(&eb->spinning_writers);
+		btrfs_assert_spinning_writers_put(eb);
 		btrfs_assert_tree_locked(eb);
 		atomic_inc(&eb->blocking_writers);
 		write_unlock(&eb->lock);
@@ -101,8 +100,7 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 		return;
 	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 	write_lock(&eb->lock);
-	WARN_ON(atomic_read(&eb->spinning_writers));
-	atomic_inc(&eb->spinning_writers);
+	btrfs_assert_spinning_writers_get(eb);
 	/* atomic_dec_and_test implies a barrier */
 	if (atomic_dec_and_test(&eb->blocking_writers))
 		cond_wake_up_nomb(&eb->write_lock_wq);
@@ -200,7 +198,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 		return 0;
 	}
 	atomic_inc(&eb->write_locks);
-	atomic_inc(&eb->spinning_writers);
+	btrfs_assert_spinning_writers_get(eb);
 	eb->lock_owner = current->pid;
 	return 1;
 }
@@ -266,8 +264,7 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 		write_unlock(&eb->lock);
 		goto again;
 	}
-	WARN_ON(atomic_read(&eb->spinning_writers));
-	atomic_inc(&eb->spinning_writers);
+	btrfs_assert_spinning_writers_get(eb);
 	atomic_inc(&eb->write_locks);
 	eb->lock_owner = current->pid;
 }
@@ -286,14 +283,13 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	atomic_dec(&eb->write_locks);
 
 	if (blockers) {
-		WARN_ON(atomic_read(&eb->spinning_writers));
+		btrfs_assert_no_spinning_writers(eb);
 		atomic_dec(&eb->blocking_writers);
 		/* Use the lighter barrier after atomic */
 		smp_mb__after_atomic();
 		cond_wake_up_nomb(&eb->write_lock_wq);
 	} else {
-		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
-		atomic_dec(&eb->spinning_writers);
+		btrfs_assert_spinning_writers_put(eb);
 		write_unlock(&eb->lock);
 	}
 }
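
For illustration, a hypothetical caller (example_writer below is not part of the patch) shows which assertion each lock-state transition in locking.c exercises:

/* Hypothetical caller, for illustration only; not part of the patch. */
static void example_writer(struct extent_buffer *eb)
{
	/* Takes the spinning write lock: btrfs_assert_spinning_writers_get() */
	btrfs_tree_lock(eb);

	/* Converts spinning -> blocking: btrfs_assert_spinning_writers_put() */
	btrfs_set_lock_blocking_write(eb);

	/* ... work that may sleep ... */

	/* Drops a blocking write lock: btrfs_assert_no_spinning_writers() */
	btrfs_tree_unlock(eb);
}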