Commit b95be2d9 authored by David Sterba

btrfs: split btrfs_set_lock_blocking_rw to read and write helpers

There are many callers that hardcode the desired lock type, so we can
avoid the switch and call the new helpers directly. Split the current
function into two, but leave a helper that still takes the variable lock
type so the current code keeps compiling. The call sites will be
converted in followup patches.
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 9627736b
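As an illustration of the call-site conversion mentioned in the commit message (not part of this commit), a caller that currently passes a hardcoded lock type would switch to the matching direct helper in a followup patch. The snippet below is a hedged sketch: only the btrfs_set_lock_blocking_* names come from this patch, the surrounding caller is assumed.

        /* Before: generic helper, lock type passed even though it is fixed */
        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

        /* After: the read-specific helper is called directly, no switch needed */
        btrfs_set_lock_blocking_read(eb);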
fs/btrfs/locking.c

@@ -14,22 +14,31 @@

 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

-/*
- * if we currently have a spinning reader or writer lock
- * (indicated by the rw flag) this will bump the count
- * of blocking holders and drop the spinlock.
- */
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
+{
+        /*
+         * No lock is required. The lock owner may change if we have a read
+         * lock, but it won't change to or away from us. If we have the write
+         * lock, we are the owner and it'll never change.
+         */
+        if (eb->lock_nested && current->pid == eb->lock_owner)
+                return;
+        btrfs_assert_tree_read_locked(eb);
+        atomic_inc(&eb->blocking_readers);
+        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+        atomic_dec(&eb->spinning_readers);
+        read_unlock(&eb->lock);
+}
+
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 {
         /*
-         * no lock is required. The lock owner may change if
-         * we have a read lock, but it won't change to or away
-         * from us. If we have the write lock, we are the owner
-         * and it'll never change.
+         * No lock is required. The lock owner may change if we have a read
+         * lock, but it won't change to or away from us. If we have the write
+         * lock, we are the owner and it'll never change.
          */
         if (eb->lock_nested && current->pid == eb->lock_owner)
                 return;
-        if (rw == BTRFS_WRITE_LOCK) {
-                if (atomic_read(&eb->blocking_writers) == 0) {
-                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
-                        atomic_dec(&eb->spinning_writers);
+        if (atomic_read(&eb->blocking_writers) == 0) {
+                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+                atomic_dec(&eb->spinning_writers);
@@ -37,13 +46,6 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
-                        atomic_inc(&eb->blocking_writers);
-                        write_unlock(&eb->lock);
-                }
-        } else if (rw == BTRFS_READ_LOCK) {
-                btrfs_assert_tree_read_locked(eb);
-                atomic_inc(&eb->blocking_readers);
-                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
-                atomic_dec(&eb->spinning_readers);
-                read_unlock(&eb->lock);
-        }
+                atomic_inc(&eb->blocking_writers);
+                write_unlock(&eb->lock);
+        }
 }

 /*
fs/btrfs/locking.h

@@ -17,7 +17,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb);
 void btrfs_tree_read_lock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
@@ -37,6 +38,18 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
         BUG();
 }

+/*
+ * If we currently have a spinning reader or writer lock (indicated by the rw
+ * flag) this will bump the count of blocking holders and drop the spinlock.
+ */
+static inline void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
+{
+        if (rw == BTRFS_WRITE_LOCK)
+                btrfs_set_lock_blocking_write(eb);
+        else if (rw == BTRFS_READ_LOCK)
+                btrfs_set_lock_blocking_read(eb);
+}
+
 static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
 {
         btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);