Commit 3cad1284 authored by Josef Bacik, committed by David Sterba

btrfs: migrate the block group ref counting stuff

Another easy set to move over to block-group.c.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2e405ad8
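
Editor's note: the helpers being moved are the get/put pair for the block group cache refcount. As context for the diff below, here is a minimal, hypothetical caller-side sketch (not part of this commit) of how the pair is typically used together with btrfs_lookup_block_group(), whose declaration also moves into block-group.h in this commit. The sketch assumes kernel context inside fs/btrfs; the function name check_bg_flags() is made up for illustration and only mirrors the existing block_group_bits() helper visible in the last hunk.

/* Hypothetical example, not part of this commit. */
#include "block-group.h"

static int check_bg_flags(struct btrfs_fs_info *fs_info, u64 bytenr, u64 bits)
{
        struct btrfs_block_group_cache *cache;
        int ret;

        /* The lookup returns the cache with its refcount already elevated. */
        cache = btrfs_lookup_block_group(fs_info, bytenr);
        if (!cache)
                return -ENOENT;

        /* Safe to dereference the cache while we hold the reference. */
        ret = (cache->flags & bits) == bits;

        /* Drop the reference; the last put frees the cache. */
        btrfs_put_block_group(cache);
        return ret;
}
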
@@ -3,6 +3,31 @@
#include "ctree.h" #include "ctree.h"
#include "block-group.h" #include "block-group.h"

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                /*
                 * If not empty, someone is still holding mutex of
                 * full_stripe_lock, which can only be released by caller.
                 * And it will definitely cause use-after-free when caller
                 * tries to release full stripe lock.
                 *
                 * No better way to resolve, but only to warn.
                 */
                WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
...
@@ -157,5 +157,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
                struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group_cache *btrfs_next_block_group(
                struct btrfs_block_group_cache *cache);
void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
#endif /* BTRFS_BLOCK_GROUP_H */
@@ -2479,7 +2479,6 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
@@ -2496,8 +2495,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_root *root,
                          u64 objectid, u64 offset, u64 bytenr);
void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             u64 parent, u64 root_objectid,
...
@@ -68,31 +68,6 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                /*
                 * If not empty, someone is still holding mutex of
                 * full_stripe_lock, which can only be released by caller.
                 * And it will definitely cause use-after-free when caller
                 * tries to release full stripe lock.
                 *
                 * No better way to resolve, but only to warn.
                 */
                WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
...