Commit 3eeb3226 authored by Josef Bacik, committed by David Sterba

btrfs: migrate nocow and reservation helpers

These are relatively straightforward as well, simply moving the nocow
writer and block group reservation helpers out of extent-tree.c and
ctree.h and into block-group.c and block-group.h.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 3cad1284
fs/btrfs/block-group.c
@@ -2,6 +2,7 @@
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
@@ -118,3 +119,84 @@ struct btrfs_block_group_cache *btrfs_next_block_group(
spin_unlock(&fs_info->block_group_cache_lock);
return cache;
}
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group_cache *bg;
bool ret = true;
bg = btrfs_lookup_block_group(fs_info, bytenr);
if (!bg)
return false;
spin_lock(&bg->lock);
if (bg->ro)
ret = false;
else
atomic_inc(&bg->nocow_writers);
spin_unlock(&bg->lock);
/* No put on block group, done by btrfs_dec_nocow_writers */
if (!ret)
btrfs_put_block_group(bg);
return ret;
}
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group_cache *bg;
bg = btrfs_lookup_block_group(fs_info, bytenr);
ASSERT(bg);
if (atomic_dec_and_test(&bg->nocow_writers))
wake_up_var(&bg->nocow_writers);
/*
* Once for our lookup and once for the lookup done by a previous call
* to btrfs_inc_nocow_writers()
*/
btrfs_put_block_group(bg);
btrfs_put_block_group(bg);
}
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
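
Taken together, the three nocow helpers above implement a small writer-count
protocol: a nocow writer pins its block group before writing (failing if the
group is missing or read-only), drops the count when done, and
btrfs_wait_nocow_writers() lets another task wait for all in-flight nocow
writers to drain. A minimal caller-side sketch of that pairing follows; the
function name is illustrative only, assumes the usual btrfs internal headers,
and is not part of this patch:

/* Illustrative only: how a nocow write path might bracket its work. */
static int example_nocow_write(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	/* Fails if the block group is missing or already read-only. */
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return -EAGAIN;

	/* ... perform the in-place (nocow) write covering bytenr ... */

	/*
	 * Looks the block group up again, drops the writer count (waking any
	 * waiter) and puts both references: its own lookup and the one held
	 * since btrfs_inc_nocow_writers().
	 */
	btrfs_dec_nocow_writers(fs_info, bytenr);
	return 0;
}
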
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start)
{
struct btrfs_block_group_cache *bg;
bg = btrfs_lookup_block_group(fs_info, start);
ASSERT(bg);
if (atomic_dec_and_test(&bg->reservations))
wake_up_var(&bg->reservations);
btrfs_put_block_group(bg);
}
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
struct btrfs_space_info *space_info = bg->space_info;
ASSERT(bg->ro);
if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
return;
/*
* Our block group is read only but before we set it to read only,
* some task might have allocated an extent from it already, but it
* has not yet created a respective ordered extent (and added it to a
* root's list of ordered extents).
* Therefore wait for any task currently allocating extents, since the
* block group's reservations counter is incremented while a read lock
* on the groups' semaphore is held and decremented after releasing
* the read access on that semaphore and creating the ordered extent.
*/
down_write(&space_info->groups_sem);
up_write(&space_info->groups_sem);
wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
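
The empty down_write()/up_write() critical section above acts purely as a
barrier: every task that raised bg->reservations did so while holding
space_info->groups_sem for reading, so taking and immediately releasing the
write lock guarantees those allocators have left the semaphore before we
start waiting on the counter. A rough sketch of the allocator-side pattern
being waited out is below; the function name and parameters are illustrative,
not code from this patch:

/* Illustrative only: the shape of an allocation path this barrier targets. */
static void example_reserve_extent(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info,
				   struct btrfs_block_group_cache *bg,
				   u64 start)
{
	down_read(&space_info->groups_sem);
	/* The reservation count rises while groups_sem is held for reading. */
	btrfs_inc_block_group_reservations(bg);
	up_read(&space_info->groups_sem);

	/* ... create the ordered extent for the allocated range ... */

	/*
	 * Only after the ordered extent exists is the count dropped, which is
	 * the event btrfs_wait_block_group_reservations() waits for.
	 */
	btrfs_dec_block_group_reservations(fs_info, start);
}
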
fs/btrfs/block-group.h
@@ -159,5 +159,11 @@ struct btrfs_block_group_cache *btrfs_next_block_group(
struct btrfs_block_group_cache *cache);
void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
#endif /* BTRFS_BLOCK_GROUP_H */
fs/btrfs/ctree.h
@@ -2473,12 +2473,6 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
...
fs/btrfs/extent-tree.c
@@ -3561,51 +3561,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
return readonly;
}
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group_cache *bg;
bool ret = true;
bg = btrfs_lookup_block_group(fs_info, bytenr);
if (!bg)
return false;
spin_lock(&bg->lock);
if (bg->ro)
ret = false;
else
atomic_inc(&bg->nocow_writers);
spin_unlock(&bg->lock);
/* no put on block group, done by btrfs_dec_nocow_writers */
if (!ret)
btrfs_put_block_group(bg);
return ret;
}
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct btrfs_block_group_cache *bg;
bg = btrfs_lookup_block_group(fs_info, bytenr);
ASSERT(bg);
if (atomic_dec_and_test(&bg->nocow_writers))
wake_up_var(&bg->nocow_writers);
/*
* Once for our lookup and once for the lookup done by a previous call
* to btrfs_inc_nocow_writers()
*/
btrfs_put_block_group(bg);
btrfs_put_block_group(bg);
}
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 extra_flags = chunk_to_extended(flags) &
@@ -4277,43 +4232,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
atomic_inc(&bg->reservations);
}
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start)
{
struct btrfs_block_group_cache *bg;
bg = btrfs_lookup_block_group(fs_info, start);
ASSERT(bg);
if (atomic_dec_and_test(&bg->reservations))
wake_up_var(&bg->reservations);
btrfs_put_block_group(bg);
}
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
struct btrfs_space_info *space_info = bg->space_info;
ASSERT(bg->ro);
if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
return;
/*
* Our block group is read only but before we set it to read only,
* some task might have allocated an extent from it already, but it
* has not yet created a respective ordered extent (and added it to a
* root's list of ordered extents).
* Therefore wait for any task currently allocating extents, since the
* block group's reservations counter is incremented while a read lock
* on the groups' semaphore is held and decremented after releasing
* the read access on that semaphore and creating the ordered extent.
*/
down_write(&space_info->groups_sem);
up_write(&space_info->groups_sem);
wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
/**
* btrfs_add_reserved_bytes - update the block_group and space info counters
* @cache: The cache we are manipulating
...