Commit 748f553c authored by David Sterba's avatar David Sterba

btrfs: add KCSAN annotations for unlocked access to block_rsv->full

KCSAN reports that there's unlocked access mixed with locked access,
which is technically correct but is not a bug.  To avoid false alerts at
least from KCSAN, add annotation and use a wrapper whenever ->full is
accessed for read outside of lock.

It is used as a fast check and only advisory.  In the worst case the
block reserve is found !full and becomes full in the meantime, but
properly handled.

Depending on the value of ->full, btrfs_block_rsv_release decides
where to return the reservation, and block_rsv_release_bytes handles a
NULL pointer for block_rsv and if it's not NULL then it double checks
the full status under a lock.

Link: https://lore.kernel.org/linux-btrfs/CAAwBoOJDjei5Hnem155N_cJwiEkVwJYvgN-tQrwWbZQGhFU=cA@mail.gmail.com/
Link: https://lore.kernel.org/linux-btrfs/YvHU/vsXd7uz5V6j@hungrycats.org
Reported-by: Zygo Blaxell <ce3g8jdj@umail.furryterror.org>
Signed-off-by: David Sterba <dsterba@suse.com>
parent b0b47a38
@@ -286,7 +286,7 @@ u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
 	 */
 	if (block_rsv == delayed_rsv)
 		target = global_rsv;
-	else if (block_rsv != global_rsv && !delayed_rsv->full)
+	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
 		target = delayed_rsv;

 	if (target && block_rsv->space_info != target->space_info)
@@ -92,4 +92,13 @@ static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
 	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
 }
/*
 * Advisory lock-free read of the reserve's "full" flag.
 *
 * The access is wrapped in data_race() so KCSAN does not flag the mix of
 * locked and unlocked readers; callers must tolerate a stale answer and
 * re-check under the lock where it matters.
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
	bool full = data_race(rsv->full);

	return full;
}
#endif /* BTRFS_BLOCK_RSV_H */
@@ -594,7 +594,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 	 */
 	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
 	if (flush == BTRFS_RESERVE_FLUSH_ALL &&
-	    delayed_refs_rsv->full == 0) {
+	    btrfs_block_rsv_full(delayed_refs_rsv) == 0) {
 		delayed_refs_bytes = num_bytes;
 		num_bytes <<= 1;
 	}
@@ -619,7 +619,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 		if (rsv->space_info->force_alloc)
 			do_chunk_alloc = true;
 	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
-		   !delayed_refs_rsv->full) {
+		   !btrfs_block_rsv_full(delayed_refs_rsv)) {
 		/*
 		 * Some people call with btrfs_start_transaction(root, 0)
 		 * because they can be throttled, but have some other mechanism
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment