Commit ff09c4ca authored by Anand Jain's avatar Anand Jain Committed by David Sterba

btrfs: scrub: convert scrub_workers_refcnt to refcount_t

Use the refcount_t for fs_info::scrub_workers_refcnt instead of int so
we get the extra checks. All reference changes are still done under
scrub_lock.
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent eb4318e5
...@@ -1075,7 +1075,7 @@ struct btrfs_fs_info { ...@@ -1075,7 +1075,7 @@ struct btrfs_fs_info {
atomic_t scrubs_paused; atomic_t scrubs_paused;
atomic_t scrub_cancel_req; atomic_t scrub_cancel_req;
wait_queue_head_t scrub_pause_wait; wait_queue_head_t scrub_pause_wait;
int scrub_workers_refcnt; refcount_t scrub_workers_refcnt;
struct btrfs_workqueue *scrub_workers; struct btrfs_workqueue *scrub_workers;
struct btrfs_workqueue *scrub_wr_completion_workers; struct btrfs_workqueue *scrub_wr_completion_workers;
struct btrfs_workqueue *scrub_nocow_workers; struct btrfs_workqueue *scrub_nocow_workers;
......
...@@ -2109,7 +2109,7 @@ static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) ...@@ -2109,7 +2109,7 @@ static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
atomic_set(&fs_info->scrubs_paused, 0); atomic_set(&fs_info->scrubs_paused, 0);
atomic_set(&fs_info->scrub_cancel_req, 0); atomic_set(&fs_info->scrub_cancel_req, 0);
init_waitqueue_head(&fs_info->scrub_pause_wait); init_waitqueue_head(&fs_info->scrub_pause_wait);
fs_info->scrub_workers_refcnt = 0; refcount_set(&fs_info->scrub_workers_refcnt, 0);
} }
static void btrfs_init_balance(struct btrfs_fs_info *fs_info) static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
......
...@@ -3743,7 +3743,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, ...@@ -3743,7 +3743,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
lockdep_assert_held(&fs_info->scrub_lock); lockdep_assert_held(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) { if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
flags, is_dev_replace ? 1 : max_active, 4); flags, is_dev_replace ? 1 : max_active, 4);
if (!fs_info->scrub_workers) if (!fs_info->scrub_workers)
...@@ -3760,8 +3760,11 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, ...@@ -3760,8 +3760,11 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
max_active, 2); max_active, 2);
if (!fs_info->scrub_parity_workers) if (!fs_info->scrub_parity_workers)
goto fail_scrub_parity_workers; goto fail_scrub_parity_workers;
refcount_set(&fs_info->scrub_workers_refcnt, 1);
} else {
refcount_inc(&fs_info->scrub_workers_refcnt);
} }
++fs_info->scrub_workers_refcnt;
return 0; return 0;
fail_scrub_parity_workers: fail_scrub_parity_workers:
...@@ -3927,7 +3930,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, ...@@ -3927,7 +3930,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_lock(&fs_info->scrub_lock); mutex_lock(&fs_info->scrub_lock);
dev->scrub_ctx = NULL; dev->scrub_ctx = NULL;
if (--fs_info->scrub_workers_refcnt == 0) { if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
scrub_workers = fs_info->scrub_workers; scrub_workers = fs_info->scrub_workers;
scrub_wr_comp = fs_info->scrub_wr_completion_workers; scrub_wr_comp = fs_info->scrub_wr_completion_workers;
scrub_parity = fs_info->scrub_parity_workers; scrub_parity = fs_info->scrub_parity_workers;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment