Commit 99f4cdb1 authored by Elena Reshetova, committed by David Sterba

btrfs: convert scrub_ctx.refs from atomic_t to refcount_t

The refcount_t type and its corresponding API should be used instead
of atomic_t when the variable is used as a reference counter. This
avoids accidental refcounter overflows that might lead to
use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 78a76450
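
For context, a minimal sketch (not part of this commit, and not from the btrfs scrub code) of the lifetime pattern this conversion targets: a kzalloc'd, reference-counted context where the last put frees the object. The struct and helper names (my_ctx, my_ctx_alloc, my_ctx_get, my_ctx_put) are hypothetical; only refcount_set(), refcount_inc() and refcount_dec_and_test() are the real <linux/refcount.h> API. Unlike atomic_t, refcount_t saturates and WARNs on overflow instead of wrapping, which is what closes the use-after-free window described above.

/* Hypothetical example for illustration only; not from this commit. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_ctx {
	refcount_t refs;	/* lifetime of this context */
	/* ... payload ... */
};

static struct my_ctx *my_ctx_alloc(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	refcount_set(&ctx->refs, 1);	/* caller owns the initial reference */
	return ctx;
}

static void my_ctx_get(struct my_ctx *ctx)
{
	/* saturates and WARNs instead of wrapping past UINT_MAX */
	refcount_inc(&ctx->refs);
}

static void my_ctx_put(struct my_ctx *ctx)
{
	/* true only for the final put */
	if (refcount_dec_and_test(&ctx->refs))
		kfree(ctx);
}

The conversion below is mechanical: refcount_set() replaces atomic_set() at allocation, refcount_inc() replaces atomic_inc() at each get, and refcount_dec_and_test() replaces atomic_dec_and_test() on the put path that frees the context.
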
@@ -202,7 +202,7 @@ struct scrub_ctx {
 	 * doesn't free the scrub context before or while the workers are
 	 * doing the wakeup() call.
 	 */
-	atomic_t		refs;
+	refcount_t		refs;
 };
 
 struct scrub_fixup_nodatasum {
@@ -305,7 +305,7 @@ static void scrub_put_ctx(struct scrub_ctx *sctx);
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
 {
-	atomic_inc(&sctx->refs);
+	refcount_inc(&sctx->refs);
 	atomic_inc(&sctx->bios_in_flight);
 }
@@ -356,7 +356,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 {
 	struct btrfs_fs_info *fs_info = sctx->fs_info;
 
-	atomic_inc(&sctx->refs);
+	refcount_inc(&sctx->refs);
 	/*
 	 * increment scrubs_running to prevent cancel requests from
 	 * completing as long as a worker is running. we must also
@@ -447,7 +447,7 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 
 static void scrub_put_ctx(struct scrub_ctx *sctx)
 {
-	if (atomic_dec_and_test(&sctx->refs))
+	if (refcount_dec_and_test(&sctx->refs))
 		scrub_free_ctx(sctx);
 }
@@ -462,7 +462,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
 		goto nomem;
-	atomic_set(&sctx->refs, 1);
+	refcount_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
 	sctx->curr = -1;