Commit 1e4f4714 authored by Elena Reshetova, committed by David Sterba

btrfs: convert btrfs_caching_control.count from atomic_t to refcount_t

The refcount_t type and its corresponding API should be
used instead of atomic_t when the variable serves as
a reference counter. This helps avoid accidental
refcounter overflows that might lead to use-after-free
situations.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e76edab7
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/security.h> #include <linux/security.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/dynamic_debug.h> #include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include "extent_io.h" #include "extent_io.h"
#include "extent_map.h" #include "extent_map.h"
#include "async-thread.h" #include "async-thread.h"
...@@ -518,7 +519,7 @@ struct btrfs_caching_control { ...@@ -518,7 +519,7 @@ struct btrfs_caching_control {
struct btrfs_work work; struct btrfs_work work;
struct btrfs_block_group_cache *block_group; struct btrfs_block_group_cache *block_group;
u64 progress; u64 progress;
atomic_t count; refcount_t count;
}; };
/* Once caching_thread() finds this much free space, it will wake up waiters. */ /* Once caching_thread() finds this much free space, it will wake up waiters. */
......
...@@ -316,14 +316,14 @@ get_caching_control(struct btrfs_block_group_cache *cache) ...@@ -316,14 +316,14 @@ get_caching_control(struct btrfs_block_group_cache *cache)
} }
ctl = cache->caching_ctl; ctl = cache->caching_ctl;
atomic_inc(&ctl->count); refcount_inc(&ctl->count);
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
return ctl; return ctl;
} }
static void put_caching_control(struct btrfs_caching_control *ctl) static void put_caching_control(struct btrfs_caching_control *ctl)
{ {
if (atomic_dec_and_test(&ctl->count)) if (refcount_dec_and_test(&ctl->count))
kfree(ctl); kfree(ctl);
} }
...@@ -599,7 +599,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, ...@@ -599,7 +599,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
init_waitqueue_head(&caching_ctl->wait); init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache; caching_ctl->block_group = cache;
caching_ctl->progress = cache->key.objectid; caching_ctl->progress = cache->key.objectid;
atomic_set(&caching_ctl->count, 1); refcount_set(&caching_ctl->count, 1);
btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
caching_thread, NULL, NULL); caching_thread, NULL, NULL);
...@@ -620,7 +620,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, ...@@ -620,7 +620,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
struct btrfs_caching_control *ctl; struct btrfs_caching_control *ctl;
ctl = cache->caching_ctl; ctl = cache->caching_ctl;
atomic_inc(&ctl->count); refcount_inc(&ctl->count);
prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
...@@ -707,7 +707,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, ...@@ -707,7 +707,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
} }
down_write(&fs_info->commit_root_sem); down_write(&fs_info->commit_root_sem);
atomic_inc(&caching_ctl->count); refcount_inc(&caching_ctl->count);
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
up_write(&fs_info->commit_root_sem); up_write(&fs_info->commit_root_sem);
...@@ -10416,7 +10416,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ...@@ -10416,7 +10416,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
&fs_info->caching_block_groups, list) &fs_info->caching_block_groups, list)
if (ctl->block_group == block_group) { if (ctl->block_group == block_group) {
caching_ctl = ctl; caching_ctl = ctl;
atomic_inc(&caching_ctl->count); refcount_inc(&caching_ctl->count);
break; break;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment