Commit ab97f873 authored by Elena Reshetova, committed by Jan Kara

fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t

atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)
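
Concretely, the pattern looks roughly like this (a minimal sketch; struct obj,
obj_get/obj_put and obj_free are hypothetical names, not an existing kernel
object):

    #include <linux/atomic.h>

    struct obj {
            atomic_t refcnt;
    };

    static void obj_free(struct obj *o);    /* hypothetical destructor */

    static void obj_init(struct obj *o)
    {
            atomic_set(&o->refcnt, 1);      /* counter starts at 1 */
    }

    static void obj_get(struct obj *o)
    {
            atomic_inc(&o->refcnt);         /* no protection against wrap
                                             * or increment-after-free */
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refcnt))    /* free on reaching zero */
                    obj_free(o);
    }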

Such atomic variables should be converted to the newly provided
refcount_t type and API, which prevent accidental counter overflows
and underflows. This is important since an overflow or underflow
can lead to a use-after-free situation and be exploitable.
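
Under refcount_t the sketch above keeps its shape but gains those sanity
checks (same hypothetical names; the refcount_* calls are the real API from
<linux/refcount.h>, and the checking behavior assumes a checked refcount
implementation is enabled):

    #include <linux/refcount.h>

    struct obj {
            refcount_t refcnt;
    };

    static void obj_free(struct obj *o);    /* hypothetical destructor */

    static void obj_init(struct obj *o)
    {
            refcount_set(&o->refcnt, 1);
    }

    static void obj_get(struct obj *o)
    {
            /* Unlike atomic_inc(), this WARNs and saturates rather than
             * wrapping past the maximum or incrementing from zero. */
            refcount_inc(&o->refcnt);
    }

    static void obj_put(struct obj *o)
    {
            if (refcount_dec_and_test(&o->refcnt))
                    obj_free(o);
    }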

The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
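
For this counter the conversion is mechanical: every operation touched by
the patch below has a direct refcount_t counterpart:
 - atomic_set()          -> refcount_set()
 - atomic_read()         -> refcount_read()
 - atomic_inc()          -> refcount_inc()
 - atomic_inc_not_zero() -> refcount_inc_not_zero()
 - atomic_dec_and_test() -> refcount_dec_and_test()
 - atomic_dec_and_lock() -> refcount_dec_and_lock()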
Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
parent 6685df31
@@ -376,7 +376,7 @@ static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group
                 fsnotify_get_mark(fsn_mark);
                 /* One ref for being in the idr, one ref we just took */
-                BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+                BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
         }
         return i_mark;
@@ -446,7 +446,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
          * One ref for being in the idr
          * one ref grabbed by inotify_idr_find
          */
-        if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+        if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
                 printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
                        __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
                 /* we can't really recover with bad ref cnting.. */
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-        WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-        atomic_inc(&mark->refcnt);
+        WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+        refcount_inc(&mark->refcnt);
 }

 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -201,7 +201,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
         /* Catch marks that were actually never attached to object */
         if (!mark->connector) {
-                if (atomic_dec_and_test(&mark->refcnt))
+                if (refcount_dec_and_test(&mark->refcnt))
                         fsnotify_final_mark_destroy(mark);
                 return;
         }
@@ -210,7 +210,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
          * We have to be careful so that traversals of obj_list under lock can
          * safely grab mark reference.
          */
-        if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+        if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
                 return;
         conn = mark->connector;
@@ -258,7 +258,7 @@ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
         if (!mark)
                 return true;
-        if (atomic_inc_not_zero(&mark->refcnt)) {
+        if (refcount_inc_not_zero(&mark->refcnt)) {
                 spin_lock(&mark->lock);
                 if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
                         /* mark is attached, group is still alive then */
@@ -335,7 +335,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
         WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
         WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-                     atomic_read(&mark->refcnt) < 1 +
+                     refcount_read(&mark->refcnt) < 1 +
                      !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
         spin_lock(&mark->lock);
@@ -737,7 +737,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
         memset(mark, 0, sizeof(*mark));
         spin_lock_init(&mark->lock);
-        atomic_set(&mark->refcnt, 1);
+        refcount_set(&mark->refcnt, 1);
         fsnotify_get_group(group);
         mark->group = group;
 }
@@ -242,7 +242,7 @@ struct fsnotify_mark {
         __u32 mask;
         /* We hold one for presence in g_list. Also one ref for each 'thing'
          * in kernel that found and may be using this mark. */
-        atomic_t refcnt;
+        refcount_t refcnt;
         /* Group this mark is for. Set on mark creation, stable until last ref
          * is dropped */
         struct fsnotify_group *group;
@@ -1007,7 +1007,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
          * We are guaranteed to have at least one reference to the mark from
          * either the inode or the caller of fsnotify_destroy_mark().
          */
-        BUG_ON(atomic_read(&entry->refcnt) < 1);
+        BUG_ON(refcount_read(&entry->refcnt) < 1);
 }

 static const struct fsnotify_ops audit_tree_ops = {
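
Taken together, the fsnotify_put_mark() and fsnotify_get_mark_safe() hunks
preserve a lookup/teardown pairing worth spelling out. A condensed sketch
with hypothetical names (struct obj, obj_free(), a list node guarded by
list_lock; this is not the fsnotify code itself):

    #include <linux/list.h>
    #include <linux/refcount.h>
    #include <linux/spinlock.h>

    struct obj {
            refcount_t refcnt;
            struct list_head node;  /* linked into a list list_lock protects */
    };

    static void obj_free(struct obj *o);    /* hypothetical destructor */

    /* Lookup side: only take a reference while the count is nonzero, so an
     * object already dropping its last reference can never be revived. */
    static bool obj_get_safe(struct obj *o)
    {
            return refcount_inc_not_zero(&o->refcnt);
    }

    /* Teardown side: acquire the list lock only when dropping the final
     * reference, so traversals under that lock can still obj_get_safe()
     * every object they encounter. */
    static void obj_put(struct obj *o, spinlock_t *list_lock)
    {
            if (!refcount_dec_and_lock(&o->refcnt, list_lock))
                    return;
            list_del(&o->node);
            spin_unlock(list_lock);
            obj_free(o);
    }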