Commit edcb6918 authored by Elena Reshetova, committed by David S. Miller

net: convert inet_frag_queue.refcnt from atomic_t to refcount_t

refcount_t type and corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This allows to avoid accidental
refcounter overflows that might lead to use-after-free
situations.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 717d1e99
@@ -50,7 +50,7 @@ struct inet_frag_queue {
 	spinlock_t		lock;
 	struct timer_list	timer;
 	struct hlist_node	list;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	struct sk_buff		*fragments;
 	struct sk_buff		*fragments_tail;
 	ktime_t			stamp;
@@ -129,7 +129,7 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
 {
-	if (atomic_dec_and_test(&q->refcnt))
+	if (refcount_dec_and_test(&q->refcnt))
 		inet_frag_destroy(q, f);
 }
...
@@ -276,11 +276,11 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
 {
 	if (del_timer(&fq->timer))
-		atomic_dec(&fq->refcnt);
+		refcount_dec(&fq->refcnt);
 	if (!(fq->flags & INET_FRAG_COMPLETE)) {
 		fq_unlink(fq, f);
-		atomic_dec(&fq->refcnt);
+		refcount_dec(&fq->refcnt);
 	}
 }
 EXPORT_SYMBOL(inet_frag_kill);
@@ -329,7 +329,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 	 */
 	hlist_for_each_entry(qp, &hb->chain, list) {
 		if (qp->net == nf && f->match(qp, arg)) {
-			atomic_inc(&qp->refcnt);
+			refcount_inc(&qp->refcnt);
 			spin_unlock(&hb->chain_lock);
 			qp_in->flags |= INET_FRAG_COMPLETE;
 			inet_frag_put(qp_in, f);
@@ -339,9 +339,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 #endif
 	qp = qp_in;
 	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
-	atomic_inc(&qp->refcnt);
+	refcount_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &hb->chain);
 	spin_unlock(&hb->chain_lock);
@@ -370,7 +370,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
-	atomic_set(&q->refcnt, 1);
+	refcount_set(&q->refcnt, 1);
 	return q;
 }
@@ -405,7 +405,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	spin_lock(&hb->chain_lock);
 	hlist_for_each_entry(q, &hb->chain, list) {
 		if (q->net == nf && f->match(q, key)) {
-			atomic_inc(&q->refcnt);
+			refcount_inc(&q->refcnt);
 			spin_unlock(&hb->chain_lock);
 			return q;
 		}
...
@@ -312,7 +312,7 @@ static int ip_frag_reinit(struct ipq *qp)
 	unsigned int sum_truesize = 0;
 	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
-		atomic_inc(&qp->q.refcnt);
+		refcount_inc(&qp->q.refcnt);
 		return -ETIMEDOUT;
 	}
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment