Commit b8a23e8d authored by Eric Dumazet, committed by David S. Miller

caif: fix leaks and race in caif_queue_rcv_skb()

1) If sk_filter() rejects a packet, the skb was leaked (not freed)
2) Testing SOCK_DEAD twice is racy:
   the packet could be freed while already queued.
3) Remove obsolete comment about caching skb->len
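
The race in item 2 is the subtle one. Below is a minimal userspace sketch of the
before/after pattern, in plain C with pthreads rather than kernel primitives; the
names (deliver_racy, deliver_fixed, dead, queue_lock) are illustrative stand-ins,
not CAIF code:

    /* Sketch of the SOCK_DEAD race the patch closes: the dead flag is
     * tested twice, and it can flip between the locked queue step and
     * the unlocked wakeup step.  The fix tests the flag once, under the
     * queue lock, and remembers the answer in a local ("queued"). */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool dead;       /* stands in for sock_flag(sk, SOCK_DEAD) */

    static void deliver_racy(int pkt)
    {
            pthread_mutex_lock(&queue_lock);
            if (!dead)
                    printf("queued %d\n", pkt);  /* __skb_queue_tail() */
            pthread_mutex_unlock(&queue_lock);

            /* RACE: 'dead' may have flipped since the test above; the
             * packet may already sit on the queue, yet we would also
             * free it here. */
            if (!dead)
                    printf("wake reader\n");     /* sk->sk_data_ready() */
            else
                    printf("freed %d\n", pkt);   /* kfree_skb() */
    }

    static void deliver_fixed(int pkt)
    {
            bool queued;

            pthread_mutex_lock(&queue_lock);
            queued = !dead;                      /* single, locked test */
            if (queued)
                    printf("queued %d\n", pkt);
            pthread_mutex_unlock(&queue_lock);

            if (queued)                          /* decision is frozen */
                    printf("wake reader\n");
            else
                    printf("freed %d\n", pkt);
    }

    int main(void)
    {
            /* single-threaded here; the bug needs 'dead' to flip
             * between deliver_racy()'s two tests on another thread */
            deliver_fixed(1);
            dead = true;
            deliver_racy(2);
            return 0;
    }

In the patch, 'queued' plays exactly this role: SOCK_DEAD is read once, under
list->lock, so the queue-or-free decision cannot change between queuing and the
sk_data_ready()/kfree_skb() step; and the sk_filter() failure path now reaches
the same kfree_skb() via 'goto out' instead of leaking the skb.
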
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e3426ca7
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	bool queued = false;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	err = sk_filter(sk, skb);
 	if (err)
-		return err;
+		goto out;
+
 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	}
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
 	spin_lock_irqsave(&list->lock, flags);
-	if (!sock_flag(sk, SOCK_DEAD))
+	queued = !sock_flag(sk, SOCK_DEAD);
+	if (queued)
 		__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
-
-	if (!sock_flag(sk, SOCK_DEAD))
+out:
+	if (queued)
 		sk->sk_data_ready(sk);
 	else
 		kfree_skb(skb);
-	return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */
...