Commit cc9b17ad authored by Jason Wang, committed by David S. Miller

net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()

We need to validate the number of pages consumed by data_len, otherwise the frags
array could be overflowed by userspace. So this patch validates data_len and
returns -EMSGSIZE when data_len may occupy more frags than MAX_SKB_FRAGS.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 914bec10
...@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, ...@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
gfp_t gfp_mask; gfp_t gfp_mask;
long timeo; long timeo;
int err; int err;
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
err = -EMSGSIZE;
if (npages > MAX_SKB_FRAGS)
goto failure;
gfp_mask = sk->sk_allocation; gfp_mask = sk->sk_allocation;
if (gfp_mask & __GFP_WAIT) if (gfp_mask & __GFP_WAIT)
...@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, ...@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
skb = alloc_skb(header_len, gfp_mask); skb = alloc_skb(header_len, gfp_mask);
if (skb) { if (skb) {
int npages;
int i; int i;
/* No pages, we're done... */ /* No pages, we're done... */
if (!data_len) if (!data_len)
break; break;
npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
skb->truesize += data_len; skb->truesize += data_len;
skb_shinfo(skb)->nr_frags = npages; skb_shinfo(skb)->nr_frags = npages;
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment