Commit 694aba69 authored by Eric Dumazet, committed by David S. Miller

ipv4: factorize sk_wmem_alloc updates done by __ip_append_data()

While testing my inet defrag changes, I found that the senders
could spend ~20% of cpu cycles in skb_set_owner_w() updating
sk->sk_wmem_alloc for every fragment they cook.

The solution to this problem is to use alloc_skb() instead
of sock_wmalloc() and manually perform a single sk_wmem_alloc change.

Similar change for IPv6 is provided in following patch.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c0725502
...@@ -876,6 +876,7 @@ static int __ip_append_data(struct sock *sk, ...@@ -876,6 +876,7 @@ static int __ip_append_data(struct sock *sk,
unsigned int maxfraglen, fragheaderlen, maxnonfragsize; unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE; int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst; struct rtable *rt = (struct rtable *)cork->dst;
unsigned int wmem_alloc_delta = 0;
u32 tskey = 0; u32 tskey = 0;
skb = skb_peek_tail(queue); skb = skb_peek_tail(queue);
...@@ -971,11 +972,10 @@ static int __ip_append_data(struct sock *sk, ...@@ -971,11 +972,10 @@ static int __ip_append_data(struct sock *sk,
(flags & MSG_DONTWAIT), &err); (flags & MSG_DONTWAIT), &err);
} else { } else {
skb = NULL; skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) <= if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf) 2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk, skb = alloc_skb(alloclen + hh_len + 15,
alloclen + hh_len + 15, 1, sk->sk_allocation);
sk->sk_allocation);
if (unlikely(!skb)) if (unlikely(!skb))
err = -ENOBUFS; err = -ENOBUFS;
} }
...@@ -1033,6 +1033,11 @@ static int __ip_append_data(struct sock *sk, ...@@ -1033,6 +1033,11 @@ static int __ip_append_data(struct sock *sk,
/* /*
* Put the packet on the pending queue. * Put the packet on the pending queue.
*/ */
if (!skb->destructor) {
skb->destructor = sock_wfree;
skb->sk = sk;
wmem_alloc_delta += skb->truesize;
}
__skb_queue_tail(queue, skb); __skb_queue_tail(queue, skb);
continue; continue;
} }
...@@ -1079,12 +1084,13 @@ static int __ip_append_data(struct sock *sk, ...@@ -1079,12 +1084,13 @@ static int __ip_append_data(struct sock *sk,
skb->len += copy; skb->len += copy;
skb->data_len += copy; skb->data_len += copy;
skb->truesize += copy; skb->truesize += copy;
refcount_add(copy, &sk->sk_wmem_alloc); wmem_alloc_delta += copy;
} }
offset += copy; offset += copy;
length -= copy; length -= copy;
} }
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return 0; return 0;
error_efault: error_efault:
...@@ -1092,6 +1098,7 @@ static int __ip_append_data(struct sock *sk, ...@@ -1092,6 +1098,7 @@ static int __ip_append_data(struct sock *sk,
error: error:
cork->length -= length; cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return err; return err;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment