Commit ed9208be authored by David S. Miller

Merge branch 'udp-ipv6-use-scratch-helpers'

Paolo Abeni says:

====================
ipv6: udp: exploit dev_scratch helpers

When bringing in the recent cache optimization for the UDP protocol, I forgot
to leverage the newly introduced scratch area helpers in the UDPv6 code path.
As a result, the UDPv6 implementation suffers an unnecessary performance
penalty compared to v4.

This series aims to bring UDPv6 back onto an equal footing with v4.
The first patch moves the shared helpers to the common include files, while
the second uses them in the UDPv6 code.

This gives a 5-8% performance improvement for a system under flood with small
UDPv6 packets. The performance delta is smaller than the one reported for the
original patch set because the UDPv6 code path already leveraged some of the
optimizations.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d97af30f 67a51780
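
For context before the diff: the helpers moved by the first patch only read back
what the UDP enqueue path has already stashed into skb->dev_scratch. Below is a
minimal illustrative sketch of that write side; field names follow struct
udp_dev_scratch from the first hunk, but this is not the verbatim kernel
implementation of udp_set_dev_scratch() in net/ipv4/udp.c.

/* Sketch only: how the enqueue side can populate skb->dev_scratch so that
 * recvmsg() never has to touch the cold skb cache lines again. Field names
 * follow struct udp_dev_scratch as introduced in the first hunk below.
 */
#if BITS_PER_LONG == 64
static void udp_set_dev_scratch_sketch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch;

	scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
	scratch->truesize = skb->truesize;
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
}
#else
/* On 32-bit, dev_scratch is a single word and can only cache truesize;
 * len and the checksum state are re-read from the skb at dequeue time.
 */
static void udp_set_dev_scratch_sketch(struct sk_buff *skb)
{
	skb->dev_scratch = skb->truesize;
}
#endif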
@@ -302,6 +302,67 @@ struct sock *__udp6_lib_lookup(struct net *net,
 struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				  __be16 sport, __be16 dport);
+/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
+ * possibly multiple cache miss on dequeue()
+ */
+#if BITS_PER_LONG == 64
+/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
+ * cold cache lines at recvmsg time.
+ * skb->len can be stored on 16 bits since the udp header has been already
+ * validated and pulled.
+ */
+struct udp_dev_scratch {
+	u32 truesize;
+	u16 len;
+	bool is_linear;
+	bool csum_unnecessary;
+};
+
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+	return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+	return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+	return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+}
+
+#else
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+	return skb->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+	return skb_csum_unnecessary(skb);
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+	return !skb_is_nonlinear(skb);
+}
+#endif
+
+static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
+				  struct iov_iter *to)
+{
+	int n, copy = len - off;
+
+	n = copy_to_iter(skb->data + off, copy, to);
+	if (n == copy)
+		return 0;
+	return -EFAULT;
+}
 /*
  * SNMP statistics for UDP and UDP-Lite
  */
...
@@ -1163,24 +1163,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	return ret;
 }
-/* Copy as much information as possible into skb->dev_scratch to avoid
- * possibly multiple cache miss on dequeue();
- */
 #if BITS_PER_LONG == 64
-/* we can store multiple info here: truesize, len and the bit needed to
- * compute skb_csum_unnecessary will be on cold cache lines at recvmsg
- * time.
- * skb->len can be stored on 16 bits since the udp header has been already
- * validated and pulled.
- */
-struct udp_dev_scratch {
-	u32 truesize;
-	u16 len;
-	bool is_linear;
-	bool csum_unnecessary;
-};
-
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
 	struct udp_dev_scratch *scratch;
@@ -1197,22 +1180,6 @@ static int udp_skb_truesize(struct sk_buff *skb)
 {
 	return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
 }
-
-static unsigned int udp_skb_len(struct sk_buff *skb)
-{
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
-}
-
-static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
-{
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
-}
-
-static bool udp_skb_is_linear(struct sk_buff *skb)
-{
-	return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
-}
 #else
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
@@ -1223,21 +1190,6 @@ static int udp_skb_truesize(struct sk_buff *skb)
 {
 	return skb->dev_scratch;
 }
-
-static unsigned int udp_skb_len(struct sk_buff *skb)
-{
-	return skb->len;
-}
-
-static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
-{
-	return skb_csum_unnecessary(skb);
-}
-
-static bool udp_skb_is_linear(struct sk_buff *skb)
-{
-	return !skb_is_nonlinear(skb);
-}
 #endif

 /* fully reclaim rmem/fwd memory allocated for skb */
@@ -1598,18 +1550,6 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
 }
 EXPORT_SYMBOL_GPL(__skb_recv_udp);

-static int copy_linear_skb(struct sk_buff *skb, int len, int off,
-			   struct iov_iter *to)
-{
-	int n, copy = len - off;
-
-	n = copy_to_iter(skb->data + off, copy, to);
-	if (n == copy)
-		return 0;
-	return -EFAULT;
-}
-
 /*
  * This should be easy, if there is something there we
  * return it, otherwise we block.
...
@@ -362,7 +362,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (!skb)
 		return err;

-	ulen = skb->len;
+	ulen = udp_skb_len(skb);
 	copied = len;
 	if (copied > ulen - off)
 		copied = ulen - off;
@@ -379,14 +379,18 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	if (copied < ulen || peeking ||
 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
-		checksum_valid = !udp_lib_checksum_complete(skb);
+		checksum_valid = udp_skb_csum_unnecessary(skb) ||
+				 !__udp_lib_checksum_complete(skb);
 		if (!checksum_valid)
 			goto csum_copy_err;
 	}

-	if (checksum_valid || skb_csum_unnecessary(skb))
-		err = skb_copy_datagram_msg(skb, off, msg, copied);
-	else {
+	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
+		if (udp_skb_is_linear(skb))
+			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
+		else
+			err = skb_copy_datagram_msg(skb, off, msg, copied);
+	} else {
 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 		if (err == -EINVAL)
 			goto csum_copy_err;
...
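
To recap the receive-side effect of the UDPv6 hunk above, the copy decision now
reduces to the shape sketched below. This is a condensed illustration of the
logic added to udpv6_recvmsg(), not the verbatim kernel code: every predicate it
consults comes from the cached scratch area, so the cold skb cache lines stay
untouched when the checksum is already known to be good.

/* Condensed sketch of the dequeue-side copy decision after this series:
 * all predicates come from skb->dev_scratch, and linear skbs take the
 * cheaper copy_linear_skb() path.
 */
static int udpv6_copy_to_user_sketch(struct sk_buff *skb, int copied, int off,
				     struct msghdr *msg, bool checksum_valid)
{
	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			return copy_linear_skb(skb, copied, off, &msg->msg_iter);
		return skb_copy_datagram_msg(skb, off, msg, copied);
	}
	/* checksum still unverified: fold the verification into the copy */
	return skb_copy_and_csum_datagram_msg(skb, off, msg);
}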