Commit b26bbdae authored by Paolo Abeni, committed by David S. Miller

udp: move scratch area helpers into the include file

So that they can later be used by the IPv6 code, too.
Also lift the comments a bit.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d97af30f
@@ -302,6 +302,67 @@ struct sock *__udp6_lib_lookup(struct net *net,
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
                                 __be16 sport, __be16 dport);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possibly multiple cache misses on dequeue()
 */
#if BITS_PER_LONG == 64

/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
 * cold cache lines at recvmsg time.
 * skb->len can be stored on 16 bits since the udp header has already been
 * validated and pulled.
 */
struct udp_dev_scratch {
        u32 truesize;
        u16 len;
        bool is_linear;
        bool csum_unnecessary;
};

static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
        return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
        return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
        return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
                                  struct iov_iter *to)
{
        int n, copy = len - off;

        n = copy_to_iter(skb->data + off, copy, to);
        if (n == copy)
                return 0;

        return -EFAULT;
}
/*
 * SNMP statistics for UDP and UDP-Lite
 */
...
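The inline helpers above only read the scratch area; the producer side fills it when the skb is enqueued to the receive queue. Below is a minimal sketch of that producer, assuming the 64-bit layout above. The name udp_fill_dev_scratch is illustrative; the real setter, udp_set_dev_scratch(), lives in net/ipv4/udp.c and is only partially visible in this diff.

static void udp_fill_dev_scratch(struct sk_buff *skb)
{
        struct udp_dev_scratch *scratch;

        /* the whole scratch structure must fit into skb->dev_scratch (a long) */
        BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(skb->dev_scratch));

        scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
        scratch->truesize = skb->truesize;
        /* the UDP header is already validated and pulled, so len fits in 16 bits */
        scratch->len = skb->len;
        scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
        scratch->is_linear = !skb_is_nonlinear(skb);
}

With the fields cached at enqueue time, recvmsg() can decide how much to copy (udp_skb_len()) and how to copy it (udp_skb_is_linear(), udp_skb_csum_unnecessary()) without touching the cold skb cache lines.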
@@ -1163,24 +1163,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
        return ret;
}
/* Copy as much information as possible into skb->dev_scratch to avoid
 * possibly multiple cache miss on dequeue();
 */
#if BITS_PER_LONG == 64
/* we can store multiple info here: truesize, len and the bit needed to
 * compute skb_csum_unnecessary will be on cold cache lines at recvmsg
 * time.
 * skb->len can be stored on 16 bits since the udp header has been already
 * validated and pulled.
 */
struct udp_dev_scratch {
        u32 truesize;
        u16 len;
        bool is_linear;
        bool csum_unnecessary;
};
static void udp_set_dev_scratch(struct sk_buff *skb)
{
        struct udp_dev_scratch *scratch;
@@ -1197,22 +1180,6 @@ static int udp_skb_truesize(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
}
static unsigned int udp_skb_len(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
}

static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
}

static bool udp_skb_is_linear(struct sk_buff *skb)
{
        return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
}
#else
static void udp_set_dev_scratch(struct sk_buff *skb)
{
@@ -1223,21 +1190,6 @@ static int udp_skb_truesize(struct sk_buff *skb)
{
        return skb->dev_scratch;
}
static unsigned int udp_skb_len(struct sk_buff *skb)
{
        return skb->len;
}

static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
        return skb_csum_unnecessary(skb);
}

static bool udp_skb_is_linear(struct sk_buff *skb)
{
        return !skb_is_nonlinear(skb);
}
#endif

/* fully reclaim rmem/fwd memory allocated for skb */
@@ -1598,18 +1550,6 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);
static int copy_linear_skb(struct sk_buff *skb, int len, int off,
                           struct iov_iter *to)
{
        int n, copy = len - off;

        n = copy_to_iter(skb->data + off, copy, to);
        if (n == copy)
                return 0;

        return -EFAULT;
}
/*
 * This should be easy, if there is something there we
 * return it, otherwise we block.
...
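As the commit message says, the point of exporting these helpers from include/net/udp.h is to let the IPv6 receive path reuse them. Below is a hypothetical sketch of how the copy step of a v6 recvmsg() implementation in net/ipv6/udp.c could use the shared helpers; the wrapper name and the checksum_valid handling are illustrative, not part of this commit.

/* Illustrative only: copy 'copied' bytes at offset 'off' into msg using
 * the helpers now shared through include/net/udp.h.
 */
static int udpv6_copy_to_msg(struct sk_buff *skb, struct msghdr *msg,
                             int copied, int off, bool checksum_valid)
{
        if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
                if (udp_skb_is_linear(skb))
                        return copy_linear_skb(skb, copied, off,
                                               &msg->msg_iter);
                return skb_copy_datagram_msg(skb, off, msg, copied);
        }

        /* checksum not verified yet: copy and checksum in one pass */
        return skb_copy_and_csum_datagram_msg(skb, off, msg);
}

For a linear skb whose checksum is already known to be good, this boils down to a single copy_to_iter() call via copy_linear_skb(), driven entirely by the bits cached in skb->dev_scratch.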