Commit 971f10ec authored by Eric Dumazet, committed by David S. Miller

tcp: better TCP_SKB_CB layout to reduce cache line misses

TCP maintains lists of skbs in the write queue and in the receive queues
(the in-order and out-of-order queues).

Scanning these lists in both the input and output paths usually requires
access to skb->next, TCP_SKB_CB(skb)->seq, and TCP_SKB_CB(skb)->end_seq.

These fields currently sit in two different cache lines, so we waste a
lot of memory bandwidth when these queues are big and flows suffer
packet drops or packet reordering.
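
For illustration only (not part of the patch), the kind of queue walk in
question looks roughly like this; find_skb_for_seq() is a made-up helper,
while skb_queue_walk(), before() and TCP_SKB_CB() are the stock kernel
primitives. Each iteration dereferences skb->next and then seq/end_seq
inside skb->cb[], so the placement of those fields decides how many cache
lines every list node costs:

#include <linux/skbuff.h>
#include <net/tcp.h>

/* Hypothetical helper: find the skb whose sequence range covers @seq. */
static struct sk_buff *find_skb_for_seq(struct sk_buff_head *queue, u32 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(queue, skb) {		/* follows skb->next */
		if (!before(seq, TCP_SKB_CB(skb)->seq) &&
		    before(seq, TCP_SKB_CB(skb)->end_seq))
			return skb;
	}
	return NULL;
}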

We can move TCP_SKB_CB(skb)->header to the end of TCP_SKB_CB, because
this header is not used in the fast path. This lets TCP search the skb
lists much faster.

Even with regular flows, we save one cache line miss in the fast path.
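
To see why, here is a rough userspace sketch of where seq lands inside
skb->cb[] before and after the patch; the 24-byte stand-in size is an
assumption for the demo, since sizeof(struct inet6_skb_parm) is
config-dependent:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the h4/h6 header union; real size is config-dependent. */
struct header_union { char opaque[24]; };

struct tcp_skb_cb_old {			/* header first (before patch) */
	struct header_union header;
	unsigned int seq, end_seq;
};

struct tcp_skb_cb_new {			/* seq/end_seq first (after patch) */
	unsigned int seq, end_seq;
	struct header_union header;
};

int main(void)
{
	/* skb->cb[] starts a few dozen bytes into struct sk_buff, so
	 * putting seq/end_seq at cb offset 0 can keep them in the same
	 * cache line as skb->next/prev; exact offsets depend on config.
	 */
	printf("old: seq at cb offset %zu\n", offsetof(struct tcp_skb_cb_old, seq));
	printf("new: seq at cb offset %zu\n", offsetof(struct tcp_skb_cb_new, seq));
	return 0;
}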

Thanks to Christoph Paasch for noticing that we need to clean up
skb->cb[] (IPCB/IP6CB) before entering the IP stack in the tx path,
and that I had forgotten the IPCB uses in tcp_v4_hnd_req() and
tcp_v4_save_options().
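
For context, the cb[] aliasing that makes this necessary (definitions as
in include/net/ip.h, include/linux/ipv6.h and include/net/tcp.h):

#define IPCB(skb)		((struct inet_skb_parm *)((skb)->cb))
#define IP6CB(skb)		((struct inet6_skb_parm *)((skb)->cb))
#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

All three views overlay the same skb->cb[] bytes, so once the header
union moves to the tail, IP-layer code reading IPCB(skb) on a TCP-owned
skb would see TCP's seq/end_seq bytes instead; hence the memset before
queue_xmit on tx and the memmove into header.h4/h6 on rx below.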
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a224772d
include/net/tcp.h
@@ -696,12 +696,6 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
  */
 struct tcp_skb_cb {
-	union {
-		struct inet_skb_parm	h4;
-#if IS_ENABLED(CONFIG_IPV6)
-		struct inet6_skb_parm	h6;
-#endif
-	} header;	/* For incoming frames		*/
 	__u32		seq;		/* Starting sequence number	*/
 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 	__u32		tcp_tw_isn;	/* isn chosen by tcp_timewait_state_process() */
@@ -720,6 +714,12 @@ struct tcp_skb_cb {
 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 	/* 1 byte hole */
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
+	union {
+		struct inet_skb_parm	h4;
+#if IS_ENABLED(CONFIG_IPV6)
+		struct inet6_skb_parm	h6;
+#endif
+	} header;	/* For incoming frames		*/
 };

 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
net/ipv4/tcp_ipv4.c
@@ -886,20 +886,18 @@ EXPORT_SYMBOL(tcp_syn_flood_action);
  */
 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
 {
-	const struct ip_options *opt = &(IPCB(skb)->opt);
+	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
 	struct ip_options_rcu *dopt = NULL;

 	if (opt && opt->optlen) {
 		int opt_size = sizeof(*dopt) + opt->optlen;

 		dopt = kmalloc(opt_size, GFP_ATOMIC);
-		if (dopt) {
-			if (ip_options_echo(&dopt->opt, skb)) {
-				kfree(dopt);
-				dopt = NULL;
-			}
+		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
+			kfree(dopt);
+			dopt = NULL;
 		}
 	}
 	return dopt;
 }
@@ -1431,7 +1429,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 #ifdef CONFIG_SYN_COOKIES
 	if (!th->syn)
-		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
+		sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt);
 #endif
 	return sk;
 }
@@ -1636,6 +1634,13 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	th = tcp_hdr(skb);
 	iph = ip_hdr(skb);

+	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+	 * barrier() makes sure compiler wont play fool^Waliasing games.
+	 */
+	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+		sizeof(struct inet_skb_parm));
+	barrier();
+
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff * 4);
net/ipv4/tcp_output.c
@@ -974,6 +974,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	/* Our usage of tstamp should remain private */
 	skb->tstamp.tv64 = 0;

+	/* Cleanup our debris for IP stacks */
+	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
+			       sizeof(struct inet6_skb_parm)));
+
 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);

 	if (likely(err <= 0))
net/ipv6/tcp_ipv6.c
@@ -1412,6 +1412,13 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	th = tcp_hdr(skb);
 	hdr = ipv6_hdr(skb);

+	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+	 * barrier() makes sure compiler wont play fool^Waliasing games.
+	 */
+	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+		sizeof(struct inet6_skb_parm));
+	barrier();
+
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff*4);