Commit fff32699 authored by Eric Dumazet, committed by David S. Miller

tcp: reflect SYN queue_mapping into SYNACK packets

While testing how Linux behaves under a SYN flood attack on a multiqueue device
(ixgbe), I found that SYNACK messages were being dropped at the Qdisc level
because we send them all on a single queue.

The obvious fix is to reflect the incoming SYN packet's @queue_mapping into
the SYNACK packet.
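
In short: read the RX queue recorded on the incoming SYN with
skb_get_queue_mapping() and stamp it on the reply with skb_set_queue_mapping()
before transmit, so SYNACKs fan out over the same set of TX queues. A minimal
sketch of that pattern (the helper name is hypothetical; the skb accessors are
the real ones used in the patch below):

	#include <linux/skbuff.h>

	/* Hypothetical helper: reflect the incoming SYN's queue onto
	 * the outgoing SYNACK.
	 */
	static void reflect_queue_mapping(const struct sk_buff *syn_skb,
					  struct sk_buff *synack_skb)
	{
		/* Queue the SYN arrived on, recorded by the driver at RX. */
		u16 queue_mapping = skb_get_queue_mapping(syn_skb);

		/* Make the SYNACK leave on the matching TX queue. */
		skb_set_queue_mapping(synack_skb, queue_mapping);
	}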

Under stress, my machine could only send 25,000 SYNACKs per second (against
200,000 incoming SYNs per second). NIC: ixgbe with 16 RX/TX queues.

After the patch, not a single SYNACK is dropped.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Hans Schillstrom <hans.schillstrom@ericsson.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7433819a
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                               struct request_sock *req,
-                              struct request_values *rvp)
+                              struct request_values *rvp,
+                              u16 queue_mapping)
 {
         const struct inet_request_sock *ireq = inet_rsk(req);
         struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
         if (skb) {
                 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+                skb_set_queue_mapping(skb, queue_mapping);
                 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                             ireq->rmt_addr,
                                             ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-        return tcp_v4_send_synack(sk, NULL, req, rvp);
+        return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
         if (tcp_v4_send_synack(sk, dst, req,
-                               (struct request_values *)&tmp_ext) ||
+                               (struct request_values *)&tmp_ext,
+                               skb_get_queue_mapping(skb)) ||
             want_cookie)
                 goto drop_and_free;
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-                              struct request_values *rvp)
+                              struct request_values *rvp,
+                              u16 queue_mapping)
 {
         struct inet6_request_sock *treq = inet6_rsk(req);
         struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
                 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
                 fl6.daddr = treq->rmt_addr;
+                skb_set_queue_mapping(skb, queue_mapping);
                 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                 err = net_xmit_eval(err);
         }
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
                              struct request_values *rvp)
 {
         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-        return tcp_v6_send_synack(sk, req, rvp);
+        return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         security_inet_conn_request(sk, skb, req);
 
         if (tcp_v6_send_synack(sk, req,
-                               (struct request_values *)&tmp_ext) ||
+                               (struct request_values *)&tmp_ext,
+                               skb_get_queue_mapping(skb)) ||
             want_cookie)
                 goto drop_and_free;
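
To observe the effect on your own hardware, one option (assuming an mq root
qdisc; the device name here is just an example) is to compare per-TX-queue
drop counters under load with "tc -s qdisc show dev eth0": before the patch
the drops concentrate on a single child qdisc, while afterwards the SYNACK
traffic should follow the RX queue distribution.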