Commit c10d9310 authored by Eric Dumazet, committed by David S. Miller

tcp: do not assume TCP code is non preemptible

We want to make the TCP stack preemptible, as draining the prequeue
and backlog queues can take a lot of time.

Many SNMP updates were assuming that BH (and preemption) was disabled.

We need to convert some __NET_INC_STATS() calls to NET_INC_STATS(),
and some __TCP_INC_STATS() calls to TCP_INC_STATS().
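
The only difference between the two flavors is the per-CPU primitive used
underneath: the double-underscore variants assume the caller already prevents
migration to another CPU (BH context, or an explicit preempt-disabled section),
while the plain variants are safe from preemptible context. Roughly, as a
simplified sketch rather than the exact kernel macro definitions:

    /* Simplified sketch of the SNMP counter macros (not the literal kernel
     * definitions).  __this_cpu_inc() assumes preemption is already disabled;
     * this_cpu_inc() is safe to use from preemptible context.
     */
    #define __NET_INC_STATS(net, field) \
            __this_cpu_inc((net)->mib.net_statistics->mibs[field])
    #define NET_INC_STATS(net, field) \
            this_cpu_inc((net)->mib.net_statistics->mibs[field])

    #define __TCP_INC_STATS(net, field) \
            __this_cpu_inc((net)->mib.tcp_statistics->mibs[field])
    #define TCP_INC_STATS(net, field) \
            this_cpu_inc((net)->mib.tcp_statistics->mibs[field])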

Before using this_cpu_ptr(net->ipv4.tcp_sk) in tcp_v4_send_reset()
and tcp_v4_send_ack(), we add an explicit preempt-disabled section.
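
The tcp_v4_send_reset() and tcp_v4_send_ack() hunks below do exactly that;
condensed from the diff, the tail of tcp_v4_send_reset() ends up roughly as:

    arg.tos = ip_hdr(skb)->tos;

    /* this_cpu_ptr() and the non-atomic __TCP_INC_STATS() updates below
     * rely on this task not migrating to another CPU, so an explicit
     * preempt-disabled section covers the whole per-CPU block.
     */
    preempt_disable();
    ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                          skb, &TCP_SKB_CB(skb)->header.h4.opt,
                          ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                          &arg, arg.iov[0].iov_len);
    __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
    __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
    preempt_enable();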
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e59c83f
@@ -3095,7 +3095,7 @@ void tcp_done(struct sock *sk)
     struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 
     if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-        __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+        TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
     tcp_set_state(sk, TCP_CLOSE);
     tcp_clear_xmit_timers(sk);
...
@@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 
             ca->last_ack = now_us;
             if (after(now_us, ca->round_start + base_owd)) {
-                __NET_INC_STATS(sock_net(sk),
-                                LINUX_MIB_TCPHYSTARTTRAINDETECT);
-                __NET_ADD_STATS(sock_net(sk),
-                                LINUX_MIB_TCPHYSTARTTRAINCWND,
-                                tp->snd_cwnd);
+                NET_INC_STATS(sock_net(sk),
+                              LINUX_MIB_TCPHYSTARTTRAINDETECT);
+                NET_ADD_STATS(sock_net(sk),
+                              LINUX_MIB_TCPHYSTARTTRAINCWND,
+                              tp->snd_cwnd);
                 tp->snd_ssthresh = tp->snd_cwnd;
                 return;
             }
@@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
                              125U);
         if (ca->rtt.min > thresh) {
-            __NET_INC_STATS(sock_net(sk),
-                            LINUX_MIB_TCPHYSTARTDELAYDETECT);
-            __NET_ADD_STATS(sock_net(sk),
-                            LINUX_MIB_TCPHYSTARTDELAYCWND,
-                            tp->snd_cwnd);
+            NET_INC_STATS(sock_net(sk),
+                          LINUX_MIB_TCPHYSTARTDELAYDETECT);
+            NET_ADD_STATS(sock_net(sk),
+                          LINUX_MIB_TCPHYSTARTDELAYCWND,
+                          tp->snd_cwnd);
             tp->snd_ssthresh = tp->snd_cwnd;
         }
     }
...
@@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay)
             ca->last_ack = now;
             if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
                 ca->found |= HYSTART_ACK_TRAIN;
-                __NET_INC_STATS(sock_net(sk),
-                                LINUX_MIB_TCPHYSTARTTRAINDETECT);
-                __NET_ADD_STATS(sock_net(sk),
-                                LINUX_MIB_TCPHYSTARTTRAINCWND,
-                                tp->snd_cwnd);
+                NET_INC_STATS(sock_net(sk),
+                              LINUX_MIB_TCPHYSTARTTRAINDETECT);
+                NET_ADD_STATS(sock_net(sk),
+                              LINUX_MIB_TCPHYSTARTTRAINCWND,
+                              tp->snd_cwnd);
                 tp->snd_ssthresh = tp->snd_cwnd;
             }
         }
@@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay)
         if (ca->curr_rtt > ca->delay_min +
             HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
             ca->found |= HYSTART_DELAY;
-            __NET_INC_STATS(sock_net(sk),
-                            LINUX_MIB_TCPHYSTARTDELAYDETECT);
-            __NET_ADD_STATS(sock_net(sk),
-                            LINUX_MIB_TCPHYSTARTDELAYCWND,
-                            tp->snd_cwnd);
+            NET_INC_STATS(sock_net(sk),
+                          LINUX_MIB_TCPHYSTARTDELAYDETECT);
+            NET_ADD_STATS(sock_net(sk),
+                          LINUX_MIB_TCPHYSTARTDELAYCWND,
+                          tp->snd_cwnd);
             tp->snd_ssthresh = tp->snd_cwnd;
         }
     }
...
@@ -255,9 +255,9 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
         spin_lock(&fastopenq->lock);
         req1 = fastopenq->rskq_rst_head;
         if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
-            spin_unlock(&fastopenq->lock);
             __NET_INC_STATS(sock_net(sk),
                             LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+            spin_unlock(&fastopenq->lock);
             return false;
         }
         fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
     struct sock *child;
 
     if (foc->len == 0) /* Client requests a cookie */
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 
     if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
           (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
         child = tcp_fastopen_create_child(sk, skb, dst, req);
         if (child) {
             foc->len = -1;
-            __NET_INC_STATS(sock_net(sk),
-                            LINUX_MIB_TCPFASTOPENPASSIVE);
+            NET_INC_STATS(sock_net(sk),
+                          LINUX_MIB_TCPFASTOPENPASSIVE);
             return child;
         }
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
     } else if (foc->len > 0) /* Client presents an invalid cookie */
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
     valid_foc.exp = foc->exp;
     *foc = valid_foc;
...
@@ -692,6 +692,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                  offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 
     arg.tos = ip_hdr(skb)->tos;
+    preempt_disable();
     ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                           skb, &TCP_SKB_CB(skb)->header.h4.opt,
                           ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
@@ -699,6 +700,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 
     __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
     __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+    preempt_enable();
 
 #ifdef CONFIG_TCP_MD5SIG
 out:
@@ -774,12 +776,14 @@ static void tcp_v4_send_ack(struct net *net,
     if (oif)
         arg.bound_dev_if = oif;
     arg.tos = tos;
+    preempt_disable();
     ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                           skb, &TCP_SKB_CB(skb)->header.h4.opt,
                           ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                           &arg, arg.iov[0].iov_len);
 
     __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+    preempt_enable();
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1151,12 +1155,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
         return false;
 
     if (hash_expected && !hash_location) {
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
         return true;
     }
 
     if (!hash_expected && hash_location) {
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
         return true;
     }
 
@@ -1342,7 +1346,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
     return newsk;
 
 exit_overflow:
-    __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+    NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
     dst_release(dst);
 exit:
@@ -1432,8 +1436,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
     return 0;
 
 csum_err:
-    __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
-    __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+    TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+    TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
     goto discard;
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
...
@@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
          * socket up. We've got bigger problems than
          * non-graceful socket closings.
          */
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
     }
 
     tcp_update_metrics(sk);
...
@@ -2221,14 +2221,13 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 /* Thanks to skb fast clones, we can detect if a prior transmit of
  * a packet is still in a qdisc or driver queue.
  * In this case, there is very little point doing a retransmit !
- * Note: This is called from BH context only.
  */
 static bool skb_still_in_host_queue(const struct sock *sk,
                                     const struct sk_buff *skb)
 {
     if (unlikely(skb_fclone_busy(sk, skb))) {
-        __NET_INC_STATS(sock_net(sk),
-                        LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+        NET_INC_STATS(sock_net(sk),
+                      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
         return true;
     }
     return false;
@@ -2290,7 +2289,7 @@ void tcp_send_loss_probe(struct sock *sk)
     tp->tlp_high_seq = tp->snd_nxt;
 
 probe_sent:
-    __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+    NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
     /* Reset s.t. tcp_rearm_rto will restart timer from now */
     inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
@@ -2699,7 +2698,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
             tp->retrans_stamp = tcp_skb_timestamp(skb);
 
     } else if (err != -EBUSY) {
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
     }
 
     if (tp->undo_retrans < 0)
@@ -2823,7 +2822,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
         if (tcp_retransmit_skb(sk, skb, segs))
             return;
 
-        __NET_INC_STATS(sock_net(sk), mib_idx);
+        NET_INC_STATS(sock_net(sk), mib_idx);
 
         if (tcp_in_cwnd_reduction(sk))
             tp->prr_out += tcp_skb_pcount(skb);
...
@@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk)
             if (scb->sacked & TCPCB_SACKED_RETRANS) {
                 scb->sacked &= ~TCPCB_SACKED_RETRANS;
                 tp->retrans_out -= tcp_skb_pcount(skb);
-                __NET_INC_STATS(sock_net(sk),
-                                LINUX_MIB_TCPLOSTRETRANSMIT);
+                NET_INC_STATS(sock_net(sk),
+                              LINUX_MIB_TCPLOSTRETRANSMIT);
             }
         } else if (!(scb->sacked & TCPCB_RETRANS)) {
             /* Original data are sent sequentially so stop early
...
@@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk)
             if (tp->syn_fastopen || tp->syn_data)
                 tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
             if (tp->syn_data && icsk->icsk_retransmits == 1)
-                __NET_INC_STATS(sock_net(sk),
-                                LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+                NET_INC_STATS(sock_net(sk),
+                              LINUX_MIB_TCPFASTOPENACTIVEFAIL);
         }
         retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
         syn_set = true;
@@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk)
                 tp->bytes_acked <= tp->rx_opt.mss_clamp) {
                 tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
                 if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-                    __NET_INC_STATS(sock_net(sk),
-                                    LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+                    NET_INC_STATS(sock_net(sk),
+                                  LINUX_MIB_TCPFASTOPENACTIVEFAIL);
             }
             /* Black hole detection */
             tcp_mtu_probing(icsk, sk);
@@ -209,6 +209,7 @@ static int tcp_write_timeout(struct sock *sk)
     return 0;
 }
 
+/* Called with BH disabled */
 void tcp_delack_timer_handler(struct sock *sk)
 {
     struct tcp_sock *tp = tcp_sk(sk);
@@ -493,6 +494,7 @@ void tcp_retransmit_timer(struct sock *sk)
 out:;
 }
 
+/* Called with BH disabled */
 void tcp_write_timer_handler(struct sock *sk)
 {
     struct inet_connection_sock *icsk = inet_csk(sk);
...
@@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
         return false;
 
     if (hash_expected && !hash_location) {
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
         return true;
     }
 
     if (!hash_expected && hash_location) {
-        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
         return true;
     }
 
@@ -825,9 +825,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
     if (!IS_ERR(dst)) {
         skb_dst_set(buff, dst);
         ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
-        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+        TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
         if (rst)
-            __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+            TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
         return;
     }
 
@@ -1276,8 +1276,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
     kfree_skb(skb);
     return 0;
 csum_err:
-    __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
-    __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+    TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+    TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
     goto discard;
...