Commit 3aff5017 authored by David S. Miller

Merge branch 'tcp'

Eric Dumazet says:

====================
tcp: deduplicate TCP_SKB_CB(skb)->when

TCP_SKB_CB(skb)->when has different meanings in the output and input paths.

In the output path, it contains a timestamp.
In the input path, it contains an ISN chosen by tcp_timewait_state_process().

Its use in the output path became obsolete with usec timestamping.
Let's simplify and clean this up.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2ba38943 7faee5c0
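
To make the cover letter above concrete, here is a minimal illustrative sketch of the deduplication. It is not part of the patch: the _before/_after struct names and the simplified userspace stand-in types are hypothetical, while tcp_skb_timestamp() is copied verbatim from the diff below.

/*
 * Illustrative sketch only; simplified userspace stand-ins for the
 * kernel's sk_buff and skb_mstamp types.
 */
#include <stdint.h>

struct skb_mstamp {			/* usec timestamping storage */
	union {
		uint64_t v64;
		struct {
			uint32_t stamp_us;
			uint32_t stamp_jiffies;
		};
	};
};

struct sk_buff {
	struct skb_mstamp skb_mstamp;	/* already kept up to date by the stack */
};

/* Before: one scratch field with two unrelated meanings. */
struct tcp_skb_cb_before {		/* hypothetical name */
	uint32_t when;			/* output path: jiffies send timestamp;
					 * input path: ISN chosen by
					 * tcp_timewait_state_process() */
};

/* After: only the input-path meaning keeps a control-block field ... */
struct tcp_skb_cb_after {		/* hypothetical name */
	uint32_t tcp_tw_isn;		/* isn chosen by tcp_timewait_state_process() */
};

/* ... and output-path users read the jiffies value out of skb_mstamp,
 * exactly as the new helper in the diff does.
 */
static inline uint32_t tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}

The payoff is visible throughout the diff: every output-path write of ->when disappears, and the remaining input-path users are renamed to tcp_tw_isn.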
@@ -672,6 +672,12 @@ void tcp_send_window_probe(struct sock *sk);
  */
 #define tcp_time_stamp		((__u32)(jiffies))
 
+static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
+{
+	return skb->skb_mstamp.stamp_jiffies;
+}
+
 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 
 #define TCPHDR_FIN 0x01
@@ -698,7 +704,7 @@ struct tcp_skb_cb {
 	} header;	/* For incoming frames		*/
 	__u32		seq;		/* Starting sequence number	*/
 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
-	__u32		when;		/* used to compute rtt's	*/
+	__u32		tcp_tw_isn;	/* isn chosen by tcp_timewait_state_process() */
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 	__u8		sacked;		/* State flags for SACK/FACK.	*/
...
@@ -2967,7 +2967,8 @@ void tcp_rearm_rto(struct sock *sk)
 	if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		struct sk_buff *skb = tcp_write_queue_head(sk);
-		const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+		const u32 rto_time_stamp =
+			tcp_skb_timestamp(skb) + rto;
 		s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
 		/* delta may not be positive if the socket is locked
 		 * when the retrans timer fires and is rescheduled.
@@ -5906,7 +5907,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	struct request_sock *req;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct dst_entry *dst = NULL;
-	__u32 isn = TCP_SKB_CB(skb)->when;
+	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
 	bool want_cookie = false, fastopen;
 	struct flowi fl;
 	struct tcp_fastopen_cookie foc = { .len = -1 };
...
@@ -437,8 +437,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
 
-		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
-				tcp_time_stamp - TCP_SKB_CB(skb)->when);
+		remaining = icsk->icsk_rto -
+			    min(icsk->icsk_rto,
+				tcp_time_stamp - tcp_skb_timestamp(skb));
 
 		if (remaining) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -1627,7 +1628,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff * 4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-	TCP_SKB_CB(skb)->when = 0;
+	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
 	TCP_SKB_CB(skb)->sacked = 0;
...
@@ -232,7 +232,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
 		if (isn == 0)
 			isn++;
-		TCP_SKB_CB(skb)->when = isn;
+		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
 		return TCP_TW_SYN;
 	}
...
@@ -550,7 +550,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
+		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -618,7 +618,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
 	}
 	if (likely(ireq->tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when;
+		opts->tsval = tcp_skb_timestamp(skb);
 		opts->tsecr = req->ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -647,7 +647,6 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 					struct tcp_out_options *opts,
 					struct tcp_md5sig_key **md5)
 {
-	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int size = 0;
 	unsigned int eff_sacks;
@@ -666,7 +665,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 	if (likely(tp->rx_opt.tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
+		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -886,8 +885,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		skb = skb_clone(skb, gfp_mask);
 		if (unlikely(!skb))
 			return -ENOBUFS;
-		/* Our usage of tstamp should remain private */
-		skb->tstamp.tv64 = 0;
 	}
 
 	inet = inet_sk(sk);
@@ -975,7 +972,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 			      tcp_skb_pcount(skb));
 
+	/* Our usage of tstamp should remain private */
+	skb->tstamp.tv64 = 0;
+
 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
 
 	if (likely(err <= 0))
 		return err;
@@ -1149,7 +1149,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	/* Looks stupid, but our code really uses when of
 	 * skbs, which it never sent before. --ANK
 	 */
-	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
 	buff->tstamp = skb->tstamp;
 	tcp_fragment_tstamp(skb, buff);
@@ -1874,8 +1873,8 @@ static int tcp_mtu_probe(struct sock *sk)
 	tcp_init_tso_segs(sk, nskb, nskb->len);
 
 	/* We're ready to send.  If this fails, the probe will
-	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
-	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
+	 * be resegmented into mss-sized pieces by tcp_write_xmit().
+	 */
 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
@@ -1935,8 +1934,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		BUG_ON(!tso_segs);
 
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
-			/* "when" is used as a start point for the retransmit timer */
-			TCP_SKB_CB(skb)->when = tcp_time_stamp;
+			/* "skb_mstamp" is used as a start point for the retransmit timer */
+			skb_mstamp_get(&skb->skb_mstamp);
 			goto repair; /* Skip network transmission */
 		}
@@ -2000,8 +1999,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
-
 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
 			break;
@@ -2499,7 +2496,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	/* Make a copy, if the first transmission SKB clone we made
 	 * is still in somebody's hands, else make a clone.
 	 */
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 	/* make sure skb->data is aligned on arches that require it
 	 * and check if ack-trimming & collapsing extended the headroom
@@ -2544,7 +2540,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		/* Save stamp of the first retransmit. */
 		if (!tp->retrans_stamp)
-			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+			tp->retrans_stamp = tcp_skb_timestamp(skb);
 
 		/* snd_nxt is stored to detect loss of retransmitted segment,
 		 * see tcp_input.c tcp_sacktag_write_queue().
@@ -2752,7 +2748,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
 	/* Send it off. */
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -2791,7 +2786,6 @@ int tcp_send_synack(struct sock *sk)
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
 		TCP_ECN_send_synack(tcp_sk(sk), skb);
 	}
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 }
@@ -2835,10 +2829,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
-		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
+		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
 	else
 #endif
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
+		skb_mstamp_get(&skb->skb_mstamp);
 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
 					     foc) + sizeof(*th);
@@ -3086,7 +3080,7 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	tp->retrans_stamp = tcp_time_stamp;
 	tcp_connect_queue_skb(sk, buff);
 	TCP_ECN_send_syn(sk, buff);
@@ -3194,7 +3188,7 @@ void tcp_send_ack(struct sock *sk)
 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
 	/* Send it off, this clears delayed acks for us. */
-	TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	skb_mstamp_get(&buff->skb_mstamp);
 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
 }
@@ -3226,7 +3220,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	 * send it.
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
+	skb_mstamp_get(&skb->skb_mstamp);
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
@@ -3270,7 +3264,6 @@ int tcp_write_wakeup(struct sock *sk)
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)
 			tcp_event_new_data_sent(sk, skb);
...
@@ -135,10 +135,9 @@ static bool retransmits_timed_out(struct sock *sk,
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
 
-	if (unlikely(!tcp_sk(sk)->retrans_stamp))
-		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
-	else
-		start_ts = tcp_sk(sk)->retrans_stamp;
+	start_ts = tcp_sk(sk)->retrans_stamp;
+	if (unlikely(!start_ts))
+		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));
 
 	if (likely(timeout == 0)) {
 		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
...
@@ -738,7 +738,7 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 		ireq->ir_iif = inet6_iif(skb);
 
-	if (!TCP_SKB_CB(skb)->when &&
+	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 	    (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 	     np->rxopt.bits.rxohlim || np->repflow)) {
@@ -1412,7 +1412,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff*4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-	TCP_SKB_CB(skb)->when = 0;
+	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
 	TCP_SKB_CB(skb)->sacked = 0;
...
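
A side note on the arithmetic visible in the tcp_rearm_rto() and tcp_v4_err() hunks: tcp_time_stamp and tcp_skb_timestamp() return u32 jiffies values that wrap around, so remaining time is compared through a signed cast of an unsigned difference rather than directly. A self-contained sketch of that idiom, with hypothetical values straddling the wrap point:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical jiffies values on either side of the u32 wrap. */
	uint32_t sent = 0xffffffe0u;	/* tcp_skb_timestamp(skb): 32 before wrap */
	uint32_t now  = 0x00000010u;	/* tcp_time_stamp: 16 after wrap */
	uint32_t rto  = 64;		/* retransmit timeout, in jiffies */

	uint32_t rto_time_stamp = sent + rto;	/* wraps, as in tcp_rearm_rto() */
	int32_t delta = (int32_t)(rto_time_stamp - now);

	/* 48 jiffies elapsed of a 64-jiffy RTO, so 16 remain despite the wrap. */
	printf("delta = %d\n", delta);	/* prints 16 */
	return 0;
}

The same idiom explains the in-diff comment that "delta may not be positive if the socket is locked when the retrans timer fires": a negative signed delta simply means the deadline has already passed.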