Commit 2fd66ffb authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: introduce tcp_skb_timestamp_us() helper

There are a few places where TCP reads skb->skb_mstamp expecting
a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store CLOCK_TAI nsec value.

Add tcp_skb_timestamp_us() to provide proper conversion when needed.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 72b0094f
...@@ -774,6 +774,12 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) ...@@ -774,6 +774,12 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
} }
/* Provide the skb's departure time in usec units.
 *
 * skb->skb_mstamp currently stores a usec value, so this is a plain
 * read today; the helper exists so that every usec consumer goes
 * through one conversion point when the underlying storage changes
 * (per the commit message, skb->tstamp aka skb->skb_mstamp will soon
 * hold a CLOCK_TAI nsec value).
 */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return skb->skb_mstamp;
}
#define tcp_flag_byte(th) (((u_int8_t *)th)[13]) #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
...@@ -1940,7 +1946,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) ...@@ -1940,7 +1946,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
{ {
const struct sk_buff *skb = tcp_rtx_queue_head(sk); const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto; u32 rto = inet_csk(sk)->icsk_rto;
u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
} }
......
...@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, ...@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
*/ */
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount, start_seq, end_seq, dup_sack, pcount,
skb->skb_mstamp); tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate); tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint) if (skb == tp->lost_skb_hint)
...@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, ...@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->end_seq, TCP_SKB_CB(skb)->end_seq,
dup_sack, dup_sack,
tcp_skb_pcount(skb), tcp_skb_pcount(skb),
skb->skb_mstamp); tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate); tcp_rate_skb_delivered(sk, skb, state->rate);
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
list_del_init(&skb->tcp_tsorted_anchor); list_del_init(&skb->tcp_tsorted_anchor);
...@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, ...@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
tp->retrans_out -= acked_pcount; tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED; flag |= FLAG_RETRANS_DATA_ACKED;
} else if (!(sacked & TCPCB_SACKED_ACKED)) { } else if (!(sacked & TCPCB_SACKED_ACKED)) {
last_ackt = skb->skb_mstamp; last_ackt = tcp_skb_timestamp_us(skb);
WARN_ON_ONCE(last_ackt == 0); WARN_ON_ONCE(last_ackt == 0);
if (!first_ackt) if (!first_ackt)
first_ackt = last_ackt; first_ackt = last_ackt;
...@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, ...@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
tp->delivered += acked_pcount; tp->delivered += acked_pcount;
if (!tcp_skb_spurious_retrans(tp, skb)) if (!tcp_skb_spurious_retrans(tp, skb))
tcp_rack_advance(tp, sacked, scb->end_seq, tcp_rack_advance(tp, sacked, scb->end_seq,
skb->skb_mstamp); tcp_skb_timestamp_us(skb));
} }
if (sacked & TCPCB_LOST) if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount; tp->lost_out -= acked_pcount;
...@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, ...@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
} }
} else if (skb && rtt_update && sack_rtt_us >= 0 && } else if (skb && rtt_update && sack_rtt_us >= 0 &&
sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
tcp_skb_timestamp_us(skb))) {
/* Do not re-arm RTO if the sack RTT is measured from data sent /* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the * after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery. * timeout may continue to extend in loss recovery.
......
...@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) ...@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
BUG_ON(!skb); BUG_ON(!skb);
tcp_mstamp_refresh(tp); tcp_mstamp_refresh(tp);
delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp); delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
remaining = icsk->icsk_rto - remaining = icsk->icsk_rto -
usecs_to_jiffies(delta_us); usecs_to_jiffies(delta_us);
......
...@@ -1966,7 +1966,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, ...@@ -1966,7 +1966,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_rtx_queue_head(sk); head = tcp_rtx_queue_head(sk);
if (!head) if (!head)
goto send_now; goto send_now;
age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
/* If next ACK is likely to come too late (half srtt), do not defer */ /* If next ACK is likely to come too late (half srtt), do not defer */
if (age < (tp->srtt_us >> 4)) if (age < (tp->srtt_us >> 4))
goto send_now; goto send_now;
......
...@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb) ...@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
* bandwidth estimate. * bandwidth estimate.
*/ */
if (!tp->packets_out) { if (!tp->packets_out) {
tp->first_tx_mstamp = skb->skb_mstamp; u64 tstamp_us = tcp_skb_timestamp_us(skb);
tp->delivered_mstamp = skb->skb_mstamp;
tp->first_tx_mstamp = tstamp_us;
tp->delivered_mstamp = tstamp_us;
} }
TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
...@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, ...@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
rs->is_app_limited = scb->tx.is_app_limited; rs->is_app_limited = scb->tx.is_app_limited;
rs->is_retrans = scb->sacked & TCPCB_RETRANS; rs->is_retrans = scb->sacked & TCPCB_RETRANS;
/* Record send time of most recently ACKed packet: */
tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
/* Find the duration of the "send phase" of this window: */ /* Find the duration of the "send phase" of this window: */
rs->interval_us = tcp_stamp_us_delta( rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
skb->skb_mstamp, scb->tx.first_tx_mstamp);
scb->tx.first_tx_mstamp);
/* Record send time of most recently ACKed packet: */
tp->first_tx_mstamp = skb->skb_mstamp;
} }
/* Mark off the skb delivered once it's sacked to avoid being /* Mark off the skb delivered once it's sacked to avoid being
* used again when it's cumulatively acked. For acked packets * used again when it's cumulatively acked. For acked packets
......
...@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk) ...@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{ {
return tp->rack.rtt_us + reo_wnd - return tp->rack.rtt_us + reo_wnd -
tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp); tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
} }
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01): /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
...@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) ...@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
!(scb->sacked & TCPCB_SACKED_RETRANS)) !(scb->sacked & TCPCB_SACKED_RETRANS))
continue; continue;
if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp, if (!tcp_rack_sent_after(tp->rack.mstamp,
tcp_skb_timestamp_us(skb),
tp->rack.end_seq, scb->end_seq)) tp->rack.end_seq, scb->end_seq))
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment