Commit f3319816 authored by Eric Dumazet, committed by David S. Miller

tcp: pass previous skb to tcp_shifted_skb()

No need to recompute the previous skb in tcp_shifted_skb(): the caller
already has it, and the lookup will become more expensive once the
rtx queue is converted to an RB tree.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8ba6ddaa
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1288,13 +1288,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
-static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
+			    struct sk_buff *skb,
 			    struct tcp_sacktag_state *state,
 			    unsigned int pcount, int shifted, int mss,
 			    bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
 	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
 	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
@@ -1495,7 +1495,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1514,7 +1514,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		len = skb->len;
 		if (skb_shift(prev, skb, len)) {
 			pcount += tcp_skb_pcount(skb);
-			tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+			tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
+					len, mss, 0);
 		}
 	}
 
 out:
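For context, the change is a simple hoisting pattern: tcp_shift_skb_data() has
already located prev in order to call skb_shift(), so tcp_shifted_skb() no
longer needs to look it up again with tcp_write_queue_prev() — a lookup that is
one pointer chase on the doubly linked write queue, but would become a tree
walk once the rtx queue is an RB tree. Below is a minimal standalone C sketch
of the pattern; the node type and function names are illustrative, not the
kernel's.

#include <stdio.h>

/* Toy stand-in for a queue node; nothing here is kernel code. */
struct node {
	int len;
	struct node *prev;
};

/* Old shape: the callee recomputes its predecessor on every call.
 * One pointer chase on a doubly linked list, but an O(log n) walk
 * if the queue were an rb-tree. */
static void merge_into_prev_old(struct node *skb)
{
	struct node *prev = skb->prev;	/* lookup done inside the callee */

	prev->len += skb->len;
	skb->len = 0;
}

/* New shape: the caller already holds prev and passes it down,
 * so no lookup is needed at all. */
static void merge_into_prev(struct node *prev, struct node *skb)
{
	prev->len += skb->len;
	skb->len = 0;
}

int main(void)
{
	struct node a = { .len = 100, .prev = NULL };
	struct node b = { .len = 50, .prev = &a };

	merge_into_prev_old(&b);	/* callee chases b.prev itself */
	merge_into_prev(&a, &b);	/* caller supplies the known prev */
	printf("a.len=%d b.len=%d\n", a.len, b.len);	/* a.len=150 b.len=0 */
	return 0;
}

Both shapes behave identically on a list; the point of the commit is that only
the second one stays cheap after the planned tree conversion.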