Commit 18a4c0ea authored by Eric Dumazet, committed by David S. Miller

net: add rb_to_skb() and other rb tree helpers

Generalize the private netem_rb_to_skb() helper

TCP rtx queue will soon be converted to rb-tree,
so we will need skb_rbtree_walk() helpers.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f5333f80
...@@ -3158,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) ...@@ -3158,6 +3158,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
return __skb_grow(skb, len); return __skb_grow(skb, len);
} }
/* Convert an rb_node embedded in an sk_buff back to the owning sk_buff.
 * Uses rb_entry_safe(), so a NULL @rb yields a NULL sk_buff — this lets
 * the helpers below return NULL for an empty tree or end-of-walk.
 */
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
/* First / last sk_buff of rb-tree @root, or NULL when the tree is empty. */
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root) rb_to_skb(rb_last(root))
/* In-order successor / predecessor of @skb, or NULL at either end. */
#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
#define skb_queue_walk(queue, skb) \ #define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \ for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \ skb != (struct sk_buff *)(queue); \
...@@ -3172,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) ...@@ -3172,6 +3178,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
for (; skb != (struct sk_buff *)(queue); \ for (; skb != (struct sk_buff *)(queue); \
skb = skb->next) skb = skb->next)
/* Walk every sk_buff stored in rb-tree @root, in ascending (in-order)
 * rb-tree order.  @skb must not be erased from the tree inside the loop
 * body: the next node is computed from the current one each iteration.
 */
#define skb_rbtree_walk(skb, root) \
for (skb = skb_rb_first(root); skb != NULL; \
skb = skb_rb_next(skb))
/* Continue an in-order walk starting from the current value of @skb. */
#define skb_rbtree_walk_from(skb) \
for (; skb != NULL; \
skb = skb_rb_next(skb))
/* Like skb_rbtree_walk_from(), but safe against erasing @skb from the
 * tree inside the loop: the successor is stashed in @tmp before the body
 * runs (comma expression evaluates tmp first, then tests skb).
 */
#define skb_rbtree_walk_from_safe(skb, tmp) \
for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
skb = tmp)
#define skb_queue_walk_from_safe(queue, skb, tmp) \ #define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \ for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \ skb != (struct sk_buff *)(queue); \
......
...@@ -465,17 +465,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk) ...@@ -465,17 +465,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
void tcp_fastopen_active_disable_ofo_check(struct sock *sk) void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct rb_node *p;
struct sk_buff *skb;
struct dst_entry *dst; struct dst_entry *dst;
struct sk_buff *skb;
if (!tp->syn_fastopen) if (!tp->syn_fastopen)
return; return;
if (!tp->data_segs_in) { if (!tp->data_segs_in) {
p = rb_first(&tp->out_of_order_queue); skb = skb_rb_first(&tp->out_of_order_queue);
if (p && !rb_next(p)) { if (skb && !skb_rb_next(skb)) {
skb = rb_entry(p, struct sk_buff, rbnode);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
tcp_fastopen_active_disable(sk); tcp_fastopen_active_disable(sk);
return; return;
......
...@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk) ...@@ -4335,7 +4335,7 @@ static void tcp_ofo_queue(struct sock *sk)
p = rb_first(&tp->out_of_order_queue); p = rb_first(&tp->out_of_order_queue);
while (p) { while (p) {
skb = rb_entry(p, struct sk_buff, rbnode); skb = rb_to_skb(p);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break; break;
...@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, ...@@ -4399,7 +4399,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct rb_node **p, *q, *parent; struct rb_node **p, *parent;
struct sk_buff *skb1; struct sk_buff *skb1;
u32 seq, end_seq; u32 seq, end_seq;
bool fragstolen; bool fragstolen;
...@@ -4458,7 +4458,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) ...@@ -4458,7 +4458,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
parent = NULL; parent = NULL;
while (*p) { while (*p) {
parent = *p; parent = *p;
skb1 = rb_entry(parent, struct sk_buff, rbnode); skb1 = rb_to_skb(parent);
if (before(seq, TCP_SKB_CB(skb1)->seq)) { if (before(seq, TCP_SKB_CB(skb1)->seq)) {
p = &parent->rb_left; p = &parent->rb_left;
continue; continue;
...@@ -4503,9 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) ...@@ -4503,9 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
merge_right: merge_right:
/* Remove other segments covered by skb. */ /* Remove other segments covered by skb. */
while ((q = rb_next(&skb->rbnode)) != NULL) { while ((skb1 = skb_rb_next(skb)) != NULL) {
skb1 = rb_entry(q, struct sk_buff, rbnode);
if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break; break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
...@@ -4520,7 +4518,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) ...@@ -4520,7 +4518,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
tcp_drop(sk, skb1); tcp_drop(sk, skb1);
} }
/* If there is no skb after us, we are the last_skb ! */ /* If there is no skb after us, we are the last_skb ! */
if (!q) if (!skb1)
tp->ooo_last_skb = skb; tp->ooo_last_skb = skb;
add_sack: add_sack:
...@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li ...@@ -4706,7 +4704,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
if (list) if (list)
return !skb_queue_is_last(list, skb) ? skb->next : NULL; return !skb_queue_is_last(list, skb) ? skb->next : NULL;
return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode); return skb_rb_next(skb);
} }
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
...@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) ...@@ -4735,7 +4733,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
while (*p) { while (*p) {
parent = *p; parent = *p;
skb1 = rb_entry(parent, struct sk_buff, rbnode); skb1 = rb_to_skb(parent);
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
p = &parent->rb_left; p = &parent->rb_left;
else else
...@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk) ...@@ -4854,26 +4852,19 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb, *head; struct sk_buff *skb, *head;
struct rb_node *p;
u32 start, end; u32 start, end;
p = rb_first(&tp->out_of_order_queue); skb = skb_rb_first(&tp->out_of_order_queue);
skb = rb_entry_safe(p, struct sk_buff, rbnode);
new_range: new_range:
if (!skb) { if (!skb) {
p = rb_last(&tp->out_of_order_queue); tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
/* Note: This is possible p is NULL here. We do not
* use rb_entry_safe(), as ooo_last_skb is valid only
* if rbtree is not empty.
*/
tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
return; return;
} }
start = TCP_SKB_CB(skb)->seq; start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq; end = TCP_SKB_CB(skb)->end_seq;
for (head = skb;;) { for (head = skb;;) {
skb = tcp_skb_next(skb, NULL); skb = skb_rb_next(skb);
/* Range is terminated when we see a gap or when /* Range is terminated when we see a gap or when
* we are at the queue end. * we are at the queue end.
...@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk) ...@@ -4916,14 +4907,14 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
do { do {
prev = rb_prev(node); prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue); rb_erase(node, &tp->out_of_order_queue);
tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode)); tcp_drop(sk, rb_to_skb(node));
sk_mem_reclaim(sk); sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!tcp_under_memory_pressure(sk)) !tcp_under_memory_pressure(sk))
break; break;
node = prev; node = prev;
} while (node); } while (node);
tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode); tp->ooo_last_skb = rb_to_skb(prev);
/* Reset SACK state. A conforming SACK implementation will /* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection * do the same at a timeout based retransmit. When a connection
......
...@@ -148,12 +148,6 @@ struct netem_skb_cb { ...@@ -148,12 +148,6 @@ struct netem_skb_cb {
psched_time_t time_to_send; psched_time_t time_to_send;
}; };
/* Convert an rb_node embedded in an sk_buff back to the owning sk_buff.
 * NOTE(review): private netem copy of the generic rb_to_skb() helper;
 * unlike rb_entry_safe()-based rb_to_skb(), this does NOT tolerate a
 * NULL @rb — callers must pass a valid node.
 */
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
return rb_entry(rb, struct sk_buff, rbnode);
}
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{ {
/* we assume we can use skb next/prev/tstamp as storage for rb_node */ /* we assume we can use skb next/prev/tstamp as storage for rb_node */
...@@ -364,7 +358,7 @@ static void tfifo_reset(struct Qdisc *sch) ...@@ -364,7 +358,7 @@ static void tfifo_reset(struct Qdisc *sch)
struct rb_node *p = rb_first(&q->t_root); struct rb_node *p = rb_first(&q->t_root);
while (p) { while (p) {
struct sk_buff *skb = netem_rb_to_skb(p); struct sk_buff *skb = rb_to_skb(p);
p = rb_next(p); p = rb_next(p);
rb_erase(&skb->rbnode, &q->t_root); rb_erase(&skb->rbnode, &q->t_root);
...@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) ...@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
struct sk_buff *skb; struct sk_buff *skb;
parent = *p; parent = *p;
skb = netem_rb_to_skb(parent); skb = rb_to_skb(parent);
if (tnext >= netem_skb_cb(skb)->time_to_send) if (tnext >= netem_skb_cb(skb)->time_to_send)
p = &parent->rb_right; p = &parent->rb_right;
else else
...@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, ...@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff *t_skb; struct sk_buff *t_skb;
struct netem_skb_cb *t_last; struct netem_skb_cb *t_last;
t_skb = netem_rb_to_skb(rb_last(&q->t_root)); t_skb = skb_rb_last(&q->t_root);
t_last = netem_skb_cb(t_skb); t_last = netem_skb_cb(t_skb);
if (!last || if (!last ||
t_last->time_to_send > last->time_to_send) { t_last->time_to_send > last->time_to_send) {
...@@ -617,7 +611,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) ...@@ -617,7 +611,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (p) { if (p) {
psched_time_t time_to_send; psched_time_t time_to_send;
skb = netem_rb_to_skb(p); skb = rb_to_skb(p);
/* if more time remaining? */ /* if more time remaining? */
time_to_send = netem_skb_cb(skb)->time_to_send; time_to_send = netem_skb_cb(skb)->time_to_send;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment