Commit 7df40c26 authored by Eric Dumazet, committed by David S. Miller

net_sched: fq: take care of throttled flows before reuse

Normally, a socket cannot be freed/reused until all of its TX packets
have left the qdisc and been TX-completed. However, connect(AF_UNSPEC)
allows this to happen.
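
For context, the disconnect that makes this possible looks roughly like
the following userspace snippet (illustrative only, not part of the
patch; fd is assumed to be a connected UDP socket):

    #include <string.h>
    #include <sys/socket.h>

    /* Dissolve a datagram socket's association via AF_UNSPEC.
     * After this, the same socket can be connected again and its fq
     * flow reused while earlier packets may still sit throttled in
     * the qdisc.
     */
    static int udp_disconnect(int fd)
    {
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        return connect(fd, &sa, sizeof(sa));
    }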

With commit fc59d5bd ("pkt_sched: fq: clear time_next_packet for
reused flows"), we cleared f->time_next_packet but took no special
action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in the rb-tree searches,
blindly clearing it might break rb-tree integrity. We need to make
sure the flow is no longer in the rb-tree to avoid this problem.
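
For reference, the throttled tree is ordered by that key; a simplified
sketch of the existing insertion in fq_flow_set_throttled() (trimmed
here from net/sched/sch_fq.c for illustration) shows why the key must
stay stable while the node is linked:

    /* Simplified: flows are placed in q->delayed by comparing
     * f->time_next_packet, so clearing that field while the node is
     * still linked breaks the ordering that later searches rely on.
     */
    while (*p) {
        struct fq_flow *aux;

        parent = *p;
        aux = rb_entry(parent, struct fq_flow, rate_node);
        if (f->time_next_packet >= aux->time_next_packet)
            p = &parent->rb_right;
        else
            p = &parent->rb_left;
    }
    rb_link_node(&f->rate_node, parent, p);
    rb_insert_color(&f->rate_node, &q->delayed);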

Fixes: fc59d5bd ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 30ca22e4
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 			     f->socket_hash != sk->sk_hash)) {
 				f->credit = q->initial_quantum;
 				f->socket_hash = sk->sk_hash;
+				if (fq_flow_is_throttled(f))
+					fq_flow_unset_throttled(q, f);
 				f->time_next_packet = 0ULL;
 			}
 			return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 