Commit f7efd01f authored by Eric Dumazet, committed by David S. Miller

net: enqueue_to_backlog() cleanup

We can remove a goto and a label by reversing a condition.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a7ae7b0b
...@@ -4816,20 +4816,18 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, ...@@ -4816,20 +4816,18 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
backlog_lock_irq_save(sd, &flags); backlog_lock_irq_save(sd, &flags);
qlen = skb_queue_len(&sd->input_pkt_queue); qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) { if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
if (qlen) { if (!qlen) {
enqueue: /* Schedule NAPI for backlog device. We can use
__skb_queue_tail(&sd->input_pkt_queue, skb); * non atomic operation as we own the queue lock.
input_queue_tail_incr_save(sd, qtail); */
backlog_unlock_irq_restore(sd, &flags); if (!__test_and_set_bit(NAPI_STATE_SCHED,
return NET_RX_SUCCESS; &sd->backlog.state))
napi_schedule_rps(sd);
} }
__skb_queue_tail(&sd->input_pkt_queue, skb);
/* Schedule NAPI for backlog device input_queue_tail_incr_save(sd, qtail);
* We can use non atomic operation since we own the queue lock backlog_unlock_irq_restore(sd, &flags);
*/ return NET_RX_SUCCESS;
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
napi_schedule_rps(sd);
goto enqueue;
} }
backlog_unlock_irq_restore(sd, &flags); backlog_unlock_irq_restore(sd, &flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment