Commit 9191ab2f authored by Julian Anastasov, committed by Ben Hutchings

net: call rcu_read_lock early in process_backlog

commit 2c17d27c upstream.

An incoming packet should be either in the backlog queue or
inside an RCU read-side section. Otherwise, the final sequence of
flush_backlog() and synchronize_net() may miss packets
that are processed without holding a device reference:

CPU 1                  CPU 2
                       skb->dev: no reference
                       process_backlog:__skb_dequeue
                       process_backlog:local_irq_enable

on_each_cpu for
flush_backlog =>       IPI(hardirq): flush_backlog
                       - packet not found in backlog

                       CPU delayed ...
synchronize_net
- no ongoing RCU
read-side sections

netdev_run_todo,
rcu_barrier: no
ongoing callbacks
                       __netif_receive_skb_core:rcu_read_lock
                       - too late
free dev
                       process packet for freed dev
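
The invariant the fix restores can be sketched in userspace with
liburcu. This is a hedged analogue, not kernel code: the urcu-bp
flavour is assumed, and fake_dev, the_dev and reader() are
hypothetical names chosen for illustration.

/* Minimal userspace analogue of the fix, assuming liburcu's
 * "bulletproof" flavour.  Build: gcc demo.c -lurcu-bp -lpthread.
 * All names here are hypothetical, not kernel identifiers. */
#include <urcu-bp.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_dev { int ifindex; };

static struct fake_dev *the_dev;	/* stands in for skb->dev */

static void *reader(void *arg)
{
	/* The point of the patch: enter the read-side section
	 * *before* picking up the pointer, so the updater's
	 * synchronize_rcu() cannot return while we still use it. */
	rcu_read_lock();
	struct fake_dev *dev = rcu_dereference(the_dev);
	if (dev)
		printf("processing packet for ifindex %d\n", dev->ifindex);
	rcu_read_unlock();
	return NULL;
}

int main(void)
{
	struct fake_dev *dev = malloc(sizeof(*dev));
	dev->ifindex = 1;
	rcu_assign_pointer(the_dev, dev);

	pthread_t t;
	pthread_create(&t, NULL, reader, NULL);

	/* Updater, mirroring device unregistration: unpublish,
	 * wait out all read-side sections, only then free. */
	rcu_assign_pointer(the_dev, NULL);
	synchronize_rcu();
	free(dev);

	pthread_join(t, NULL);
	return 0;
}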

Fixes: 6e583ce5 ("net: eliminate refcounting in backlog queue")
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: David S. Miller <davem@davemloft.net>
[bwh: Backported to 3.2:
 - Adjust context
 - No need to rename the label in __netif_receive_skb()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 78b6803a
@@ -3261,8 +3261,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 
 	__this_cpu_inc(softnet_data.processed);
@@ -3357,7 +3355,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	}
 
 out:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -3378,34 +3375,31 @@ static int __netif_receive_skb(struct sk_buff *skb)
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+	int ret;
+
 	if (netdev_tstamp_prequeue)
 		net_timestamp_check(skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	{
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
-		} else {
-			rcu_read_unlock();
-			ret = __netif_receive_skb(skb);
+			return ret;
 		}
-
-		return ret;
 	}
-#else
-	return __netif_receive_skb(skb);
 #endif
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
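
For readability, netif_receive_skb() after this hunk applies should
read roughly as follows (reconstructed from the diff above; blank
lines and surrounding context are best-effort, and the comments are
editorial, not part of the patch):

int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();

#ifdef CONFIG_RPS
	{
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			/* Packet is parked in a per-CPU backlog queue,
			 * so the read-side section may end here. */
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	/* Deliver directly, still inside the read-side section. */
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}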
@@ -3796,8 +3790,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
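
Note the ordering inside the loop: rcu_read_lock() is taken while
interrupts are still disabled, before local_irq_enable(). Once
interrupts are enabled, the flush_backlog IPI can run and will no
longer find the dequeued skb in the backlog, so from that point only
the read-side section keeps synchronize_net() from completing while
the packet is still in flight.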