Commit 96e97bc0 authored by Jakub Kicinski, committed by David S. Miller

net: disable netpoll on fresh napis

napi_disable() makes sure to set the NAPI_STATE_NPSVC bit to prevent
netpoll from accessing rings before init is complete. However, the
same is not done for fresh napi instances in netif_napi_add(),
even though we expect NAPI instances to be added as disabled.

This causes crashes during driver reconfiguration (enabling XDP,
changing the channel count) - if there is any printk() after
netif_napi_add() but before napi_enable().

To ensure memory ordering is correct we need to use RCU accessors.
Reported-by: Rob Sherwood <rsher@fb.com>
Fixes: 2d8bff12 ("netpoll: Close race condition between poll_one_napi and napi_disable")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7f6f32bb
...@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, ...@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
netdev_err_once(dev, "%s() called with weight %d\n", __func__, netdev_err_once(dev, "%s() called with weight %d\n", __func__,
weight); weight);
napi->weight = weight; napi->weight = weight;
list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev; napi->dev = dev;
#ifdef CONFIG_NETPOLL #ifdef CONFIG_NETPOLL
napi->poll_owner = -1; napi->poll_owner = -1;
#endif #endif
set_bit(NAPI_STATE_SCHED, &napi->state); set_bit(NAPI_STATE_SCHED, &napi->state);
set_bit(NAPI_STATE_NPSVC, &napi->state);
list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi); napi_hash_add(napi);
} }
EXPORT_SYMBOL(netif_napi_add); EXPORT_SYMBOL(netif_napi_add);
......
...@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev) ...@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi; struct napi_struct *napi;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
list_for_each_entry(napi, &dev->napi_list, dev_list) { list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi); poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1); smp_store_release(&napi->poll_owner, -1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.