Commit 52d8b8ad authored by Eric Dumazet, committed by Greg Kroah-Hartman

net: better skb->sender_cpu and skb->napi_id cohabitation

commit 52bd2d62 upstream.

skb->sender_cpu and skb->napi_id share a common storage,
and we had various bugs about this.

We had to call skb_sender_cpu_clear() in some places to
not leave a prior skb->napi_id and fool netdev_pick_tx()

As suggested by Alexei, we could split the space so that
these errors can not happen.

0 value being reserved as the common (not initialized) value,
let's reserve [1 .. NR_CPUS] range for valid sender_cpu,
and [NR_CPUS+1 .. ~0U] for valid napi_id.

This will allow proper busy polling support over tunnels.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Paul Menzel <pmenzel@molgen.mpg.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3c0fcb52
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1084,9 +1084,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 static inline void skb_sender_cpu_clear(struct sk_buff *skb)
 {
-#ifdef CONFIG_XPS
-	skb->sender_cpu = 0;
-#endif
 }
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(dev_base_lock);
 /* protects napi_hash addition/deletion and napi_gen_id */
 static DEFINE_SPINLOCK(napi_hash_lock);
 
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
 static DEFINE_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
@@ -3049,7 +3049,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 	int queue_index = 0;
 
 #ifdef CONFIG_XPS
-	if (skb->sender_cpu == 0)
+	u32 sender_cpu = skb->sender_cpu - 1;
+
+	if (sender_cpu >= (u32)NR_CPUS)
 		skb->sender_cpu = raw_smp_processor_id() + 1;
 #endif
@@ -4726,25 +4728,22 @@ EXPORT_SYMBOL_GPL(napi_by_id);
 
 void napi_hash_add(struct napi_struct *napi)
 {
-	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+	if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+		return;
 
-		spin_lock(&napi_hash_lock);
+	spin_lock(&napi_hash_lock);
 
-		/* 0 is not a valid id, we also skip an id that is taken
-		 * we expect both events to be extremely rare
-		 */
-		napi->napi_id = 0;
-		while (!napi->napi_id) {
-			napi->napi_id = ++napi_gen_id;
-			if (napi_by_id(napi->napi_id))
-				napi->napi_id = 0;
-		}
+	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+	do {
+		if (unlikely(++napi_gen_id < NR_CPUS + 1))
+			napi_gen_id = NR_CPUS + 1;
+	} while (napi_by_id(napi_gen_id));
+	napi->napi_id = napi_gen_id;
 
-		hlist_add_head_rcu(&napi->napi_hash_node,
-			    &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+	hlist_add_head_rcu(&napi->napi_hash_node,
+			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 
-		spin_unlock(&napi_hash_lock);
-	}
+	spin_unlock(&napi_hash_lock);
 }
 EXPORT_SYMBOL_GPL(napi_hash_add);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment