Commit 5a7a40ba authored by Jesper Dangaard Brouer, committed by Greg Kroah-Hartman

Revert "net: use lib/percpu_counter API for fragmentation mem accounting"


[ Upstream commit fb452a1a ]

This reverts commit 6d7b857d.

There is a bug in the fragmentation code's use of the percpu_counter
API that can cause issues on systems with many CPUs.

frag_mem_limit() just reads the global counter (fbc->count), without
considering that each of the other CPUs can hold up to the batch size
(130000 bytes) in per-CPU deltas that have not been subtracted yet.
Due to the 3 MBytes lower thresh limit, this becomes dangerous at
>= 24 CPUs (3*1024*1024/130000 = 24).
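Concretely, the unsafe pattern reduces to roughly the following (a
simplified sketch of the pre-revert read side, not the literal call
site):

	/* frag_mem_limit() only reads the lazily-updated global count
	 * (fbc->count).  Each other CPU may still hold up to the batch
	 * size (130000 bytes) in its local delta, so with N CPUs the
	 * value seen here can under-report by N * 130000 bytes.
	 */
	if (frag_mem_limit(nf) <= nf->low_thresh)
		return;	/* with >= 24 CPUs this can wrongly skip eviction */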

The correct API usage would be __percpu_counter_compare(), which does
the right thing: it takes the number of (online) CPUs and the batch
size into account, and calls the exact __percpu_counter_sum() only
when the cheap global read is inconclusive.
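A minimal sketch of that correct usage, built around the pre-revert
percpu_counter field (the helper name frag_mem_over_limit() is
hypothetical; this patch reverts instead of taking this route):

	#include <linux/percpu_counter.h>

	/* Hypothetical helper, for illustration only:
	 * __percpu_counter_compare() reads the cheap global count first
	 * and only falls back to the locked, exact __percpu_counter_sum()
	 * when the global count is within num_online_cpus() * batch of
	 * the threshold.
	 */
	static inline bool frag_mem_over_limit(struct netns_frags *nf, int thresh)
	{
		return __percpu_counter_compare(&nf->mem, thresh,
						frag_percpu_counter_batch) > 0;
	}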

We choose to revert the use of the lib/percpu_counter API for frag
memory accounting for several reasons:

1) On systems with more than 24 CPUs, the heavier fully locked
   __percpu_counter_sum() would always be invoked, which is more
   expensive than the atomic_t that is reverted to.

Given that systems with more than 24 CPUs are becoming common, this
doesn't seem like a good option.  To mitigate this, the batch size
could be decreased and the thresh increased (the exact-sum fallback
kicks in at roughly thresh/batch CPUs, so halving the batch doubles
the CPU count the scheme can handle).

2) The add_frag_mem_limit+sub_frag_mem_limit pairs happen on the RX
   CPU, before SKBs are pushed into sockets on remote CPUs.  Given
   that NICs can only hash fragments on the L3 (IP header) fields,
   the number of NIC RX queues hit will likely be limited.  Thus,
   there is a fair chance that the atomic add+sub happen on the
   same CPU.

A note on the revert: commit 1d6119ba ("net: fix percpu memory leaks")
removed init_frag_mem_limit() and instead used inet_frags_init_net().
After this revert, inet_frags_uninit_net() becomes empty.

Fixes: 6d7b857d ("net: use lib/percpu_counter API for fragmentation mem accounting")
Fixes: 1d6119ba ("net: fix percpu memory leaks")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b5a3ae8b
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,9 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
-#include <linux/percpu_counter.h>
-
 struct netns_frags {
-	/* The percpu_counter "mem" need to be cacheline aligned.
-	 *  mem.count must not share cacheline with other writers
-	 */
-	struct percpu_counter   mem ____cacheline_aligned_in_smp;
-
+	/* Keep atomic mem on separate cachelines in structs that include it */
+	atomic_t		mem ____cacheline_aligned_in_smp;
 	/* sysctls */
 	int			timeout;
 	int			high_thresh;
@@ -110,11 +105,11 @@ void inet_frags_fini(struct inet_frags *);
 static inline int inet_frags_init_net(struct netns_frags *nf)
 {
-	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
+	atomic_set(&nf->mem, 0);
+	return 0;
 }
 
 static inline void inet_frags_uninit_net(struct netns_frags *nf)
 {
-	percpu_counter_destroy(&nf->mem);
 }
 
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
@@ -140,37 +135,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
 
 /* Memory Tracking Functions. */
 
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
 static inline int frag_mem_limit(struct netns_frags *nf)
 {
-	return percpu_counter_read(&nf->mem);
+	return atomic_read(&nf->mem);
 }
 
 static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+	atomic_sub(i, &nf->mem);
 }
 
 static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+	atomic_add(i, &nf->mem);
 }
 
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
 {
-	unsigned int res;
-
-	local_bh_disable();
-	res = percpu_counter_sum_positive(&nf->mem);
-	local_bh_enable();
-
-	return res;
+	return atomic_read(&nf->mem);
 }
 
 /* RFC 3168 support :
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -234,10 +234,8 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 
 		cond_resched();
 
 	if (read_seqretry(&f->rnd_seqlock, seq) ||
-	    percpu_counter_sum(&nf->mem))
+	    sum_frag_mem_limit(nf))
 		goto evict_again;
-
-	percpu_counter_destroy(&nf->mem);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);