Commit dbae2b06 authored by Paolo Abeni, committed by Jakub Kicinski

net: skb: introduce and use a single page frag cache

After commit 3226b158 ("net: avoid 32 x truesize under-estimation
for tiny skbs") we are observing 10-20% regressions in performance
tests with small packets. The perf trace points to high pressure on
the slab allocator.

This change tries to improve the allocation scheme for small packets,
using an idea originally suggested by Eric: a new per-CPU page frag is
introduced and used in __napi_alloc_skb to cope with small allocation
requests.

To ensure that the above does not lead to excessive truesize
underestimation, the frag size for small allocations is inflated to 1K,
and all of the above is restricted to builds with a 4K page size.
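
For a rough sense of the bound this gives (illustrative arithmetic, not
part of the original changelog): a 1K fragment can pin at most one 4K
order-0 page while its accounted truesize is at least 1K plus the skb
overhead, so the under-estimation is capped at roughly 4096 / 1024 = 4x,
versus up to 32768 / 1024 = 32x when tiny heads were carved out of the
32K page frag cache.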

Note that the run-time check introduced with commit fd9ea57f ("net: add
napi_get_frags_check() helper") needs to be updated accordingly.

Alex suggested a smart page refcount scheme to reduce the number of
atomic operations and deal properly with pfmemalloc pages.

Under a small-packet UDP flood, I measured a 15% peak throughput
increase.
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Suggested-by: Alexander H Duyck <alexanderduyck@fb.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Link: https://lore.kernel.org/r/6b6f65957c59f86a353fc09a5127e83a32ab5999.1664350652.git.pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7cba1833

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3821,6 +3821,7 @@ void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
+void napi_get_frags_check(struct napi_struct *napi);
 gro_result_t napi_gro_frags(struct napi_struct *napi);
 struct packet_offload *gro_find_receive_by_type(__be16 type);
 struct packet_offload *gro_find_complete_by_type(__be16 type);

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6358,23 +6358,6 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
 }
 EXPORT_SYMBOL(dev_set_threaded);
 
-/* Double check that napi_get_frags() allocates skbs with
- * skb->head being backed by slab, not a page fragment.
- * This is to make sure bug fixed in 3226b158e67c
- * ("net: avoid 32 x truesize under-estimation for tiny skbs")
- * does not accidentally come back.
- */
-static void napi_get_frags_check(struct napi_struct *napi)
-{
-        struct sk_buff *skb;
-
-        local_bh_disable();
-        skb = napi_get_frags(napi);
-        WARN_ON_ONCE(skb && skb->head_frag);
-        napi_free_frags(napi);
-        local_bh_enable();
-}
-
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
                            int (*poll)(struct napi_struct *, int), int weight)
 {

--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -134,8 +134,66 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 #define NAPI_SKB_CACHE_BULK     16
 #define NAPI_SKB_CACHE_HALF     (NAPI_SKB_CACHE_SIZE / 2)
 
+#if PAGE_SIZE == SZ_4K
+
+#define NAPI_HAS_SMALL_PAGE_FRAG        1
+#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)  ((nc).pfmemalloc)
+
+/* specialized page frag allocator using a single order 0 page
+ * and slicing it into 1K sized fragment. Constrained to systems
+ * with a very limited amount of 1K fragments fitting a single
+ * page - to avoid excessive truesize underestimation
+ */
+
+struct page_frag_1k {
+        void *va;
+        u16 offset;
+        bool pfmemalloc;
+};
+
+static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
+{
+        struct page *page;
+        int offset;
+
+        offset = nc->offset - SZ_1K;
+        if (likely(offset >= 0))
+                goto use_frag;
+
+        page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+        if (!page)
+                return NULL;
+
+        nc->va = page_address(page);
+        nc->pfmemalloc = page_is_pfmemalloc(page);
+        offset = PAGE_SIZE - SZ_1K;
+        page_ref_add(page, offset / SZ_1K);
+
+use_frag:
+        nc->offset = offset;
+        return nc->va + offset;
+}
+
+#else
+
+/* the small page is actually unused in this build; add dummy helpers
+ * to please the compiler and avoid later preprocessor's conditionals
+ */
+#define NAPI_HAS_SMALL_PAGE_FRAG        0
+#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)  false
+
+struct page_frag_1k {
+};
+
+static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
+{
+        return NULL;
+}
+
+#endif
+
 struct napi_alloc_cache {
         struct page_frag_cache page;
+        struct page_frag_1k page_small;
         unsigned int skb_count;
         void *skb_cache[NAPI_SKB_CACHE_SIZE];
 };
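
As an aside, the reference counting above can be summarized as follows:
alloc_pages_node() returns the page with refcount 1, page_ref_add(page, 3)
raises it to 4, i.e. one reference per 1K fragment, and each skb built on a
fragment later drops exactly one reference when its head is freed, so the
page goes back to the allocator only after the last of the four heads is
gone and no per-allocation atomic increment is needed. The standalone
userspace sketch below models that lifecycle; every model_* name is
hypothetical and this is not the kernel code.

/* frag1k_model.c - hypothetical userspace model of the 1K slicing and
 * upfront refcounting scheme; illustrative only, not part of the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096
#define MODEL_FRAG_SIZE 1024

struct model_page {
        int refcount;                     /* one ref per outstanding fragment */
        unsigned char data[MODEL_PAGE_SIZE];
};

struct model_frag_cache {
        struct model_page *page;
        int offset;
};

/* Hand out the next 1K slice, grabbing a fresh "page" when the current
 * one is exhausted and taking all fragment references in a single step.
 */
static void *model_frag_alloc(struct model_frag_cache *nc)
{
        int offset = nc->offset - MODEL_FRAG_SIZE;

        if (offset < 0) {
                struct model_page *page = malloc(sizeof(*page));

                if (!page)
                        return NULL;
                page->refcount = MODEL_PAGE_SIZE / MODEL_FRAG_SIZE;
                nc->page = page;
                offset = MODEL_PAGE_SIZE - MODEL_FRAG_SIZE;
        }
        nc->offset = offset;
        return nc->page->data + offset;
}

/* Each fragment owner drops one reference; the last drop frees the page. */
static void model_frag_free(struct model_page *page)
{
        if (--page->refcount == 0)
                free(page);
}

int main(void)
{
        struct model_frag_cache nc = { .page = NULL, .offset = 0 };
        struct model_page *page;
        int i;

        for (i = 0; i < 4; i++) {
                if (!model_frag_alloc(&nc))
                        return 1;
        }
        page = nc.page;                   /* all four slices share this page */

        for (i = 0; i < 4; i++)
                model_frag_free(page);    /* freed on the fourth release */

        printf("4K page recycled after its four 1K fragments were released\n");
        return 0;
}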

@@ -143,6 +201,23 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
+/* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+ * This is to make sure bug fixed in 3226b158e67c
+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
+ * does not accidentally come back.
+ */
+void napi_get_frags_check(struct napi_struct *napi)
+{
+        struct sk_buff *skb;
+
+        local_bh_disable();
+        skb = napi_get_frags(napi);
+        WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
+        napi_free_frags(napi);
+        local_bh_enable();
+}
+
 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
         struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

@@ -561,6 +636,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 {
         struct napi_alloc_cache *nc;
         struct sk_buff *skb;
+        bool pfmemalloc;
         void *data;
 
         DEBUG_NET_WARN_ON_ONCE(!in_softirq());

@@ -568,8 +644,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 
         /* If requested length is either too small or too big,
          * we use kmalloc() for skb->head allocation.
+         * When the small frag allocator is available, prefer it over kmalloc
+         * for small fragments
          */
-        if (len <= SKB_WITH_OVERHEAD(1024) ||
+        if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
             len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
             (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
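
As a point of reference (my estimate, not from the patch):
SKB_WITH_OVERHEAD(X) is X minus SKB_DATA_ALIGN(sizeof(struct
skb_shared_info)), so on a typical 64-bit build SKB_WITH_OVERHEAD(1024)
works out to roughly 700 bytes. On 4K-page builds, head requests up to
about that size now fall through to the new 1K frag allocator instead of
kmalloc, while anything between that and SKB_WITH_OVERHEAD(PAGE_SIZE)
keeps using the existing per-CPU page_frag_cache.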

@@ -580,13 +658,33 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
         }
 
         nc = this_cpu_ptr(&napi_alloc_cache);
-        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-        len = SKB_DATA_ALIGN(len);
 
         if (sk_memalloc_socks())
                 gfp_mask |= __GFP_MEMALLOC;
 
-        data = page_frag_alloc(&nc->page, len, gfp_mask);
+        if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
+                /* we are artificially inflating the allocation size, but
+                 * that is not as bad as it may look like, as:
+                 * - 'len' less than GRO_MAX_HEAD makes little sense
+                 * - On most systems, larger 'len' values lead to fragment
+                 *   size above 512 bytes
+                 * - kmalloc would use the kmalloc-1k slab for such values
+                 * - Builds with smaller GRO_MAX_HEAD will very likely do
+                 *   little networking, as that implies no WiFi and no
+                 *   tunnels support, and 32 bits arches.
+                 */
+                len = SZ_1K;
+
+                data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
+                pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
+        } else {
+                len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+                len = SKB_DATA_ALIGN(len);
+
+                data = page_frag_alloc(&nc->page, len, gfp_mask);
+                pfmemalloc = nc->page.pfmemalloc;
+        }
+
         if (unlikely(!data))
                 return NULL;
 
@@ -596,7 +694,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                 return NULL;
         }
 
-        if (nc->page.pfmemalloc)
+        if (pfmemalloc)
                 skb->pfmemalloc = 1;
         skb->head_frag = 1;
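
For context, here is a hedged sketch of the consumer side, not part of
this patch: a NAPI poll loop that copies small received frames into
napi_alloc_skb() heads is exactly the kind of caller that now lands on
the 1K frag path, since for such lengths the request stays well below
SKB_WITH_OVERHEAD(1024). napi_alloc_skb(), skb_put_data(),
eth_type_trans() and napi_gro_receive() are real kernel APIs; my_rx_desc,
my_rx_one and my_netdev are made-up names for illustration.

/* Hypothetical RX helper for a driver's NAPI poll routine. */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rx_desc {
        void *buf;
        unsigned int len;       /* small frames, e.g. 64-256 bytes */
};

static void my_rx_one(struct napi_struct *napi, struct net_device *my_netdev,
                      const struct my_rx_desc *desc)
{
        struct sk_buff *skb;

        /* For these small lengths the head allocation stays under
         * SKB_WITH_OVERHEAD(1024), so on 4K-page builds it now comes
         * from the per-CPU 1K page frag cache instead of the slab.
         */
        skb = napi_alloc_skb(napi, desc->len);
        if (!skb)
                return;

        skb_put_data(skb, desc->buf, desc->len);
        skb->protocol = eth_type_trans(skb, my_netdev);
        napi_gro_receive(napi, skb);
}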