Commit 559ae55c authored by Lukas Bulwahn, committed by Jakub Kicinski

net: skbuff: remove special handling for SLOB

Commit c9929f0e ("mm/slob: remove CONFIG_SLOB") removes CONFIG_SLOB.
Now, we can also remove special handling for socket buffers with the SLOB
allocator. The code with HAVE_SKB_SMALL_HEAD_CACHE=1 is now the default
behavior for all allocators.

Remove an unnecessary distinction between SLOB and SLAB/SLUB allocator
after the SLOB allocator is gone.
Signed-off-by: Lukas Bulwahn <lukas.bulwahn@gmail.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20230509071207.28942-1-lukas.bulwahn@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a731a43e
@@ -92,15 +92,7 @@ static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
 #endif
-/* skb_small_head_cache and related code is only supported
- * for CONFIG_SLAB and CONFIG_SLUB.
- * As soon as SLOB is removed from the kernel, we can clean up this.
- */
-#if !defined(CONFIG_SLOB)
-# define HAVE_SKB_SMALL_HEAD_CACHE 1
-#endif
-#ifdef HAVE_SKB_SMALL_HEAD_CACHE
 static struct kmem_cache *skb_small_head_cache __ro_after_init;
 #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
@@ -117,7 +109,6 @@ static struct kmem_cache *skb_small_head_cache __ro_after_init;
 #define SKB_SMALL_HEAD_HEADROOM \
 	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
-#endif /* HAVE_SKB_SMALL_HEAD_CACHE */
 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
 EXPORT_SYMBOL(sysctl_max_skb_frags);
@@ -562,7 +553,6 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 	void *obj;
 	obj_size = SKB_HEAD_ALIGN(*size);
-#ifdef HAVE_SKB_SMALL_HEAD_CACHE
 	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
 	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
 		obj = kmem_cache_alloc_node(skb_small_head_cache,
@@ -576,7 +566,6 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 		obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
 		goto out;
 	}
-#endif
 	*size = obj_size = kmalloc_size_roundup(obj_size);
 	/*
 	 * Try a regular allocation, when that fails and we're not entitled
@@ -898,11 +887,9 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
 static void skb_kfree_head(void *head, unsigned int end_offset)
 {
-#ifdef HAVE_SKB_SMALL_HEAD_CACHE
 	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
 		kmem_cache_free(skb_small_head_cache, head);
 	else
-#endif
 		kfree(head);
 }
@@ -2160,7 +2147,6 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
 	if (likely(skb_end_offset(skb) == saved_end_offset))
 		return 0;
-#ifdef HAVE_SKB_SMALL_HEAD_CACHE
 	/* We can not change skb->end if the original or new value
 	 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
 	 */
@@ -2174,7 +2160,6 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
 		WARN_ON_ONCE(1);
 		return 0;
 	}
-#endif
 	shinfo = skb_shinfo(skb);
@@ -4768,7 +4753,6 @@ void __init skb_init(void)
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
-#ifdef HAVE_SKB_SMALL_HEAD_CACHE
 	/* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes.
 	 * struct skb_shared_info is located at the end of skb->head,
 	 * and should not be copied to/from user.
@@ -4780,7 +4764,6 @@ void __init skb_init(void)
 						0,
 						SKB_SMALL_HEAD_HEADROOM,
 						NULL);
-#endif
 	skb_extensions_init();
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment