Commit 9077fc22 authored by Hou Tao, committed by Alexei Starovoitov

bpf: Use kmalloc_size_roundup() to adjust size_index

Commit d52b5931 ("bpf: Adjust size_index according to the value of
KMALLOC_MIN_SIZE") uses KMALLOC_MIN_SIZE to adjust size_index, but as
reported by Nathan, the adjustment is not enough: __kmalloc_minalign()
also determines the minimal alignment of a slab object, as shown in
new_kmalloc_cache(), and its value may be greater than KMALLOC_MIN_SIZE
(e.g., 64 bytes vs 8 bytes under a riscv QEMU VM).
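
To make the mismatch concrete, here is a minimal userspace sketch (not
kernel code); the 8-byte KMALLOC_MIN_SIZE and 64-byte
__kmalloc_minalign() values below are assumptions matching the reported
riscv QEMU VM:

  #include <stdio.h>

  #define MIN_SIZE  8   /* assumed KMALLOC_MIN_SIZE */
  #define MIN_ALIGN 64  /* assumed __kmalloc_minalign() result */

  static unsigned int round_up_to(unsigned int size, unsigned int align)
  {
          return (size + align - 1) & ~(align - 1);
  }

  int main(void)
  {
          unsigned int size;

          /* Rounding only by KMALLOC_MIN_SIZE underestimates the real
           * slab object size whenever the minimal alignment is larger.
           */
          for (size = 8; size <= 64; size += 8)
                  printf("alloc %2u: min-size round-up %2u, real object %2u\n",
                         size, round_up_to(size, MIN_SIZE),
                         round_up_to(size, MIN_ALIGN));
          return 0;
  }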

Instead of invoking __kmalloc_minalign() in the bpf subsystem to find
the maximal alignment, just use kmalloc_size_roundup() directly to get
the corresponding slab object size for each allocation size. If these
two sizes differ, adjust size_index so that the allocation size selects
a bpf_mem_cache whose unit_size equals the object_size of the
underlying slab cache.
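
The loop below is a userspace sketch of that adjustment. The slab
object sizes (assuming a 64-byte minimum alignment), the mocked
kmalloc_size_roundup(), and the table that stores unit sizes directly
instead of bpf_mem_cache indexes are all simplifying assumptions for
illustration:

  #include <stdio.h>

  /* assumed slab object sizes with a 64-byte minimum alignment */
  static const unsigned int slab_sizes[] = { 64, 128, 192, 256 };

  static unsigned int mock_kmalloc_size_roundup(unsigned int size)
  {
          unsigned int i;

          for (i = 0; i < sizeof(slab_sizes) / sizeof(slab_sizes[0]); i++)
                  if (size <= slab_sizes[i])
                          return slab_sizes[i];
          return 0;
  }

  /* simplified stand-in for size_index: map (size - 1) / 8 straight to
   * a bpf_mem_cache unit_size instead of a cache index
   */
  static unsigned int unit_size[24] = {
          16, 16, 32, 32, 64, 64, 64, 64,
          96, 96, 96, 96, 128, 128, 128, 128,
          192, 192, 192, 192, 192, 192, 192, 192,
  };

  int main(void)
  {
          unsigned int size;

          for (size = 192; size >= 8; size -= 8) {
                  unsigned int kmalloc_size = mock_kmalloc_size_roundup(size);

                  /* allocation size already matches the slab object size */
                  if (kmalloc_size == size)
                          continue;
                  /* otherwise pick the cache whose unit_size equals the
                   * object_size of the underlying slab cache
                   */
                  if (unit_size[(size - 1) / 8] != kmalloc_size)
                          unit_size[(size - 1) / 8] = kmalloc_size;
          }

          for (size = 8; size <= 192; size += 8)
                  printf("alloc %3u -> bpf_mem_cache unit_size %3u\n",
                         size, unit_size[(size - 1) / 8]);
          return 0;
  }

With these assumptions, allocations of 8..64 bytes all map to the
64-byte cache, 72..128 bytes to the 128-byte cache, and 136..192 bytes
to the 192-byte cache, matching the object sizes the slab would hand
out.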

Fixes: 822fb26b ("bpf: Add a hint to allocated objects.")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Closes: https://lore.kernel.org/bpf/20230914181407.GA1000274@dev-arch.thelio-3990X/
Signed-off-by: Hou Tao <houtao1@huawei.com>
Tested-by: Emil Renner Berthing <emil.renner.berthing@canonical.com>
Link: https://lore.kernel.org/r/20230928101558.2594068-1-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d1a783da
@@ -965,37 +965,31 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
         return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
 
-/* Most of the logic is taken from setup_kmalloc_cache_index_table() */
 static __init int bpf_mem_cache_adjust_size(void)
 {
-        unsigned int size, index;
+        unsigned int size;
 
-        /* Normally KMALLOC_MIN_SIZE is 8-bytes, but it can be
-         * up-to 256-bytes.
+        /* Adjusting the indexes in size_index() according to the object_size
+         * of underlying slab cache, so bpf_mem_alloc() will select a
+         * bpf_mem_cache with unit_size equal to the object_size of
+         * the underlying slab cache.
+         *
+         * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
+         * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
          */
-        size = KMALLOC_MIN_SIZE;
-        if (size <= 192)
-                index = size_index[(size - 1) / 8];
-        else
-                index = fls(size - 1) - 1;
-        for (size = 8; size < KMALLOC_MIN_SIZE && size <= 192; size += 8)
-                size_index[(size - 1) / 8] = index;
+        for (size = 192; size >= 8; size -= 8) {
+                unsigned int kmalloc_size, index;
 
-        /* The minimal alignment is 64-bytes, so disable 96-bytes cache and
-         * use 128-bytes cache instead.
-         */
-        if (KMALLOC_MIN_SIZE >= 64) {
-                index = size_index[(128 - 1) / 8];
-                for (size = 64 + 8; size <= 96; size += 8)
-                        size_index[(size - 1) / 8] = index;
-        }
+                kmalloc_size = kmalloc_size_roundup(size);
+                if (kmalloc_size == size)
+                        continue;
 
-        /* The minimal alignment is 128-bytes, so disable 192-bytes cache and
-         * use 256-bytes cache instead.
-         */
-        if (KMALLOC_MIN_SIZE >= 128) {
-                index = fls(256 - 1) - 1;
-                for (size = 128 + 8; size <= 192; size += 8)
-                        size_index[(size - 1) / 8] = index;
-        }
+                if (kmalloc_size <= 192)
+                        index = size_index[(kmalloc_size - 1) / 8];
+                else
+                        index = fls(kmalloc_size - 1) - 1;
+                /* Only overwrite if necessary */
+                if (size_index[(size - 1) / 8] != index)
+                        size_index[(size - 1) / 8] = index;
+        }
...