Commit f67dd6ce authored by Linus Torvalds

Merge tag 'slab-for-6.1-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:
 "Most are small fixups as described below.

  The !CONFIG_TRACING fix is a bit bigger and would normally be done in
  the next merge window as part of upcoming hardening changes. But we
  realized it can make the kmalloc waste tracking introduced in this
  window inaccurate, so decided to go with it now.

  Summary:

   - Remove !CONFIG_TRACING kmalloc() wrappers intended to save a
     function call, due to incompatibility with recently introduced
     wasted space tracking and planned hardening changes.

   - A tracing parameter regression fix, by Kees Cook.

   - Two kernel-doc warning fixups, by Lukas Bulwahn and myself"

* tag 'slab-for-6.1-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm, slab: remove duplicate kernel-doc comment for ksize()
  mm/slab_common: Restore passing "caller" for tracing
  mm/slab: remove !CONFIG_TRACING variants of kmalloc_[node_]trace()
  mm/slab_common: repair kernel-doc for __ksize()
parents f141df37 c18c20f1
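
To make the waste-tracking point in the pull message concrete, here is a minimal sketch (illustrative only, not code from this tree; the function name is hypothetical) of why the inline !CONFIG_TRACING wrappers had to go: they routed the allocation through kmem_cache_alloc(), so the requested size reached only KASAN, and the slab layer could never record how much of the kmalloc bucket was actually asked for. The out-of-line kmalloc_trace() kept by this merge passes size down to __kmem_cache_alloc_node() instead, as the second file in the diff below shows.

	/* Hypothetical name; mirrors the wrapper removed in the hunk below. */
	static __always_inline void *inline_kmalloc_trace(struct kmem_cache *s,
							  gfp_t flags, size_t size)
	{
		void *ret = kmem_cache_alloc(s, flags);	/* `size` is lost here */

		return kasan_kmalloc(s, ret, size, flags);	/* only KASAN sees it */
	}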
@@ -470,35 +470,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									  __malloc;
 
-#ifdef CONFIG_TRACING
 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 		    __assume_kmalloc_alignment __alloc_size(3);
 
 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 			 int node, size_t size) __assume_kmalloc_alignment
 						__alloc_size(4);
-#else /* CONFIG_TRACING */
-/* Save a function call when CONFIG_TRACING=n */
-static __always_inline __alloc_size(3)
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-{
-	void *ret = kmem_cache_alloc(s, flags);
-
-	ret = kasan_kmalloc(s, ret, size, flags);
-	return ret;
-}
-
-static __always_inline __alloc_size(4)
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-			 int node, size_t size)
-{
-	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
-
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-#endif /* CONFIG_TRACING */
 
 void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
 					      __alloc_size(1);
...
@@ -941,7 +941,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = __kmalloc_large_node(size, flags, node);
-		trace_kmalloc(_RET_IP_, ret, size,
+		trace_kmalloc(caller, ret, size,
 			      PAGE_SIZE << get_order(size), flags, node);
 		return ret;
 	}
@@ -953,7 +953,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
 	ret = kasan_kmalloc(s, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret, size, s->size, flags, node);
+	trace_kmalloc(caller, ret, size, s->size, flags, node);
 	return ret;
 }
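
Why this one-argument change matters, sketched from the surrounding 6.1 code (simplified, not part of the diff): __do_kmalloc_node() is only reached through thin wrappers, so _RET_IP_ evaluated inside it resolves to the wrapper's call site, and the kmalloc tracepoint blamed the wrapper rather than the code that actually allocated.

	/* Simplified call chain: `caller` carries the real allocation site. */
	void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					  unsigned long caller)
	{
		return __do_kmalloc_node(size, flags, node, caller);
	}
	/* Inside __do_kmalloc_node(), _RET_IP_ would instead point back into
	 * this wrapper, so trace_kmalloc() must use `caller`, as restored above. */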
@@ -1010,7 +1010,7 @@ EXPORT_SYMBOL(kfree);
 /**
  * __ksize -- Report full size of underlying allocation
- * @objp: pointer to the object
+ * @object: pointer to the object
  *
  * This should only be used internally to query the true size of allocations.
  * It is not meant to be a way to discover the usable size of an allocation
@@ -1018,7 +1018,7 @@ EXPORT_SYMBOL(kfree);
  * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
  * and/or FORTIFY_SOURCE.
  *
- * Return: size of the actual memory used by @objp in bytes
+ * Return: size of the actual memory used by @object in bytes
  */
 size_t __ksize(const void *object)
 {
@@ -1040,7 +1040,6 @@ size_t __ksize(const void *object)
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 
-#ifdef CONFIG_TRACING
 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
@@ -1064,7 +1063,6 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_node_trace);
-#endif /* !CONFIG_TRACING */
 #endif /* !CONFIG_SLOB */
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
@@ -1411,20 +1409,6 @@ void kfree_sensitive(const void *p)
 }
 EXPORT_SYMBOL(kfree_sensitive);
 
-/**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
- *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
- *
- * Return: size of the actual memory used by @objp in bytes
- */
 size_t ksize(const void *objp)
 {
 	size_t size;
...
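
The kernel-doc deleted in this last hunk was a duplicate (the shortlog's "remove duplicate kernel-doc comment for ksize()"), and the behavior it describes still holds: kmalloc() rounds a request up to a bucket size, and ksize() reports the bucket. A small hedged example (bucket sizes depend on configuration; 16 bytes is typical for a 13-byte request on SLUB):

	char *p = kmalloc(13, GFP_KERNEL);

	if (p) {
		pr_info("requested 13, ksize() reports %zu\n", ksize(p)); /* commonly 16 */
		/* Per the new __ksize() kernel-doc above, using memory beyond
		 * the requested 13 bytes may trigger KASAN, UBSAN_BOUNDS
		 * and/or FORTIFY_SOURCE, even though ksize() says it exists. */
		kfree(p);
	}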