Commit 5f0d5a3a authored by Paul E. McKenney

mm: Rename SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU

A group of Linux kernel hackers reported chasing a bug that resulted
from their assumption that SLAB_DESTROY_BY_RCU provided an existence
guarantee, that is, that no block from such a slab would be reallocated
during an RCU read-side critical section.  Of course, that is not the
case.  Instead, SLAB_DESTROY_BY_RCU only prevents freeing of an entire
slab of blocks.

However, there is a phrase for this, namely "type safety".  This commit
therefore renames SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU in order
to avoid future instances of this sort of confusion.
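
By way of illustration, a lookup against a SLAB_TYPESAFE_BY_RCU cache has
to take a reference and then re-validate the object, because the object
may have been freed and reallocated (as another object of the same type)
at any time.  A minimal sketch follows; "struct foo", foo_match(),
foo_put() and the ->refcnt and ->node fields are made-up names used only
for illustration:

    struct foo *foo_lookup(struct hlist_head *head, unsigned long key)
    {
        struct foo *obj;

        rcu_read_lock();
        hlist_for_each_entry_rcu(obj, head, node) {
            if (!foo_match(obj, key))
                continue;
            /* The slab cannot vanish, but this object may already
             * have been recycled as a different struct foo, so take
             * a reference only if it is still live...
             */
            if (!atomic_inc_not_zero(&obj->refcnt))
                continue;
            /* ...and re-check identity after obtaining the reference. */
            if (!foo_match(obj, key)) {
                foo_put(obj);
                continue;
            }
            break;
        }
        rcu_read_unlock();
        return obj;   /* NULL if not found */
    }

Real users of this pattern (see the net/llc and nf_conntrack hunks below)
additionally use nulls-marked lists so that a lookup can detect when an
object has moved to another hash chain; see
Documentation/RCU/rculist_nulls.txt.
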
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
[ paulmck: Add comments mentioning the old name, as requested by Eric
  Dumazet, in order to help people familiar with the old name find
  the new one. ]
Acked-by: David Rientjes <rientjes@google.com>
parent 4495c08e
@@ -17,7 +17,7 @@ rcu_dereference.txt
 rcubarrier.txt
     - RCU and Unloadable Modules
 rculist_nulls.txt
-    - RCU list primitives for use with SLAB_DESTROY_BY_RCU
+    - RCU list primitives for use with SLAB_TYPESAFE_BY_RCU
 rcuref.txt
     - Reference-count design for elements of lists/arrays protected by RCU
 rcu.txt
......
 Using hlist_nulls to protect read-mostly linked lists and
-objects using SLAB_DESTROY_BY_RCU allocations.
+objects using SLAB_TYPESAFE_BY_RCU allocations.
 Please read the basics in Documentation/RCU/listRCU.txt
@@ -7,7 +7,7 @@ Using special makers (called 'nulls') is a convenient way
 to solve following problem :
 A typical RCU linked list managing objects which are
-allocated with SLAB_DESTROY_BY_RCU kmem_cache can
+allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can
 use following algos :
 1) Lookup algo
@@ -96,7 +96,7 @@ unlock_chain(); // typically a spin_unlock()
 3) Remove algo
 --------------
 Nothing special here, we can use a standard RCU hlist deletion.
-But thanks to SLAB_DESTROY_BY_RCU, beware a deleted object can be reused
+But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused
 very very fast (before the end of RCU grace period)
 if (put_last_reference_on(obj) {
......
@@ -925,7 +925,8 @@ d. Do you need RCU grace periods to complete even in the face
 e.  Is your workload too update-intensive for normal use of
     RCU, but inappropriate for other synchronization mechanisms?
-    If so, consider SLAB_DESTROY_BY_RCU. But please be careful!
+    If so, consider SLAB_TYPESAFE_BY_RCU (which was originally
+    named SLAB_DESTROY_BY_RCU). But please be careful!
 f.  Do you need read-side critical sections that are respected
     even though they are in the middle of the idle loop, during
......
@@ -4552,7 +4552,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
     dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
                 SLAB_HWCACHE_ALIGN |
                 SLAB_RECLAIM_ACCOUNT |
-                SLAB_DESTROY_BY_RCU);
+                SLAB_TYPESAFE_BY_RCU);
     if (!dev_priv->requests)
         goto err_vmas;
......
@@ -493,7 +493,7 @@ static inline struct drm_i915_gem_request *
 __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 {
     /* Performing a lockless retrieval of the active request is super
-     * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+     * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
      * slab of request objects will not be freed whilst we hold the
      * RCU read lock. It does not guarantee that the request itself
      * will not be freed and then *reused*. Viz,
......
@@ -1071,7 +1071,7 @@ int ldlm_init(void)
     ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                 sizeof(struct ldlm_lock), 0,
                 SLAB_HWCACHE_ALIGN |
-                SLAB_DESTROY_BY_RCU, NULL);
+                SLAB_TYPESAFE_BY_RCU, NULL);
     if (!ldlm_lock_slab) {
         kmem_cache_destroy(ldlm_resource_slab);
         return -ENOMEM;
......
@@ -2340,7 +2340,7 @@ static int jbd2_journal_init_journal_head_cache(void)
     jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
                 sizeof(struct journal_head),
                 0, /* offset */
-                SLAB_TEMPORARY | SLAB_DESTROY_BY_RCU,
+                SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
                 NULL); /* ctor */
     retval = 0;
     if (!jbd2_journal_head_cache) {
......
@@ -38,7 +38,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
     /*
      * The lockless check can race with remove_wait_queue() in progress,
      * but in this case its caller should run under rcu_read_lock() and
-     * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
+     * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
      */
     if (likely(!waitqueue_active(wqh)))
         return;
......
@@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
  *
  * Function returns NULL if no refcount could be obtained, or the fence.
  * This function handles acquiring a reference to a fence that may be
- * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
  * so long as the caller is using RCU on the pointer to the fence.
  *
  * An alternative mechanism is to employ a seqlock to protect a bunch of
@@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
         * have successfully acquire a reference to it. If it no
         * longer matches, we are holding a reference to some other
         * reallocated pointer. This is possible if the allocator
-        * is using a freelist like SLAB_DESTROY_BY_RCU where the
+        * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
         * fence remains valid for the RCU grace period, but it
         * may be reallocated. When using such allocators, we are
         * responsible for ensuring the reference we get is to
......
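
The comment above captures the key idiom for SLAB_TYPESAFE_BY_RCU objects:
take the reference first, then confirm that the RCU-protected pointer still
refers to the same object.  Roughly, as a sketch loosely following the
dma_fence_get_rcu_safe() kernel-doc above (assuming the caller is inside an
RCU read-side critical section):

    struct dma_fence *fence;

    do {
        fence = rcu_dereference(*fencep);
        if (!fence || !dma_fence_get_rcu(fence))
            return NULL;
        /* A SLAB_TYPESAFE_BY_RCU fence may have been freed and
         * reallocated between the dereference and the kref get, so
         * only a pointer that still matches is the fence we wanted.
         */
        if (fence == rcu_access_pointer(*fencep))
            return rcu_pointer_handoff(fence);
        dma_fence_put(fence);
    } while (1);
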
@@ -28,7 +28,7 @@
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
 /*
- * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
  * This delays freeing the SLAB page by a grace period, it does _NOT_
  * delay object freeing. This means that if you do kmem_cache_free()
@@ -61,8 +61,10 @@
  *
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
+ *
+ * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
......
@@ -993,7 +993,7 @@ struct smc_hashinfo;
 struct module;
 /*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
  * un-modified. Special care is taken when initializing object to zero.
  */
 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
......
@@ -1313,7 +1313,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
     if (atomic_dec_and_test(&sighand->count)) {
         signalfd_cleanup(sighand);
         /*
-         * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
+         * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
          * without an RCU grace period, see __lock_task_sighand().
          */
         kmem_cache_free(sighand_cachep, sighand);
@@ -2144,7 +2144,7 @@ void __init proc_caches_init(void)
 {
     sighand_cachep = kmem_cache_create("sighand_cache",
             sizeof(struct sighand_struct), 0,
-            SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+            SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
             SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
     signal_cachep = kmem_cache_create("signal_cache",
             sizeof(struct signal_struct), 0,
......
@@ -1237,7 +1237,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
         }
         /*
          * This sighand can be already freed and even reused, but
-         * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
+         * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
          * initializes ->siglock: this slab can't go away, it has
          * the same object type, ->siglock can't be reinitialized.
          *
......
@@ -413,7 +413,7 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
         *size += sizeof(struct kasan_alloc_meta);
     /* Add free meta. */
-    if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+    if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
         cache->object_size < sizeof(struct kasan_free_meta)) {
         cache->kasan_info.free_meta_offset = *size;
         *size += sizeof(struct kasan_free_meta);
@@ -561,7 +561,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
     unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
     /* RCU slabs could be legally used after free within the RCU period */
-    if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+    if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
         return;
     kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
@@ -572,7 +572,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
     s8 shadow_byte;
     /* RCU slabs could be legally used after free within the RCU period */
-    if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+    if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
         return false;
     shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
......
@@ -95,7 +95,7 @@ void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 {
     /* TODO: RCU freeing is unsupported for now; hide false positives. */
-    if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+    if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
         kmemcheck_mark_freed(object, size);
 }
......
@@ -430,7 +430,7 @@ static void anon_vma_ctor(void *data)
 void __init anon_vma_init(void)
 {
     anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-            0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
+            0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
             anon_vma_ctor);
     anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
             SLAB_PANIC|SLAB_ACCOUNT);
@@ -481,7 +481,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
      * If this page is still mapped, then its anon_vma cannot have been
      * freed. But if it has been unmapped, we have no security against the
      * anon_vma structure being freed and reused (for another anon_vma:
-     * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+     * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
      * above cannot corrupt).
      */
     if (!page_mapped(page)) {
......
@@ -1728,7 +1728,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
     freelist = page->freelist;
     slab_destroy_debugcheck(cachep, page);
-    if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
+    if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
         call_rcu(&page->rcu_head, kmem_rcu_free);
     else
         kmem_freepages(cachep, page);
@@ -1924,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
     cachep->num = 0;
-    if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
+    if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
         return false;
     left = calculate_slab_order(cachep, size,
@@ -2030,7 +2030,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
     if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
                 2 * sizeof(unsigned long long)))
         flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
-    if (!(flags & SLAB_DESTROY_BY_RCU))
+    if (!(flags & SLAB_TYPESAFE_BY_RCU))
         flags |= SLAB_POISON;
 #endif
 #endif
......
@@ -126,7 +126,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
-             SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+             SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -415,7 +415,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
      * back there or track user information then we can
      * only use the space before that information.
      */
-    if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+    if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
         return s->inuse;
     /*
      * Else we can use all the padding etc for the allocation
......
@@ -39,7 +39,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  * Set of flags that will prevent slab merging
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-        SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+        SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
         SLAB_FAILSLAB | SLAB_KASAN)
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
@@ -500,7 +500,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
     struct kmem_cache *s, *s2;
     /*
-     * On destruction, SLAB_DESTROY_BY_RCU kmem_caches are put on the
+     * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
      * @slab_caches_to_rcu_destroy list. The slab pages are freed
      * through RCU and and the associated kmem_cache are dereferenced
      * while freeing the pages, so the kmem_caches should be freed only
@@ -537,7 +537,7 @@ static int shutdown_cache(struct kmem_cache *s)
     memcg_unlink_cache(s);
     list_del(&s->list);
-    if (s->flags & SLAB_DESTROY_BY_RCU) {
+    if (s->flags & SLAB_TYPESAFE_BY_RCU) {
         list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
         schedule_work(&slab_caches_to_rcu_destroy_work);
     } else {
......
@@ -126,7 +126,7 @@ static inline void clear_slob_page_free(struct page *sp)
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
- * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
  * the block using call_rcu.
  */
 struct slob_rcu {
@@ -524,7 +524,7 @@ EXPORT_SYMBOL(ksize);
 int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-    if (flags & SLAB_DESTROY_BY_RCU) {
+    if (flags & SLAB_TYPESAFE_BY_RCU) {
         /* leave room for rcu footer at the end of object */
         c->size += sizeof(struct slob_rcu);
     }
@@ -598,7 +598,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
     kmemleak_free_recursive(b, c->flags);
-    if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+    if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
         struct slob_rcu *slob_rcu;
         slob_rcu = b + (c->size - sizeof(struct slob_rcu));
         slob_rcu->size = c->size;
......
@@ -1687,7 +1687,7 @@ static void rcu_free_slab(struct rcu_head *h)
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
-    if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
+    if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
         struct rcu_head *head;
         if (need_reserve_slab_rcu) {
@@ -2963,7 +2963,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
      * slab_free_freelist_hook() could have put the items into quarantine.
      * If so, no need to free them.
      */
-    if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+    if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
         return;
     do_slab_free(s, page, head, tail, cnt, addr);
 }
@@ -3433,7 +3433,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
      * the slab may touch the object after free or before allocation
      * then we should never poison the object itself.
      */
-    if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
+    if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
         !s->ctor)
         s->flags |= __OBJECT_POISON;
     else
@@ -3455,7 +3455,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
      */
     s->inuse = size;
-    if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+    if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
         s->ctor)) {
         /*
          * Relocate free pointer after the object if it is not
@@ -3537,7 +3537,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
     s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
     s->reserved = 0;
-    if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+    if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
         s->reserved = sizeof(struct rcu_head);
     if (!calculate_sizes(s, -1))
@@ -5042,7 +5042,7 @@ SLAB_ATTR_RO(cache_dma);
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
-    return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+    return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
 }
 SLAB_ATTR_RO(destroy_by_rcu);
......
@@ -950,7 +950,7 @@ static struct proto dccp_v4_prot = {
     .orphan_count = &dccp_orphan_count,
     .max_header = MAX_DCCP_HEADER,
     .obj_size = sizeof(struct dccp_sock),
-    .slab_flags = SLAB_DESTROY_BY_RCU,
+    .slab_flags = SLAB_TYPESAFE_BY_RCU,
     .rsk_prot = &dccp_request_sock_ops,
     .twsk_prot = &dccp_timewait_sock_ops,
     .h.hashinfo = &dccp_hashinfo,
......
@@ -1012,7 +1012,7 @@ static struct proto dccp_v6_prot = {
     .orphan_count = &dccp_orphan_count,
     .max_header = MAX_DCCP_HEADER,
     .obj_size = sizeof(struct dccp6_sock),
-    .slab_flags = SLAB_DESTROY_BY_RCU,
+    .slab_flags = SLAB_TYPESAFE_BY_RCU,
     .rsk_prot = &dccp6_request_sock_ops,
     .twsk_prot = &dccp6_timewait_sock_ops,
     .h.hashinfo = &dccp_hashinfo,
......
@@ -2398,7 +2398,7 @@ struct proto tcp_prot = {
     .sysctl_rmem = sysctl_tcp_rmem,
     .max_header = MAX_TCP_HEADER,
     .obj_size = sizeof(struct tcp_sock),
-    .slab_flags = SLAB_DESTROY_BY_RCU,
+    .slab_flags = SLAB_TYPESAFE_BY_RCU,
     .twsk_prot = &tcp_timewait_sock_ops,
     .rsk_prot = &tcp_request_sock_ops,
     .h.hashinfo = &tcp_hashinfo,
......
@@ -1919,7 +1919,7 @@ struct proto tcpv6_prot = {
     .sysctl_rmem = sysctl_tcp_rmem,
     .max_header = MAX_TCP_HEADER,
     .obj_size = sizeof(struct tcp6_sock),
-    .slab_flags = SLAB_DESTROY_BY_RCU,
+    .slab_flags = SLAB_TYPESAFE_BY_RCU,
     .twsk_prot = &tcp6_timewait_sock_ops,
     .rsk_prot = &tcp6_request_sock_ops,
     .h.hashinfo = &tcp_hashinfo,
......
@@ -142,7 +142,7 @@ static struct proto llc_proto = {
     .name = "LLC",
     .owner = THIS_MODULE,
     .obj_size = sizeof(struct llc_sock),
-    .slab_flags = SLAB_DESTROY_BY_RCU,
+    .slab_flags = SLAB_TYPESAFE_BY_RCU,
 };
 /**
......
@@ -506,7 +506,7 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
 again:
     sk_nulls_for_each_rcu(rc, node, laddr_hb) {
         if (llc_estab_match(sap, daddr, laddr, rc)) {
-            /* Extra checks required by SLAB_DESTROY_BY_RCU */
+            /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
             if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                 goto again;
             if (unlikely(llc_sk(rc)->sap != sap ||
@@ -565,7 +565,7 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
 again:
     sk_nulls_for_each_rcu(rc, node, laddr_hb) {
         if (llc_listener_match(sap, laddr, rc)) {
-            /* Extra checks required by SLAB_DESTROY_BY_RCU */
+            /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
             if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                 goto again;
             if (unlikely(llc_sk(rc)->sap != sap ||
......
@@ -328,7 +328,7 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
 again:
     sk_nulls_for_each_rcu(rc, node, laddr_hb) {
         if (llc_dgram_match(sap, laddr, rc)) {
-            /* Extra checks required by SLAB_DESTROY_BY_RCU */
+            /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
             if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                 goto again;
             if (unlikely(llc_sk(rc)->sap != sap ||
......
@@ -914,7 +914,7 @@ static unsigned int early_drop_list(struct net *net,
             continue;
         /* kill only if still in same netns -- might have moved due to
-         * SLAB_DESTROY_BY_RCU rules.
+         * SLAB_TYPESAFE_BY_RCU rules.
          *
          * We steal the timer reference. If that fails timer has
          * already fired or someone else deleted it. Just drop ref
@@ -1069,7 +1069,7 @@ __nf_conntrack_alloc(struct net *net,
     /*
      * Do not use kmem_cache_zalloc(), as this cache uses
-     * SLAB_DESTROY_BY_RCU.
+     * SLAB_TYPESAFE_BY_RCU.
      */
     ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
     if (ct == NULL)
@@ -1114,7 +1114,7 @@ void nf_conntrack_free(struct nf_conn *ct)
     struct net *net = nf_ct_net(ct);
     /* A freed object has refcnt == 0, that's
-     * the golden rule for SLAB_DESTROY_BY_RCU
+     * the golden rule for SLAB_TYPESAFE_BY_RCU
      */
     NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
@@ -1878,7 +1878,7 @@ int nf_conntrack_init_start(void)
     nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                 sizeof(struct nf_conn),
                 NFCT_INFOMASK + 1,
-                SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+                SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
     if (!nf_conntrack_cachep)
         goto err_cachep;
......
@@ -101,7 +101,7 @@ struct proto smc_proto = {
     .unhash = smc_unhash_sk,
     .obj_size = sizeof(struct smc_sock),
     .h.smc_hash = &smc_v4_hashinfo,
-    .slab_flags = SLAB_DESTROY_BY_RCU,
+    .slab_flags = SLAB_TYPESAFE_BY_RCU,
 };
 EXPORT_SYMBOL_GPL(smc_proto);
......