Commit 1f04b07d authored by Thomas Gleixner, committed by Vlastimil Babka

slub: Make PREEMPT_RT support less convoluted

The slub code already has a few helpers depending on PREEMPT_RT. Add a few
more and get rid of the CONFIG_PREEMPT_RT conditionals all over the place.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: linux-mm@kvack.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 5875e598
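
The cleanup relies on a standard trick: USE_LOCKLESS_FAST_PATH() expands to a compile-time constant, so ordinary if () statements can replace #ifdef CONFIG_PREEMPT_RT blocks while the compiler still parses and type-checks both branches and discards the dead one. Below is a minimal standalone sketch of that pattern, not kernel code; the DEMO_PREEMPT_RT switch is a hypothetical stand-in for the real kernel config.

/*
 * Minimal sketch of the predicate-macro pattern (not kernel code).
 * DEMO_PREEMPT_RT stands in for CONFIG_PREEMPT_RT; build with
 * -DDEMO_PREEMPT_RT to flip the constant.
 */
#include <stdbool.h>
#include <stdio.h>

#ifndef DEMO_PREEMPT_RT
#define USE_LOCKLESS_FAST_PATH()        (true)
#else
#define USE_LOCKLESS_FAST_PATH()        (false)
#endif

static void free_one_object(void)
{
        /* Both branches are compiled and type-checked; only one survives. */
        if (USE_LOCKLESS_FAST_PATH())
                puts("lockless fastpath (cmpxchg on the cpu freelist)");
        else
                puts("locked fastpath (freelist update under local_lock)");
}

int main(void)
{
        free_one_object();
        return 0;
}
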
@@ -104,9 +104,11 @@
  * except the stat counters. This is a percpu structure manipulated only by
  * the local cpu, so the lock protects against being preempted or interrupted
  * by an irq. Fast path operations rely on lockless operations instead.
- * On PREEMPT_RT, the local lock does not actually disable irqs (and thus
- * prevent the lockless operations), so fastpath operations also need to take
- * the lock and are no longer lockless.
+ *
+ * On PREEMPT_RT, the local lock neither disables interrupts nor preemption
+ * which means the lockless fastpath cannot be used as it might interfere with
+ * an in-progress slow path operations. In this case the local lock is always
+ * taken but it still utilizes the freelist for the common operations.
  *
  * lockless fastpaths
  *
@@ -167,8 +169,9 @@
  * function call even on !PREEMPT_RT, use inline preempt_disable() there.
  */
 #ifndef CONFIG_PREEMPT_RT
 #define slub_get_cpu_ptr(var)           get_cpu_ptr(var)
 #define slub_put_cpu_ptr(var)           put_cpu_ptr(var)
+#define USE_LOCKLESS_FAST_PATH()        (true)
 #else
 #define slub_get_cpu_ptr(var)           \
 ({                                      \
@@ -180,6 +183,7 @@ do { \
         (void)(var);                    \
         migrate_enable();               \
 } while (0)
+#define USE_LOCKLESS_FAST_PATH()        (false)
 #endif

 #ifdef CONFIG_SLUB_DEBUG
@@ -474,7 +478,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
                 void *freelist_new, unsigned long counters_new,
                 const char *n)
 {
-        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+        if (USE_LOCKLESS_FAST_PATH())
                 lockdep_assert_irqs_disabled();
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
@@ -3288,14 +3292,8 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l

         object = c->freelist;
         slab = c->slab;
-        /*
-         * We cannot use the lockless fastpath on PREEMPT_RT because if a
-         * slowpath has taken the local_lock_irqsave(), it is not protected
-         * against a fast path operation in an irq handler. So we need to take
-         * the slow path which uses local_lock. It is still relatively fast if
-         * there is a suitable cpu freelist.
-         */
-        if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
+
+        if (!USE_LOCKLESS_FAST_PATH() ||
             unlikely(!object || !slab || !node_match(slab, node))) {
                 object = __slab_alloc(s, gfpflags, node, addr, c);
         } else {
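
For orientation, here is a heavily simplified standalone outline of the allocation-side decision after this hunk; every demo_* name is a hypothetical stand-in, not the kernel API. On PREEMPT_RT the helper is constant false, so the condition is always true and allocation always funnels into __slab_alloc(), which takes the local lock but can still serve objects from the cpu freelist.

/* Simplified standalone outline; demo_* names are stand-ins, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define USE_LOCKLESS_FAST_PATH()        (false) /* pretend CONFIG_PREEMPT_RT=y */

static const char *demo_slab_alloc_slowpath(void)
{
        /* Stands in for __slab_alloc(): local lock taken, cpu freelist still used. */
        puts("__slab_alloc() under local_lock");
        return "object";
}

static const char *demo_slab_alloc_node(const char *object, bool node_match)
{
        /* On RT the first operand is constant true, so this branch always wins. */
        if (!USE_LOCKLESS_FAST_PATH() || !object || !node_match)
                return demo_slab_alloc_slowpath();

        /* !RT only: the this_cpu_cmpxchg_double() fastpath (omitted here). */
        puts("lockless fastpath");
        return object;
}

int main(void)
{
        demo_slab_alloc_node("cached object", true);
        return 0;
}
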
@@ -3555,6 +3553,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
         void *tail_obj = tail ? : head;
         struct kmem_cache_cpu *c;
         unsigned long tid;
+        void **freelist;

 redo:
         /*
@@ -3569,9 +3568,13 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
         /* Same with comment on barrier() in slab_alloc_node() */
         barrier();

-        if (likely(slab == c->slab)) {
-#ifndef CONFIG_PREEMPT_RT
-                void **freelist = READ_ONCE(c->freelist);
+        if (unlikely(slab != c->slab)) {
+                __slab_free(s, slab, head, tail_obj, cnt, addr);
+                return;
+        }
+
+        if (USE_LOCKLESS_FAST_PATH()) {
+                freelist = READ_ONCE(c->freelist);

                 set_freepointer(s, tail_obj, freelist);

@@ -3583,16 +3586,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
                         note_cmpxchg_failure("slab_free", s, tid);
                         goto redo;
                 }
-#else /* CONFIG_PREEMPT_RT */
-                /*
-                 * We cannot use the lockless fastpath on PREEMPT_RT because if
-                 * a slowpath has taken the local_lock_irqsave(), it is not
-                 * protected against a fast path operation in an irq handler. So
-                 * we need to take the local_lock. We shouldn't simply defer to
-                 * __slab_free() as that wouldn't use the cpu freelist at all.
-                 */
-                void **freelist;
-
+        } else {
+                /* Update the free list under the local lock */
                 local_lock(&s->cpu_slab->lock);
                 c = this_cpu_ptr(s->cpu_slab);
                 if (unlikely(slab != c->slab)) {
@@ -3607,11 +3602,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
                 c->freelist = head;
                 c->tid = next_tid(tid);
                 local_unlock(&s->cpu_slab->lock);
-#endif
+        }
         stat(s, FREE_FASTPATH);
-        } else
-                __slab_free(s, slab, head, tail_obj, cnt, addr);
-
 }

 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
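
Taken together, the three do_slab_free() hunks above replace the old #ifdef split with one straight-line flow: bail out to __slab_free() early when the slab does not match the cpu slab, then either do the lockless cmpxchg (USE_LOCKLESS_FAST_PATH() true) or update the freelist under the local lock. The following is a heavily simplified standalone sketch of that flow, with demo_* stand-ins for the kernel types and helpers; the tid/cmpxchg machinery is deliberately reduced to a plain assignment.

/* Simplified standalone sketch of the new control flow; all demo_* names are
 * stand-ins and the cmpxchg/tid machinery is reduced to an assignment. */
#include <stdbool.h>
#include <stdio.h>

#define USE_LOCKLESS_FAST_PATH()        (true)  /* pretend !CONFIG_PREEMPT_RT */

struct demo_cpu_slab {
        int slab;               /* id of the cpu's current slab */
        int freelist;           /* head object of the cpu freelist */
};

static void demo_slab_free_slow(void)
{
        puts("__slab_free(): slow path");
}

static void demo_do_slab_free(struct demo_cpu_slab *c, int slab, int object)
{
        /* 1) Wrong slab: hand off to the slow path and return early. */
        if (slab != c->slab) {
                demo_slab_free_slow();
                return;
        }

        if (USE_LOCKLESS_FAST_PATH()) {
                /* 2a) !RT: lockless update, really a this_cpu_cmpxchg_double(). */
                c->freelist = object;
        } else {
                /* 2b) RT: the same freelist update, but under local_lock(). */
                c->freelist = object;
        }
        puts("FREE_FASTPATH");
}

int main(void)
{
        struct demo_cpu_slab c = { .slab = 1, .freelist = 0 };

        demo_do_slab_free(&c, 1, 42);   /* matches the cpu slab: fast path */
        demo_do_slab_free(&c, 2, 43);   /* mismatch: slow path */
        return 0;
}
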