Commit 43570fd2 authored by Heiko Carstens, committed by Linus Torvalds

mm,slub,x86: decouple size of struct page from CONFIG_CMPXCHG_LOCAL

While implementing cmpxchg_double() on s390, I realized that we don't set
CONFIG_CMPXCHG_LOCAL even though we do have support for it.

However, setting that option increases the size of struct page by eight
bytes on 64 bit, which we certainly do not want. It also makes no sense
for a present cpu feature to increase the size of struct page.
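
To illustrate the size effect: giving a struct a double word alignment
attribute also rounds its size up to a multiple of that alignment. A minimal
user-space sketch, assuming an LP64 target; the struct names and the
seven-word payload (56 bytes, standing in for struct page) are purely
illustrative:

    #include <stdio.h>

    /* Seven words = 56 bytes on an LP64 target; not a multiple of 16. */
    struct page_plain {
            unsigned long words[7];
    };

    /* Same payload with the alignment that CONFIG_CMPXCHG_LOCAL forced
     * onto struct page: sizeof() is rounded up from 56 to 64 bytes. */
    struct page_aligned {
            unsigned long words[7];
    } __attribute__((__aligned__(2 * sizeof(unsigned long))));

    int main(void)
    {
            printf("plain:   %zu bytes\n", sizeof(struct page_plain));   /* 56 */
            printf("aligned: %zu bytes\n", sizeof(struct page_aligned)); /* 64 */
            return 0;
    }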

Besides that, the dependency on CMPXCHG_LOCAL looks wrong; it should be on
CMPXCHG_DOUBLE instead.
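
For reference, cmpxchg_double() compares and swaps two adjacent machine
words as a single atomic unit (SLUB applies it to the page->freelist and
page->counters pair), and the instructions behind it, such as x86's
cmpxchg16b, require the pair to be aligned to twice the word size. A rough
user-space sketch of the idea using GCC's generic __atomic builtins; the
struct, the helper and main() are illustrative only, not kernel API, and on
x86-64 this needs -mcx16 or linking with -latomic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Two adjacent words, aligned so a 16-byte wide CAS may operate on
     * them, mirroring the freelist/counters pair in struct page. */
    struct pair {
            void *freelist;
            unsigned long counters;
    } __attribute__((__aligned__(2 * sizeof(unsigned long))));

    static bool pair_cas(struct pair *p, struct pair old, struct pair new)
    {
            /* Compare *p against 'old' and, on match, store 'new':
             * both words succeed or fail together. */
            return __atomic_compare_exchange(p, &old, &new, false,
                                             __ATOMIC_SEQ_CST,
                                             __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            struct pair p = { NULL, 0 };
            struct pair old = { NULL, 0 };
            struct pair new = { &p, 1 };

            printf("swapped: %d\n", pair_cas(&p, old, new)); /* 1 */
            return 0;
    }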

This patch:

An architecture that supports CMPXCHG_LOCAL shouldn't automatically end up
with a larger struct page just because the SLUB allocator is used. Instead,
introduce a new config option "HAVE_ALIGNED_STRUCT_PAGE", which an
architecture can select when a double word aligned struct page is required.
Also update the x86 Kconfig accordingly, so that x86 behaves as before.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0d259cf8
arch/Kconfig
@@ -185,4 +185,12 @@ config HAVE_RCU_TABLE_FREE
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
 
+config HAVE_ALIGNED_STRUCT_PAGE
+	bool
+	help
+	  This makes sure that struct pages are double word aligned and that
+	  e.g. the SLUB allocator can perform double word atomic operations
+	  on a struct page for better performance. However selecting this
+	  might increase the size of a struct page by a word.
+
 source "kernel/gcov/Kconfig"
arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
 	select PERF_EVENTS
 	select HAVE_PERF_EVENTS_NMI
 	select ANON_INODES
+	select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_USER_RETURN_NOTIFIER
 	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
include/linux/mm_types.h
@@ -151,12 +151,11 @@ struct page {
 #endif
 }
 /*
- * If another subsystem starts using the double word pairing for atomic
- * operations on struct page then it must change the #if to ensure
- * proper alignment of the page struct.
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
  */
-#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
-	__attribute__((__aligned__(2*sizeof(unsigned long))))
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+	__aligned(2 * sizeof(unsigned long))
 #endif
 ;
mm/slub.c
@@ -366,7 +366,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 		const char *n)
 {
 	VM_BUG_ON(!irqs_disabled());
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
 			freelist_old, counters_old,
@@ -400,7 +400,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_new, unsigned long counters_new,
 		const char *n)
 {
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
 			freelist_old, counters_old,
@@ -3014,7 +3014,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 		}
 	}
 
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
 		/* Enable fast mode */
 		s->flags |= __CMPXCHG_DOUBLE;