Commit 756dee75 authored by Christoph Lameter, committed by Pekka Enberg

SLUB: Get rid of dynamic DMA kmalloc cache allocation

Dynamic DMA kmalloc cache allocation is troublesome since the
new percpu allocator does not support allocations in atomic contexts.
Reserve some statically allocated kmalloc_cpu structures instead.
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 9dfc6e68
...@@ -131,11 +131,21 @@ struct kmem_cache { ...@@ -131,11 +131,21 @@ struct kmem_cache {
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2) #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
/* Reserve extra caches for potential DMA use */
#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#define KMALLOC_CACHES SLUB_PAGE_SHIFT
#endif
/* /*
* We keep the general caches in an array of slab caches that are used for * We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations. * 2^x bytes of allocations.
*/ */
extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT]; extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
/* /*
* Sorry that the following has to be that ugly but some versions of GCC * Sorry that the following has to be that ugly but some versions of GCC
...@@ -203,13 +213,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) ...@@ -203,13 +213,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
return &kmalloc_caches[index]; return &kmalloc_caches[index];
} }
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif
void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags); void *__kmalloc(size_t size, gfp_t flags);
......
...@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) ...@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{ {
int cpu; int cpu;
if (s < kmalloc_caches + SLUB_PAGE_SHIFT && s >= kmalloc_caches) if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
/* /*
* Boot time creation of the kmalloc array. Use static per cpu data * Boot time creation of the kmalloc array. Use static per cpu data
* since the per cpu allocator is not available yet. * since the per cpu allocator is not available yet.
...@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy); ...@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem * Kmalloc subsystem
*******************************************************************/ *******************************************************************/
struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned; struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches); EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str) static int __init setup_slub_min_order(char *str)
...@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) ...@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
char *text; char *text;
size_t realsize; size_t realsize;
unsigned long slabflags; unsigned long slabflags;
int i;
s = kmalloc_caches_dma[index]; s = kmalloc_caches_dma[index];
if (s) if (s)
...@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) ...@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
(unsigned int)realsize); (unsigned int)realsize);
if (flags & __GFP_WAIT) s = NULL;
s = kmalloc(kmem_size, flags & ~SLUB_DMA); for (i = 0; i < KMALLOC_CACHES; i++)
else { if (!kmalloc_caches[i].size)
int i; break;
s = NULL; BUG_ON(i >= KMALLOC_CACHES);
for (i = 0; i < SLUB_PAGE_SHIFT; i++) s = kmalloc_caches + i;
if (kmalloc_caches[i].size) {
s = kmalloc_caches + i;
break;
}
}
/* /*
* Must defer sysfs creation to a workqueue because we don't know * Must defer sysfs creation to a workqueue because we don't know
...@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) ...@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
if (!s || !text || !kmem_cache_open(s, flags, text, if (!s || !text || !kmem_cache_open(s, flags, text,
realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) { realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
kfree(s); s->size = 0;
kfree(text); kfree(text);
goto unlock_out; goto unlock_out;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment