Commit 8ab1372f authored by Christoph Lameter, committed by Linus Torvalds

SLUB: Fix CONFIG_SLUB_DEBUG use for CONFIG_NUMA

We currently cannot disable CONFIG_SLUB_DEBUG when CONFIG_NUMA is enabled.  Now that
embedded systems are starting to use NUMA, we may need this.

Put #ifdefs around the places where NUMA-only code uses fields that are only
valid under CONFIG_SLUB_DEBUG.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a0e1d1be
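
For context, the full list and the object-tracking helpers touched in the diff
below exist only when SLUB debugging is compiled in, which is why the call sites
need guarding. A minimal sketch of the relevant part of the per-node structure
(field layout approximate, see mm/slub.c of this era for the real definition):

	/* Sketch only; not the exact upstream definition. */
	struct kmem_cache_node {
		spinlock_t list_lock;		/* Protects the partial list and nr_slabs */
		atomic_long_t nr_slabs;		/* Number of slabs on this node */
		struct list_head partial;	/* Partially allocated slabs */
	#ifdef CONFIG_SLUB_DEBUG
		struct list_head full;		/* Fully allocated slabs, tracked for debugging only */
	#endif
	};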
mm/slub.c

@@ -1853,7 +1853,9 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
 	INIT_LIST_HEAD(&n->full);
+#endif
 }
 
 #ifdef CONFIG_NUMA
@@ -1881,8 +1883,10 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
 	page->freelist = get_freepointer(kmalloc_caches, n);
 	page->inuse++;
 	kmalloc_caches->node[node] = n;
+#ifdef CONFIG_SLUB_DEBUG
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
+#endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
 	add_partial(n, page);
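
The option being made switchable is CONFIG_SLUB_DEBUG in init/Kconfig. Roughly,
for kernels of this era (exact prompt text and dependencies may differ):

	# Sketch of the Kconfig entry; wording is approximate.
	config SLUB_DEBUG
		default y
		bool "Enable SLUB debugging support" if EMBEDDED
		depends on SLUB
		help
		  SLUB has extensive debug support features. Disabling these can
		  result in significant savings in code size.

With this change, an embedded NUMA configuration can turn SLUB_DEBUG off without
the NUMA-only code paths referencing fields that no longer exist.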