Commit 260b61dd authored by Joonsoo Kim, committed by Linus Torvalds

mm/slab: remove the checks for slab implementation bug

Some of the "#if DEBUG" blocks are for reporting slab implementation bugs rather
than user usecase bugs.  They are not really needed because slab has been stable
for quite a long time, and they make the code too dirty.  This patch removes
them.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6fb92430
...@@ -2110,8 +2110,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) ...@@ -2110,8 +2110,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (!(flags & SLAB_DESTROY_BY_RCU)) if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON; flags |= SLAB_POISON;
#endif #endif
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif #endif
/* /*
...@@ -2368,9 +2366,6 @@ static int drain_freelist(struct kmem_cache *cache, ...@@ -2368,9 +2366,6 @@ static int drain_freelist(struct kmem_cache *cache,
} }
page = list_entry(p, struct page, lru); page = list_entry(p, struct page, lru);
#if DEBUG
BUG_ON(page->active);
#endif
list_del(&page->lru); list_del(&page->lru);
/* /*
* Safe to drop the lock. The slab is no longer linked * Safe to drop the lock. The slab is no longer linked
...@@ -2528,30 +2523,23 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) ...@@ -2528,30 +2523,23 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
} }
} }
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
int nodeid)
{ {
void *objp; void *objp;
objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
page->active++; page->active++;
#if DEBUG
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
return objp; return objp;
} }
static void slab_put_obj(struct kmem_cache *cachep, struct page *page, static void slab_put_obj(struct kmem_cache *cachep,
void *objp, int nodeid) struct page *page, void *objp)
{ {
unsigned int objnr = obj_to_index(cachep, page, objp); unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG #if DEBUG
unsigned int i; unsigned int i;
/* Verify that the slab belongs to the intended node */
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
/* Verify double free bug */ /* Verify double free bug */
for (i = page->active; i < cachep->num; i++) { for (i = page->active; i < cachep->num; i++) {
if (get_free_obj(page, i) == objnr) { if (get_free_obj(page, i) == objnr) {
...@@ -2817,8 +2805,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -2817,8 +2805,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
STATS_INC_ACTIVE(cachep); STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep); STATS_SET_HIGH(cachep);
ac_put_obj(cachep, ac, slab_get_obj(cachep, page, ac_put_obj(cachep, ac, slab_get_obj(cachep, page));
node));
} }
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
...@@ -3101,7 +3088,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, ...@@ -3101,7 +3088,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
BUG_ON(page->active == cachep->num); BUG_ON(page->active == cachep->num);
obj = slab_get_obj(cachep, page, nodeid); obj = slab_get_obj(cachep, page);
n->free_objects--; n->free_objects--;
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
list_del(&page->lru); list_del(&page->lru);
...@@ -3252,7 +3239,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, ...@@ -3252,7 +3239,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
page = virt_to_head_page(objp); page = virt_to_head_page(objp);
list_del(&page->lru); list_del(&page->lru);
check_spinlock_acquired_node(cachep, node); check_spinlock_acquired_node(cachep, node);
slab_put_obj(cachep, page, objp, node); slab_put_obj(cachep, page, objp);
STATS_DEC_ACTIVE(cachep); STATS_DEC_ACTIVE(cachep);
n->free_objects++; n->free_objects++;
...@@ -3282,9 +3269,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) ...@@ -3282,9 +3269,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
LIST_HEAD(list); LIST_HEAD(list);
batchcount = ac->batchcount; batchcount = ac->batchcount;
#if DEBUG
BUG_ON(!batchcount || batchcount > ac->avail);
#endif
check_irq_off(); check_irq_off();
n = get_node(cachep, node); n = get_node(cachep, node);
spin_lock(&n->list_lock); spin_lock(&n->list_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment