Commit 1be1cb7b authored by Thomas Gleixner

debugobjects: replace static objects when slab cache becomes available

Impact: refactor/consolidate object management, prepare for delayed free

debugobjects allocates static reference objects to track objects which
are initialized or activated before the slab cache becomes
available. These static reference objects have to be handled
separately in free_object(). That special-case handling stands in the
way of implementing a delayed free functionality. The delayed free is
required to avoid callbacks into the mm code from
debug_check_no_obj_freed().
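
For context, this is roughly the special case being removed:
free_object() has to check whether an object lives inside
obj_static_pool before it may hand the object to kmem_cache_free().
The sketch below is an illustrative reconstruction, not the verbatim
pre-patch function; the pool counters (obj_pool_free, obj_pool_used)
follow the file's naming, but their handling is simplified here:

static void free_object(struct debug_obj *obj)
{
        unsigned long idx = (unsigned long) (obj - obj_static_pool);
        unsigned long flags;

        if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
                /* Static objects (and a depleted pool) go back on obj_pool */
                spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&obj->node, &obj_pool);
                obj_pool_free++;
                obj_pool_used--;
                spin_unlock_irqrestore(&pool_lock, flags);
        } else {
                /* Only dynamic objects may be returned to the slab cache */
                spin_lock_irqsave(&pool_lock, flags);
                obj_pool_used--;
                spin_unlock_irqrestore(&pool_lock, flags);
                kmem_cache_free(obj_cache, obj);
        }
}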

Replace the static object references with dynamic ones after the slab
cache has been initialized. The static objects are now marked
__initdata, so their memory is released once init is complete.
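
Marking the pool __initdata is safe because nothing references
obj_static_pool once the replacement has run: by the time init memory
is freed, every live reference points at a slab-allocated copy. A
rough sketch of the boot ordering (exact call sites may differ):

/*
 * Rough boot ordering (a sketch; exact call sites differ):
 *
 *   start_kernel()
 *     -> debug_objects_early_init()  // slab not ready: seed obj_pool
 *                                    //   from obj_static_pool[]
 *     -> mm_init()                   // slab allocator comes up
 *     -> debug_objects_mem_init()    // create obj_cache and call
 *                                    //   debug_objects_replace_static_objects()
 *   ...
 *   free_initmem()                   // __initdata, including
 *                                    //   obj_static_pool[], is released
 */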
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <200903162049.58058.nickpiggin@yahoo.com.au>
parent 3e8ebb5c
@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
@@ -883,6 +883,63 @@ void __init debug_objects_early_init(void)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
 }
 
+/*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+       struct debug_bucket *db = obj_hash;
+       struct hlist_node *node, *tmp;
+       struct debug_obj *obj, *new;
+       HLIST_HEAD(objects);
+       int i, cnt = 0;
+
+       for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+               obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+               if (!obj)
+                       goto free;
+               hlist_add_head(&obj->node, &objects);
+       }
+
+       /*
+        * When debug_objects_mem_init() is called we know that only
+        * one CPU is up, so disabling interrupts is enough
+        * protection. This avoids the lockdep hell of lock ordering.
+        */
+       local_irq_disable();
+
+       /* Remove the statically allocated objects from the pool */
+       hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+               hlist_del(&obj->node);
+       /* Move the allocated objects to the pool */
+       hlist_move_list(&objects, &obj_pool);
+
+       /* Replace the active object references */
+       for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+               hlist_move_list(&db->list, &objects);
+
+               hlist_for_each_entry(obj, node, &objects, node) {
+                       new = hlist_entry(obj_pool.first, typeof(*obj), node);
+                       hlist_del(&new->node);
+                       /* copy object data */
+                       *new = *obj;
+                       hlist_add_head(&new->node, &db->list);
+                       cnt++;
+               }
+       }
+
+       printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+              obj_pool_used);
+       local_irq_enable();
+       return 0;
+free:
+       hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+               hlist_del(&obj->node);
+               kmem_cache_free(obj_cache, obj);
+       }
+       return -ENOMEM;
+}
+
 /*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
@@ -898,8 +955,11 @@ void __init debug_objects_mem_init(void)
                        sizeof (struct debug_obj), 0,
                        SLAB_DEBUG_OBJECTS, NULL);
 
-       if (!obj_cache)
+       if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
-       else
+               if (obj_cache)
+                       kmem_cache_destroy(obj_cache);
+               printk(KERN_WARNING "ODEBUG: out of memory.\n");
+       } else
                debug_objects_selftest();
 }