Commit f875aa02 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] reduce function inlining in slab.c

From: Manfred Spraul <manfred@colorfullife.com>

slab.c contains too many inline functions:

- some functions that are not performance critical were inlined, which
  wastes text size.

- The debug code relies on __builtin_return_address(0) to keep track of
  the callers.  According to rmk, gcc didn't inline some functions as
  expected, and that resulted in useless debug output (see the sketch
  after this list).  This was probably caused by the large debug-only
  inline functions.
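
To illustrate the failure mode, here is a minimal user-space sketch;
record_caller() and api_alloc() are invented names for illustration,
not functions from slab.c:

#include <stdio.h>

/*
 * If gcc inlines record_caller() into api_alloc(), the builtin yields
 * api_alloc()'s caller, which is the information the debug code wants.
 * If gcc does NOT inline it, the builtin resolves in record_caller()'s
 * own frame, and every call records api_alloc() itself, which is
 * useless for tracking the real callers.
 */
static inline void *record_caller(void)
{
        return __builtin_return_address(0);
}

void *api_alloc(void)
{
        return record_caller();
}

int main(void)
{
        printf("recorded caller: %p\n", api_alloc());
        return 0;
}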

The attached patch removes most of the inline functions:

- the inline functions that are empty on release builds and huge on
  debug builds were replaced with empty macros on release and normal
  functions on debug (see the condensed sketch after this list).

- spurious inline keywords were removed.
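
Condensed from the check_irq_off() hunk in the diff below, the
replacement pattern is: a real function when DEBUG builds the checks
in, and a no-op macro otherwise, so release builds emit no text:

#if DEBUG
static void check_irq_off(void)
{
        BUG_ON(!irqs_disabled());
}
#else
#define check_irq_off() do { } while(0)
#endif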

The code is down to 6 inline functions: three one-liners for struct
abstractions, one for a might_sleep_if() test, and two for the
performance-critical __cache_alloc / __cache_free functions.

Note: if an embedded arch wants to save a few bytes by uninlining
__cache_{free,alloc}, the right way to do that is to fold the functions
into kmem_cache_{free,alloc} and then replace kmalloc with
kmem_cache_alloc(kmem_find_general_cachep(),).
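
As a sketch of that suggestion (not part of this patch), filling in the
elided arguments with the size/flags signature that
kmem_find_general_cachep() had at the time (treat the exact call as an
assumption), the folded kmalloc() would look roughly like:

void *kmalloc(size_t size, int flags)
{
        kmem_cache_t *cachep;

        /* look up the general cache backing this allocation size */
        cachep = kmem_find_general_cachep(size, flags);
        if (!cachep)
                return NULL;
        return kmem_cache_alloc(cachep, flags);
}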

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b167eef8
@@ -384,12 +384,12 @@ struct kmem_cache_s {
  * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
  * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static inline int obj_dbghead(kmem_cache_t *cachep)
+static int obj_dbghead(kmem_cache_t *cachep)
 {
         return cachep->dbghead;
 }
-static inline int obj_reallen(kmem_cache_t *cachep)
+static int obj_reallen(kmem_cache_t *cachep)
 {
         return cachep->reallen;
 }
@@ -413,30 +413,15 @@ static void **dbg_userword(kmem_cache_t *cachep, void *objp)
         BUG_ON(!(cachep->flags & SLAB_STORE_USER));
         return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
 }
 #else
-static inline int obj_dbghead(kmem_cache_t *cachep)
-{
-        return 0;
-}
-static inline int obj_reallen(kmem_cache_t *cachep)
-{
-        return cachep->objsize;
-}
-static inline unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
-{
-        BUG();
-        return 0;
-}
-static inline unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
-{
-        BUG();
-        return 0;
-}
-static inline void **dbg_userword(kmem_cache_t *cachep, void *objp)
-{
-        BUG();
-        return 0;
-}
+#define obj_dbghead(x) 0
+#define obj_reallen(cachep) (cachep->objsize)
+#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
+#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
 #endif
 /*
@@ -879,7 +864,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid)
 /*
  * Interface to system's page release.
  */
-static inline void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 {
         unsigned long i = (1<<cachep->gfporder);
         struct page *page = virt_to_page(addr);
@@ -1413,27 +1398,29 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
-static inline void check_irq_off(void)
-{
 #if DEBUG
+static void check_irq_off(void)
+{
         BUG_ON(!irqs_disabled());
-#endif
 }
 
-static inline void check_irq_on(void)
+static void check_irq_on(void)
 {
-#if DEBUG
         BUG_ON(irqs_disabled());
-#endif
 }
 
-static inline void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(kmem_cache_t *cachep)
 {
 #ifdef CONFIG_SMP
         check_irq_off();
         BUG_ON(spin_trylock(&cachep->spinlock));
 #endif
 }
+#else
+#define check_irq_off() do { } while(0)
+#define check_irq_on() do { } while(0)
+#define check_spinlock_acquired(x) do { } while(0)
+#endif
 
 /*
  * Waits for all CPUs to execute func().
@@ -1596,7 +1583,7 @@ int kmem_cache_destroy (kmem_cache_t * cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
-static inline struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
+static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
                         void *objp, int colour_off, int local_flags)
 {
         struct slab *slabp;
@@ -1779,15 +1766,16 @@ static int cache_grow (kmem_cache_t * cachep, int flags)
         return 0;
 }
 
+#if DEBUG
 /*
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
  * - destructor calls, for caches with POISON+dtor
  */
-static inline void kfree_debugcheck(const void *objp)
+static void kfree_debugcheck(const void *objp)
 {
-#if DEBUG
         struct page *page;
 
         if (!virt_addr_valid(objp)) {
@@ -1800,12 +1788,10 @@ static inline void kfree_debugcheck(const void *objp)
                 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
                 BUG();
         }
-#endif
 }
 
-static inline void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
+static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
 {
-#if DEBUG
         struct page *page;
         unsigned int objnr;
         struct slab *slabp;
@@ -1867,13 +1853,11 @@ static inline void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
                 poison_obj(cachep, objp, POISON_FREE);
 #endif
         }
-#endif
         return objp;
 }
 
-static inline void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 {
-#if DEBUG
         int i;
         int entries = 0;
@@ -1897,8 +1881,12 @@ static inline void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
                 printk("\n");
                 BUG();
         }
-#endif
 }
+#else
+#define kfree_debugcheck(x) do { } while(0)
+#define cache_free_debugcheck(x,objp,z) (objp)
+#define check_slabp(x,y) do { } while(0)
+#endif
 
 static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
 {
@@ -2005,11 +1993,11 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
 #endif
 }
 
-static inline void *
+#if DEBUG
+static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
                         unsigned long flags, void *objp, void *caller)
 {
-#if DEBUG
         if (!objp)
                 return objp;
         if (cachep->flags & SLAB_POISON) {
@@ -2045,9 +2033,11 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
                 cachep->ctor(objp, cachep, ctor_flags);
         }
-#endif
         return objp;
 }
+#else
+#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
+#endif
 
 static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
@@ -2693,7 +2683,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
  * If we cannot acquire the cache chain semaphore then just give up - we'll
  * try again next timer interrupt.
  */
-static inline void cache_reap (void)
+static void cache_reap (void)
 {
         struct list_head *walk;