Commit 64096c17 authored by Linus Torvalds

Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLUB: Fix per-cpu merge conflict
  failslab: add ability to filter slab caches
  slab: fix regression in touched logic
  dma kmalloc handling fixes
  slub: remove impossible condition
  slab: initialize unused alien cache entry as NULL at alloc_alien_cache().
  SLUB: Make slub statistics use this_cpu_inc
  SLUB: this_cpu: Remove slub kmem_cache fields
  SLUB: Get rid of dynamic DMA kmalloc cache allocation
  SLUB: Use this_cpu operations in slub
parents cc7889ff 1154fab7
@@ -41,6 +41,7 @@ Possible debug options are
 P		Poisoning (object and padding)
 U		User tracking (free and alloc)
 T		Trace (please only use on single slabs)
+A		Toggle failslab filter mark for the cache
 O		Switch debugging off for caches that would have
 		caused higher minimum slab orders
 -		Switch all debugging off (useful if the kernel is
...
@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */
 
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+				unsigned long flags)
 {
 	return false;
 }
...
@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK		0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+#else
+# define SLAB_FAILSLAB		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
...
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };
 
 /*
@@ -135,11 +129,21 @@ struct kmem_cache {
 
 #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
 */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
...
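The new KMALLOC_CACHES constant above sizes the static kmalloc_caches[] array so that DMA variants can later be carved out of the same array instead of being allocated at run time. A quick standalone sketch of the arithmetic, assuming a 4 KiB page (PAGE_SHIFT = 12); the concrete numbers are illustrative, not part of the patch:

#include <stdio.h>

/* Illustrative arithmetic only; mirrors the macros above for PAGE_SHIFT = 12. */
#define PAGE_SHIFT	12
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)		/* 14: caches up to 2^13 bytes */
#define KMALLOC_CACHES	(2 * SLUB_PAGE_SHIFT - 6)	/* 22 slots with CONFIG_ZONE_DMA */

int main(void)
{
	printf("regular kmalloc caches: %d\n", SLUB_PAGE_SHIFT);	/* 14 */
	printf("total static slots:     %d\n", KMALLOC_CACHES);	/* 22 */
	return 0;
}

With CONFIG_ZONE_DMA this leaves 8 spare slots (22 - 14) that dma_kmalloc_cache() can claim later in this diff.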
 #include <linux/fault-inject.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 static struct {
 	struct fault_attr attr;
 	u32 ignore_gfp_wait;
+	int cache_filter;
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 	struct dentry *ignore_gfp_wait_file;
+	struct dentry *cache_filter_file;
 #endif
 } failslab = {
 	.attr = FAULT_ATTR_INITIALIZER,
 	.ignore_gfp_wait = 1,
+	.cache_filter = 0,
 };
 
-bool should_failslab(size_t size, gfp_t gfpflags)
+bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
 	if (gfpflags & __GFP_NOFAIL)
 		return false;
@@ -20,6 +24,9 @@ bool should_failslab(size_t size, gfp_t gfpflags)
 	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
 		return false;
 
+	if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+		return false;
+
 	return should_fail(&failslab.attr, size);
 }
 
@@ -30,7 +37,6 @@ static int __init setup_failslab(char *str)
 __setup("failslab=", setup_failslab);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
 static int __init failslab_debugfs_init(void)
 {
 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
@@ -46,8 +52,14 @@ static int __init failslab_debugfs_init(void)
 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
 				      &failslab.ignore_gfp_wait);
 
-	if (!failslab.ignore_gfp_wait_file) {
+	failslab.cache_filter_file =
+		debugfs_create_bool("cache-filter", mode, dir,
+				      &failslab.cache_filter);
+
+	if (!failslab.ignore_gfp_wait_file ||
+	    !failslab.cache_filter_file) {
 		err = -ENOMEM;
+		debugfs_remove(failslab.cache_filter_file);
 		debugfs_remove(failslab.ignore_gfp_wait_file);
 		cleanup_fault_attr_dentries(&failslab.attr);
 	}
...
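The filter added above changes when an allocation is allowed to fail: once cache-filter is enabled, only caches whose flags carry SLAB_FAILSLAB remain eligible for injected failures. A minimal userspace model of that decision; the gfp constants and the model_* names are stand-ins for illustration, and the real function ends in should_fail() rather than returning true directly:

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the filter above; constant values are illustrative. */
#define SLAB_FAILSLAB	0x02000000UL
#define __GFP_NOFAIL	0x800u
#define __GFP_WAIT	0x10u

static bool model_should_failslab(unsigned gfpflags, unsigned long cache_flags,
				  bool ignore_gfp_wait, bool cache_filter)
{
	if (gfpflags & __GFP_NOFAIL)
		return false;
	if (ignore_gfp_wait && (gfpflags & __GFP_WAIT))
		return false;
	/* New rule: with cache-filter on, only marked caches may fail. */
	if (cache_filter && !(cache_flags & SLAB_FAILSLAB))
		return false;
	return true;	/* the kernel would call should_fail(&attr, size) here */
}

int main(void)
{
	printf("%d\n", model_should_failslab(0, 0, false, true));		/* 0: unmarked cache */
	printf("%d\n", model_should_failslab(0, SLAB_FAILSLAB, false, true));	/* 1: marked cache */
	return 0;
}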
@@ -935,7 +935,6 @@ static int transfer_objects(struct array_cache *to,
 
 	from->avail -= nr;
 	to->avail += nr;
-	to->touched = 1;
 	return nr;
 }
 
@@ -983,13 +982,11 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 	if (limit > 1)
 		limit = 12;
 
-	ac_ptr = kmalloc_node(memsize, gfp, node);
+	ac_ptr = kzalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
-			if (i == node || !node_online(i)) {
-				ac_ptr[i] = NULL;
+			if (i == node || !node_online(i))
 				continue;
-			}
 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
@@ -2963,8 +2960,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	spin_lock(&l3->list_lock);
 
 	/* See if we can refill from the shared array */
-	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
+		l3->shared->touched = 1;
 		goto alloc_done;
+	}
 
 	while (batchcount > 0) {
 		struct list_head *entry;
@@ -3101,7 +3100,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 	if (cachep == &cache_cache)
 		return false;
 
-	return should_failslab(obj_size(cachep), flags);
+	return should_failslab(obj_size(cachep), flags, cachep->flags);
 }
 
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
...
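The touched-logic fix above stops transfer_objects() from marking the destination array as recently used; the refill path now marks the shared source array instead, restoring the original aging behaviour. A small standalone model, with toy_* stand-ins for the kernel structures:

#include <stdio.h>

/* Toy model of two array caches; names mirror the kernel's but are standalone. */
struct toy_array_cache {
	int avail;
	int touched;	/* "recently used" hint consulted by the cache reaper */
};

/* Move up to nr objects; note: no longer touches the destination. */
static int toy_transfer_objects(struct toy_array_cache *to,
				struct toy_array_cache *from, int nr)
{
	if (nr > from->avail)
		nr = from->avail;
	if (!nr)
		return 0;
	from->avail -= nr;
	to->avail += nr;
	return nr;
}

int main(void)
{
	struct toy_array_cache shared = { .avail = 8 }, ac = { .avail = 0 };

	/* Refill path: only the shared array it drew from is marked touched. */
	if (toy_transfer_objects(&ac, &shared, 4))
		shared.touched = 1;

	printf("ac.avail=%d ac.touched=%d shared.touched=%d\n",
	       ac.avail, ac.touched, shared.touched);	/* 4 0 1 */
	return 0;
}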
@@ -151,7 +151,8 @@
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
+		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+		SLAB_FAILSLAB)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)
@@ -217,10 +218,10 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 
 #endif
 
-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
-	c->stat[si]++;
+	__this_cpu_inc(s->cpu_slab->stat[si]);
 #endif
 }
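stat() now takes the kmem_cache itself and bumps the counter with a this_cpu operation on the embedded per-cpu structure, rather than first resolving a kmem_cache_cpu pointer for the current CPU. A rough userspace approximation follows; the kernel's __this_cpu_inc is a single per-cpu increment, whereas here the CPU index is passed explicitly, and all toy_* names are illustrative:

#include <stdio.h>

enum toy_stat_item { ALLOC_FASTPATH, FREE_FASTPATH, NR_TOY_STAT_ITEMS };
#define NR_TOY_CPUS 4

/* Model: one stats array per cpu, reached from the cache itself. */
struct toy_cache_cpu { unsigned stat[NR_TOY_STAT_ITEMS]; };
struct toy_cache     { struct toy_cache_cpu cpu_slab[NR_TOY_CPUS]; };

/* Approximation of __this_cpu_inc(s->cpu_slab->stat[si]). */
static void toy_stat(struct toy_cache *s, int cpu, enum toy_stat_item si)
{
	s->cpu_slab[cpu].stat[si]++;
}

int main(void)
{
	static struct toy_cache s;

	toy_stat(&s, 0, ALLOC_FASTPATH);
	toy_stat(&s, 0, ALLOC_FASTPATH);
	toy_stat(&s, 1, FREE_FASTPATH);
	printf("cpu0 alloc=%u cpu1 free=%u\n",
	       s.cpu_slab[0].stat[ALLOC_FASTPATH],
	       s.cpu_slab[1].stat[FREE_FASTPATH]);	/* 2 1 */
	return 0;
}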
@@ -242,15 +243,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 #endif
 }
 
-static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
-{
-#ifdef CONFIG_SMP
-	return s->cpu_slab[cpu];
-#else
-	return &s->cpu_slab;
-#endif
-}
-
 /* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
@@ -269,13 +261,6 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 	return 1;
 }
 
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
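With the per-cpu copies of offset and objsize gone, the fast paths read and write the free-list link through get_freepointer()/set_freepointer(), which keep it at byte offset s->offset inside the free object itself. A standalone sketch of that layout; the toy_* names and the offset value are illustrative:

#include <stdio.h>

/* Minimal model: the free-list link lives inside the free object at s->offset. */
struct toy_kmem_cache { size_t offset; };

static void *toy_get_freepointer(struct toy_kmem_cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

static void toy_set_freepointer(struct toy_kmem_cache *s, void *object, void *fp)
{
	*(void **)((char *)object + s->offset) = fp;
}

int main(void)
{
	struct toy_kmem_cache s = { .offset = 0 };	/* link stored at start of the object */
	void *a[8], *b[8];				/* two "free objects" */

	toy_set_freepointer(&s, a, b);		/* a -> b */
	toy_set_freepointer(&s, b, NULL);	/* b is the last free object */
	printf("%s\n", toy_get_freepointer(&s, a) == b ? "a links to b" : "?");
	return 0;
}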
@@ -1020,6 +1005,9 @@ static int __init setup_slub_debug(char *str)
 		case 't':
 			slub_debug |= SLAB_TRACE;
 			break;
+		case 'a':
+			slub_debug |= SLAB_FAILSLAB;
+			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
 				"unknown. skipped\n", *str);
@@ -1124,7 +1112,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		if (!page)
 			return NULL;
 
-		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+		stat(s, ORDER_FALLBACK);
 	}
 
 	if (kmemcheck_enabled
@@ -1422,23 +1410,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
 
 	__ClearPageSlubFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist) {
 			add_partial(n, page, tail);
-			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
-			stat(c, DEACTIVATE_FULL);
+			stat(s, DEACTIVATE_FULL);
 			if (SLABDEBUG && PageSlubDebug(page) &&
 						(s->flags & SLAB_STORE_USER))
 				add_full(n, page);
 		}
 		slab_unlock(page);
 	} else {
-		stat(c, DEACTIVATE_EMPTY);
+		stat(s, DEACTIVATE_EMPTY);
 		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
@@ -1454,7 +1441,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
-			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
+			stat(s, FREE_SLAB);
 			discard_slab(s, page);
 		}
 	}
@@ -1469,7 +1456,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	int tail = 1;
 
 	if (page->freelist)
-		stat(c, DEACTIVATE_REMOTE_FREES);
+		stat(s, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1482,10 +1469,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		c->freelist = get_freepointer(s, c->freelist);
 
 		/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
+		set_freepointer(s, object, page->freelist);
 		page->freelist = object;
 		page->inuse--;
 	}
@@ -1495,7 +1482,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	stat(c, CPUSLAB_FLUSH);
+	stat(s, CPUSLAB_FLUSH);
 	slab_lock(c->page);
 	deactivate_slab(s, c);
 }
@@ -1507,7 +1494,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
  */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
-	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	if (likely(c && c->page))
 		flush_slab(s, c);
@@ -1635,7 +1622,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 
-	stat(c, ALLOC_REFILL);
+	stat(s, ALLOC_REFILL);
 
 load_freelist:
 	object = c->page->freelist;
@@ -1644,13 +1631,13 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
 		goto debug;
 
-	c->freelist = object[c->offset];
+	c->freelist = get_freepointer(s, object);
 	c->page->inuse = c->page->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
-	stat(c, ALLOC_SLOWPATH);
+	stat(s, ALLOC_SLOWPATH);
 	return object;
 
 another_slab:
@@ -1660,7 +1647,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	new = get_partial(s, gfpflags, node);
 	if (new) {
 		c->page = new;
-		stat(c, ALLOC_FROM_PARTIAL);
+		stat(s, ALLOC_FROM_PARTIAL);
 		goto load_freelist;
 	}
 
@@ -1673,8 +1660,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	local_irq_disable();
 
 	if (new) {
-		c = get_cpu_slab(s, smp_processor_id());
-		stat(c, ALLOC_SLAB);
+		c = __this_cpu_ptr(s->cpu_slab);
+		stat(s, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
 		slab_lock(new);
@@ -1690,7 +1677,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->page->freelist = get_freepointer(s, object);
 	c->node = -1;
 	goto unlock_out;
 }
@@ -1711,35 +1698,33 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
-	unsigned int objsize;
 
 	gfpflags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
-	if (should_failslab(s->objsize, gfpflags))
+	if (should_failslab(s->objsize, gfpflags, s->flags))
 		return NULL;
 
 	local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
-	objsize = c->objsize;
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	c = __this_cpu_ptr(s->cpu_slab);
+	object = c->freelist;
+	if (unlikely(!object || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		object = c->freelist;
-		c->freelist = object[c->offset];
-		stat(c, ALLOC_FASTPATH);
+		c->freelist = get_freepointer(s, object);
+		stat(s, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, objsize);
+		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
-	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
 
 	return object;
 }
@@ -1794,26 +1779,25 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr, unsigned int offset)
+			void *x, unsigned long addr)
 {
 	void *prior;
 	void **object = (void *)x;
-	struct kmem_cache_cpu *c;
 
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	stat(c, FREE_SLOWPATH);
+	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
 		goto debug;
 
 checks_ok:
-	prior = object[offset] = page->freelist;
+	prior = page->freelist;
+	set_freepointer(s, object, prior);
 	page->freelist = object;
 	page->inuse--;
 
 	if (unlikely(PageSlubFrozen(page))) {
-		stat(c, FREE_FROZEN);
+		stat(s, FREE_FROZEN);
 		goto out_unlock;
 	}
@@ -1826,7 +1810,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 */
 	if (unlikely(!prior)) {
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
-		stat(c, FREE_ADD_PARTIAL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 
 out_unlock:
@@ -1839,10 +1823,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * Slab still on the partial list.
 		 */
 		remove_partial(s, page);
-		stat(c, FREE_REMOVE_PARTIAL);
+		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-	stat(c, FREE_SLAB);
+	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 	return;
@@ -1872,17 +1856,17 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
-	c = get_cpu_slab(s, smp_processor_id());
-	kmemcheck_slab_free(s, object, c->objsize);
-	debug_check_no_locks_freed(object, c->objsize);
+	c = __this_cpu_ptr(s->cpu_slab);
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, c->objsize);
+		debug_check_no_obj_freed(object, s->objsize);
 
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
+		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
-		stat(c, FREE_FASTPATH);
+		stat(s, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, addr);
 
 	local_irq_restore(flags);
 }
@@ -2069,19 +2053,6 @@ static unsigned long calculate_alignment(unsigned long flags,
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_cpu(struct kmem_cache *s,
-			struct kmem_cache_cpu *c)
-{
-	c->page = NULL;
-	c->freelist = NULL;
-	c->node = 0;
-	c->offset = s->offset / sizeof(void *);
-	c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
@@ -2095,130 +2066,24 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-#ifdef CONFIG_SMP
-/*
- * Per cpu array for per cpu structures.
- *
- * The per cpu array places all kmem_cache_cpu structures from one processor
- * close together meaning that it becomes possible that multiple per cpu
- * structures are contained in one cacheline. This may be particularly
- * beneficial for the kmalloc caches.
- *
- * A desktop system typically has around 60-80 slabs. With 100 here we are
- * likely able to get per cpu structures for all caches from the array defined
- * here. We must be able to cover all kmalloc caches during bootstrap.
- *
- * If the per cpu array is exhausted then fall back to kmalloc
- * of individual cachelines. No sharing is possible then.
- */
-#define NR_KMEM_CACHE_CPU 100
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
-		      kmem_cache_cpu);
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
-
-static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
-							int cpu, gfp_t flags)
-{
-	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
-
-	if (c)
-		per_cpu(kmem_cache_cpu_free, cpu) =
-				(void *)c->freelist;
-	else {
-		/* Table overflow: So allocate ourselves */
-		c = kmalloc_node(
-			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
-			flags, cpu_to_node(cpu));
-		if (!c)
-			return NULL;
-	}
-
-	init_kmem_cache_cpu(s, c);
-	return c;
-}
-
-static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
-{
-	if (c < per_cpu(kmem_cache_cpu, cpu) ||
-			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
-		kfree(c);
-		return;
-	}
-	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
-	per_cpu(kmem_cache_cpu_free, cpu) = c;
-}
-
-static void free_kmem_cache_cpus(struct kmem_cache *s)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c) {
-			s->cpu_slab[cpu] = NULL;
-			free_kmem_cache_cpu(c, cpu);
-		}
-	}
-}
-
-static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c)
-			continue;
-
-		c = alloc_kmem_cache_cpu(s, cpu, flags);
-		if (!c) {
-			free_kmem_cache_cpus(s);
-			return 0;
-		}
-		s->cpu_slab[cpu] = c;
-	}
-	return 1;
-}
-
-/*
- * Initialize the per cpu array.
- */
-static void init_alloc_cpu_cpu(int cpu)
-{
-	int i;
-
-	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
-		return;
-
-	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
-		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
-
-	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
-}
-
-static void __init init_alloc_cpu(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		init_alloc_cpu_cpu(cpu);
-}
-
-#else
-static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
-static inline void init_alloc_cpu(void) {}
-
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-	init_kmem_cache_cpu(s, &s->cpu_slab);
-	return 1;
-}
-#endif
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
+
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+{
+	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
+		/*
+		 * Boot time creation of the kmalloc array. Use static per cpu data
+		 * since the per cpu allocator is not available yet.
+		 */
+		s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
+	else
+		s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
+
+	if (!s->cpu_slab)
+		return 0;
+
+	return 1;
+}
 
 #ifdef CONFIG_NUMA
 /*
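alloc_kmem_cache_cpus() now has two cases: during early boot the per-cpu allocator is not yet available, so the static kmalloc caches take their kmem_cache_cpu storage from the kmalloc_percpu[] array at the same index, while every later cache uses alloc_percpu(). A standalone sketch of that selection by pointer arithmetic, with toy_* types and calloc() standing in for alloc_percpu():

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins; sizes and names are illustrative only. */
#define KMALLOC_CACHES 22
struct toy_cache_cpu { void **freelist; };
struct toy_cache     { struct toy_cache_cpu *cpu_slab; };

static struct toy_cache     kmalloc_caches[KMALLOC_CACHES];
static struct toy_cache_cpu kmalloc_percpu[KMALLOC_CACHES];

static int toy_alloc_cache_cpus(struct toy_cache *s)
{
	if (s >= kmalloc_caches && s < kmalloc_caches + KMALLOC_CACHES)
		/* Boot-time cache: reuse the static slot at the same index. */
		s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
	else
		/* Anything created later: dynamic allocation. */
		s->cpu_slab = calloc(1, sizeof(*s->cpu_slab));

	return s->cpu_slab != NULL;
}

int main(void)
{
	struct toy_cache later;

	toy_alloc_cache_cpus(&kmalloc_caches[3]);
	toy_alloc_cache_cpus(&later);
	printf("boot cache uses static slot: %d\n",
	       kmalloc_caches[3].cpu_slab == &kmalloc_percpu[3]);	/* 1 */
	printf("later cache uses heap: %d\n", later.cpu_slab != NULL);	/* 1 */
	free(later.cpu_slab);
	return 0;
}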
@@ -2287,7 +2152,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int node;
 	int local_node;
 
-	if (slab_state >= UP)
+	if (slab_state >= UP && (s < kmalloc_caches ||
+			s > kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
@@ -2502,6 +2368,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
 		return 1;
+
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
@@ -2609,9 +2476,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	int node;
 
 	flush_all(s);
-
+	free_percpu(s->cpu_slab);
 	/* Attempt to free all objects */
-	free_kmem_cache_cpus(s);
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
@@ -2651,7 +2517,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2741,6 +2607,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	char *text;
 	size_t realsize;
 	unsigned long slabflags;
+	int i;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2760,7 +2627,14 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	realsize = kmalloc_caches[index].objsize;
 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			 (unsigned int)realsize);
-	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+	s = NULL;
+	for (i = 0; i < KMALLOC_CACHES; i++)
+		if (!kmalloc_caches[i].size)
+			break;
+
+	BUG_ON(i >= KMALLOC_CACHES);
+	s = kmalloc_caches + i;
 
 	/*
 	 * Must defer sysfs creation to a workqueue because we don't know
@@ -2772,9 +2646,9 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	if (slab_state >= SYSFS)
 		slabflags |= __SYSFS_ADD_DEFERRED;
 
-	if (!s || !text || !kmem_cache_open(s, flags, text,
+	if (!text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		kfree(s);
+		s->size = 0;
 		kfree(text);
 		goto unlock_out;
 	}
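dma_kmalloc_cache() no longer kmallocs a fresh kmem_cache; it claims an unused slot (size == 0) from the statically sized kmalloc_caches[] array and releases the slot again by zeroing size if kmem_cache_open() fails. A standalone model of the slot scan; the toy_* names and the pre-populated sizes are illustrative:

#include <stdio.h>

/* Toy model of claiming a free slot in a fixed-size cache array. */
#define KMALLOC_CACHES 22
struct toy_cache { int size; };	/* size == 0 means "slot unused" */

static struct toy_cache kmalloc_caches[KMALLOC_CACHES] = {
	[0] = { .size = 8 }, [1] = { .size = 16 }, [2] = { .size = 32 },
};

static struct toy_cache *toy_claim_slot(void)
{
	int i;

	for (i = 0; i < KMALLOC_CACHES; i++)
		if (!kmalloc_caches[i].size)
			break;
	if (i >= KMALLOC_CACHES)
		return NULL;	/* the kernel would BUG_ON() here */
	return kmalloc_caches + i;
}

int main(void)
{
	struct toy_cache *s = toy_claim_slot();

	if (!s)
		return 1;
	printf("claimed slot index %td\n", s - kmalloc_caches);	/* 3 */
	s->size = 0;	/* on failure the slot is released by zeroing size */
	return 0;
}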
@@ -3176,8 +3050,6 @@ void __init kmem_cache_init(void)
 	int i;
 	int caches = 0;
 
-	init_alloc_cpu();
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -3261,8 +3133,10 @@ void __init kmem_cache_init(void)
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
-	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#endif
+#ifdef CONFIG_NUMA
+	kmem_size = offsetof(struct kmem_cache, node) +
+				nr_node_ids * sizeof(struct kmem_cache_node *);
 #else
 	kmem_size = sizeof(struct kmem_cache);
 #endif
@@ -3351,22 +3225,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
-		int cpu;
-
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
-
-		/*
-		 * And then we need to update the object size in the
-		 * per cpu structures
-		 */
-		for_each_online_cpu(cpu)
-			get_cpu_slab(s, cpu)->objsize = s->objsize;
-
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
@@ -3420,29 +3284,15 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	unsigned long flags;
 
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		init_alloc_cpu_cpu(cpu);
-		down_read(&slub_lock);
-		list_for_each_entry(s, &slab_caches, list)
-			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
-							GFP_KERNEL);
-		up_read(&slub_lock);
-		break;
-
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		down_read(&slub_lock);
 		list_for_each_entry(s, &slab_caches, list) {
-			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
 			local_irq_save(flags);
 			__flush_cpu_slab(s, cpu);
 			local_irq_restore(flags);
-			free_kmem_cache_cpu(c, cpu);
-			s->cpu_slab[cpu] = NULL;
 		}
 		up_read(&slub_lock);
 		break;
@@ -3928,7 +3778,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 			if (!c || c->node < 0)
 				continue;
@@ -4171,6 +4021,23 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
 }
 SLAB_ATTR(trace);
 
+#ifdef CONFIG_FAILSLAB
+static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+}
+
+static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+							size_t length)
+{
+	s->flags &= ~SLAB_FAILSLAB;
+	if (buf[0] == '1')
+		s->flags |= SLAB_FAILSLAB;
+	return length;
+}
+SLAB_ATTR(failslab);
+#endif
+
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
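The failslab attribute above exposes the per-cache SLAB_FAILSLAB bit through SLUB's sysfs directory (presumably /sys/kernel/slab/<cache>/failslab, inferred from the existing attribute layout rather than stated in this diff): writing "1" sets the mark, anything else clears it. A minimal userspace model of the show/store logic; the flag value mirrors the patch, everything else is a stand-in:

#include <stdio.h>

#define SLAB_FAILSLAB 0x02000000UL

static void toy_failslab_store(unsigned long *flags, const char *buf)
{
	*flags &= ~SLAB_FAILSLAB;
	if (buf[0] == '1')
		*flags |= SLAB_FAILSLAB;
}

static int toy_failslab_show(unsigned long flags)
{
	return !!(flags & SLAB_FAILSLAB);
}

int main(void)
{
	unsigned long flags = 0;

	toy_failslab_store(&flags, "1");
	printf("%d\n", toy_failslab_show(flags));	/* 1: cache now eligible for injection */
	toy_failslab_store(&flags, "0");
	printf("%d\n", toy_failslab_show(flags));	/* 0 */
	return 0;
}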
@@ -4353,7 +4220,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 		return -ENOMEM;
 
 	for_each_online_cpu(cpu) {
-		unsigned x = get_cpu_slab(s, cpu)->stat[si];
+		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
 
 		data[cpu] = x;
 		sum += x;
@@ -4376,7 +4243,7 @@ static void clear_stat(struct kmem_cache *s, enum stat_item si)
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		get_cpu_slab(s, cpu)->stat[si] = 0;
+		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
 }
 
 #define STAT_ATTR(si, text) \
@@ -4467,6 +4334,10 @@ static struct attribute *slab_attrs[] = {
 	&deactivate_remote_frees_attr.attr,
 	&order_fallback_attr.attr,
 #endif
+#ifdef CONFIG_FAILSLAB
+	&failslab_attr.attr,
+#endif
+
 	NULL
 };
 
...