Commit 14577beb authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Dont define useless label in the !CONFIG_CMPXCHG_LOCAL case
  slab,rcu: don't assume the size of struct rcu_head
  slub,rcu: don't assume the size of struct rcu_head
  slub: automatically reserve bytes at the end of slab
  Lockless (and preemptless) fastpaths for slub
  slub: Get rid of slab_free_hook_irq()
  slub: min_partial needs to be in first cacheline
  slub: fix ksize() build error
  slub: fix kmemcheck calls to match ksize() hints
  Revert "slab: Fix missing DEBUG_SLAB last user"
  mm: Remove support for kmem_cache_name()
parents 09b9cc44 e8c500c2
@@ -105,7 +105,6 @@ void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
-const char *kmem_cache_name(struct kmem_cache *);

 /*
  * Please use this macro to create slab caches. Simply specify the
......
@@ -35,7 +35,10 @@ enum stat_item {
                        NR_SLUB_STAT_ITEMS };

 struct kmem_cache_cpu {
-       void **freelist;        /* Pointer to first free per cpu object */
+       void **freelist;        /* Pointer to next available object */
+#ifdef CONFIG_CMPXCHG_LOCAL
+       unsigned long tid;      /* Globally unique transaction id */
+#endif
        struct page *page;      /* The slab from which we are allocating */
        int node;               /* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
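Note on the new tid field: below is a minimal user-space sketch (not the kernel implementation) of the idea behind the lockless fastpath this field enables. The allocation path snapshots the per-cpu freelist together with a transaction id and commits both with a single compare-and-exchange, retrying if either changed in between; the kernel does this with a per-cpu cmpxchg on the real structures, and all names here (cpu_slab_t, fast_alloc) are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
        void *freelist;         /* next free object, or NULL when empty */
        unsigned long tid;      /* bumped on every successful transaction */
} cpu_slab_t;

static _Atomic cpu_slab_t cpu_slab;

/* Each free object stores the pointer to the next free object at offset 0. */
static void *fast_alloc(void)
{
        cpu_slab_t old, new;

        do {
                old = atomic_load(&cpu_slab);
                if (!old.freelist)
                        return NULL;    /* the real allocator falls back to a slow path */
                new.freelist = *(void **)old.freelist;
                new.tid = old.tid + 1;
        } while (!atomic_compare_exchange_weak(&cpu_slab, &old, new));

        return old.freelist;
}

int main(void)
{
        static void *objs[3];

        /* Chain three fake objects into a freelist and drain it. */
        objs[0] = &objs[1];
        objs[1] = &objs[2];
        objs[2] = NULL;
        atomic_store(&cpu_slab, (cpu_slab_t){ .freelist = &objs[0], .tid = 0 });

        for (void *p; (p = fast_alloc()) != NULL; )
                printf("allocated %p\n", p);
        return 0;
}

The transaction id is what lets the fastpath detect, without disabling interrupts, that it was preempted or that another operation slipped in on the same cpu; without CONFIG_CMPXCHG_LOCAL the allocator keeps the old interrupt-disabled path instead.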
@@ -70,6 +73,7 @@ struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retriving partial slabs etc */
        unsigned long flags;
+       unsigned long min_partial;
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
@@ -83,7 +87,7 @@ struct kmem_cache {
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
-       unsigned long min_partial;
+       int reserved;           /* Reserved bytes at the end of slabs */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
 #ifdef CONFIG_SYSFS
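Note on the new reserved field: a rough user-space sketch of what it is for. When sizing a slab, the allocator can subtract a fixed tail reservation (for example, room for an rcu_head when SLAB_DESTROY_BY_RCU is used) before dividing the remaining space into objects. objects_per_slab() below is an illustrative stand-in, not the kernel's helper.

#include <stdio.h>

static unsigned int objects_per_slab(unsigned long slab_size,
                                     unsigned int object_size,
                                     unsigned int reserved)
{
        /* Reserved tail bytes never hold objects. */
        return (slab_size - reserved) / object_size;
}

int main(void)
{
        /* 4 KiB slab, 64-byte objects, 16 bytes reserved at the end. */
        printf("%u objects without reservation\n", objects_per_slab(4096, 64, 0));
        printf("%u objects with 16 reserved bytes\n", objects_per_slab(4096, 64, 16));
        return 0;
}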
......
@@ -190,22 +190,6 @@ typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_ACTIVE  (((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT     (((kmem_bufctl_t)(~0U))-3)

-/*
- * struct slab
- *
- * Manages the objs in a slab. Placed either at the beginning of mem allocated
- * for a slab, or allocated from an general cache.
- * Slabs are chained into three list: fully used, partial, fully free slabs.
- */
-struct slab {
-       struct list_head list;
-       unsigned long colouroff;
-       void *s_mem;            /* including colour offset */
-       unsigned int inuse;     /* num of objs active in slab */
-       kmem_bufctl_t free;
-       unsigned short nodeid;
-};
-
 /*
  * struct slab_rcu
  *
@@ -219,8 +203,6 @@ struct slab {
  *
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
- *
- * We assume struct slab_rcu can overlay struct slab when destroying.
  */
 struct slab_rcu {
        struct rcu_head head;
@@ -228,6 +210,27 @@ struct slab_rcu {
        void *addr;
 };

+/*
+ * struct slab
+ *
+ * Manages the objs in a slab. Placed either at the beginning of mem allocated
+ * for a slab, or allocated from an general cache.
+ * Slabs are chained into three list: fully used, partial, fully free slabs.
+ */
+struct slab {
+       union {
+               struct {
+                       struct list_head list;
+                       unsigned long colouroff;
+                       void *s_mem;            /* including colour offset */
+                       unsigned int inuse;     /* num of objs active in slab */
+                       kmem_bufctl_t free;
+                       unsigned short nodeid;
+               };
+               struct slab_rcu __slab_cover_slab_rcu;
+       };
+};
+
 /*
  * struct array_cache
  *
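Note on the union above: a small stand-alone sketch of the overlay trick, with made-up type names (fake_rcu_head, demo_slab_rcu, demo_slab). Folding the descriptor fields and the RCU bookkeeping into an anonymous union makes the compiler size the descriptor to cover the RCU record, so the destroy path no longer has to assume anything about sizeof(struct rcu_head).

#include <assert.h>
#include <stdio.h>

struct fake_rcu_head {                  /* stand-in for struct rcu_head */
        struct fake_rcu_head *next;
        void (*func)(struct fake_rcu_head *);
};

struct demo_slab_rcu {                  /* stand-in for struct slab_rcu */
        struct fake_rcu_head head;
        void *cachep;
        void *addr;
};

struct demo_slab {
        union {
                struct {                /* normal descriptor fields */
                        void *s_mem;
                        unsigned int inuse;
                };
                struct demo_slab_rcu rcu;   /* overlay used only on RCU free */
        };
};

int main(void)
{
        /* The union guarantees the descriptor is big enough to be reused
         * as the RCU callback record when the slab is torn down. */
        static_assert(sizeof(struct demo_slab) >= sizeof(struct demo_slab_rcu),
                      "descriptor must cover the RCU overlay");
        printf("demo_slab: %zu bytes, demo_slab_rcu: %zu bytes\n",
               sizeof(struct demo_slab), sizeof(struct demo_slab_rcu));
        return 0;
}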
@@ -2147,8 +2150,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
- * Note that kmem_cache_name() is not guaranteed to return the same pointer,
- * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2288,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if (ralign < align) {
                ralign = align;
        }
-       /* disable debug if not aligning with REDZONE_ALIGN */
-       if (ralign & (__alignof__(unsigned long long) - 1))
+       /* disable debug if necessary */
+       if (ralign > __alignof__(unsigned long long))
                flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        /*
         * 4) Store it.
@@ -2315,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         */
        if (flags & SLAB_RED_ZONE) {
                /* add space for red zone words */
-               cachep->obj_offset += align;
-               size += align + sizeof(unsigned long long);
+               cachep->obj_offset += sizeof(unsigned long long);
+               size += 2 * sizeof(unsigned long long);
        }
        if (flags & SLAB_STORE_USER) {
                /* user store requires one word storage behind the end of
@@ -3840,12 +3841,6 @@ unsigned int kmem_cache_size(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_size);

-const char *kmem_cache_name(struct kmem_cache *cachep)
-{
-       return cachep->name;
-}
-EXPORT_SYMBOL_GPL(kmem_cache_name);
-
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
......
@@ -666,12 +666,6 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);

-const char *kmem_cache_name(struct kmem_cache *c)
-{
-       return c->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
 int kmem_cache_shrink(struct kmem_cache *d)
 {
        return 0;
......