Commit 54266640 authored by Wei Yang, committed by Linus Torvalds

slub: avoid duplicate creation on the first object

When a kmem_cache is created with a ctor, each object in the kmem_cache
is initialized before it is ready to use.  In the SLUB implementation,
however, the first object is initialized twice.
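
To make the duplication concrete, here is a minimal user-space sketch of
the old loop shape (the slab[] array, the NOBJ/OBJ_SIZE constants, and the
counting setup_object() stand-in are hypothetical, and the freelist
linking is omitted for brevity); counting the calls shows that object 0
is set up twice while every other object is set up once:

#include <stdio.h>

#define NOBJ		4
#define OBJ_SIZE	32

static char slab[NOBJ * OBJ_SIZE];
static int setup_calls[NOBJ];

/* stand-in for SLUB's setup_object(): just count calls per object */
static void setup_object(char *obj)
{
	setup_calls[(obj - slab) / OBJ_SIZE]++;
}

int main(void)
{
	char *start = slab;
	char *p, *last;
	int i;

	/*
	 * The old loop shape: 'last' trails 'p' by one object, so the
	 * first and the second iteration both pass object 0 to
	 * setup_object().
	 */
	last = start;
	for (p = start; p < start + NOBJ * OBJ_SIZE; p += OBJ_SIZE) {
		setup_object(last);
		last = p;
	}
	setup_object(last);	/* the final object, after the loop */

	for (i = 0; i < NOBJ; i++)
		printf("object %d set up %d time(s)\n", i, setup_calls[i]);
	/* output: object 0 is set up 2 times, objects 1..3 once each */
	return 0;
}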

This patch removes the duplicate initialization of the first object.

Fixes commit 7656c72b ("SLUB: add macros for scanning objects in a slab").
Signed-off-by: Wei Yang <weiyang@linux.vnet.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5e804789
mm/slub.c
@@ -283,6 +283,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
+#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
+	for (__p = (__addr), __idx = 1; __idx <= __objects;\
+			__p += (__s)->size, __idx++)
+
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
@@ -1379,9 +1383,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	void *start;
-	void *last;
 	void *p;
 	int order;
+	int idx;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
@@ -1402,14 +1406,13 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
-	last = start;
-	for_each_object(p, s, start, page->objects) {
-		setup_object(s, page, last);
-		set_freepointer(s, last, p);
-		last = p;
+	for_each_object_idx(p, idx, s, start, page->objects) {
+		setup_object(s, page, p);
+		if (likely(idx < page->objects))
+			set_freepointer(s, p, p + s->size);
+		else
+			set_freepointer(s, p, NULL);
 	}
-	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
 	page->inuse = page->objects;
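
For comparison, here is a sketch of the new scheme under the same
hypothetical layout: it expands for_each_object_idx() by hand and uses
memcpy-based stand-ins for set_freepointer()/get_freepointer() rather
than the real kernel helpers, then verifies that the resulting freelist
visits every object exactly once:

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define NOBJ		4
#define OBJ_SIZE	32

static char slab[NOBJ * OBJ_SIZE];

/*
 * Stand-ins for set_freepointer()/get_freepointer(): keep the free
 * pointer at offset 0 inside the object.
 */
static void set_freepointer(char *obj, char *fp)
{
	memcpy(obj, &fp, sizeof(fp));
}

static char *get_freepointer(char *obj)
{
	char *fp;

	memcpy(&fp, obj, sizeof(fp));
	return fp;
}

int main(void)
{
	char *start = slab;
	char *p;
	int idx, seen = 0;

	/*
	 * Expanded form of the new for_each_object_idx() loop: the
	 * 1-based index says when the last object is reached, so every
	 * object is handled exactly once.
	 */
	for (p = start, idx = 1; idx <= NOBJ; p += OBJ_SIZE, idx++) {
		if (idx < NOBJ)
			set_freepointer(p, p + OBJ_SIZE); /* link to next */
		else
			set_freepointer(p, NULL); /* terminate the list */
	}

	/* walking the freelist from 'start' visits all NOBJ objects */
	for (p = start; p; p = get_freepointer(p))
		seen++;
	assert(seen == NOBJ);
	return 0;
}

Because the 1-based index makes the last object detectable inside the
loop, the extra setup_object()/set_freepointer() calls after the loop are
no longer needed.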