Commit ec3ab083 authored by Christoph Lameter, committed by Pekka Enberg

slub: Get rid of the node field

The node field is always page_to_nid(c->page), so it is rather easy to
replace. Note that there may be slightly more overhead in various hot paths
due to the need to shift the node bits out of page->flags. However, that is
mostly compensated for by the smaller footprint of the kmem_cache_cpu
structure (this patch reduces it to 3 words per cache), which allows better
caching.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 188fd063
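
For context, the "overhead in various hot paths" mentioned above comes from page_to_nid() having to recover the node id from the page flags word instead of loading a cached integer. Below is a minimal sketch of that lookup under the common configuration where the node id lives in page->flags; the SKETCH_* names and field widths are illustrative stand-ins, not the kernel's actual NODES_PGSHIFT/NODES_MASK values.

/* Sketch: recovering a NUMA node id from a page->flags style word.
 * The real kernel helper is page_to_nid(); the shift and mask below
 * are illustrative stand-ins for NODES_PGSHIFT and NODES_MASK. */
struct page_sketch {
	unsigned long flags;	/* node id packed into the high bits */
};

#define SKETCH_NODES_SHIFT	6	/* illustrative width only */
#define SKETCH_NODES_PGSHIFT	(sizeof(unsigned long) * 8 - SKETCH_NODES_SHIFT)
#define SKETCH_NODES_MASK	((1UL << SKETCH_NODES_SHIFT) - 1)

static inline int sketch_page_to_nid(const struct page_sketch *page)
{
	/* One shift and one mask per call: the extra work traded for
	 * dropping the cached c->node field. */
	return (int)((page->flags >> SKETCH_NODES_PGSHIFT) & SKETCH_NODES_MASK);
}
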
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
 	struct page *partial;	/* Partially allocated frozen slabs */
-	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
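
For reference, after this removal the per-cpu structure is down to the fields visible in the hunk above plus the freelist pointer that sits just before them. The sketch below assumes that layout and is not a verbatim copy of slub_def.h.

/* Sketch of kmem_cache_cpu after the patch (layout assumed from the hunk
 * context plus the freelist member; the stats array is omitted when
 * CONFIG_SLUB_STATS is off). */
struct page;

struct kmem_cache_cpu_sketch {
	void **freelist;	/* pointer to next available object */
	unsigned long tid;	/* globally unique transaction id */
	struct page *page;	/* the slab from which we are allocating */
	struct page *partial;	/* partially allocated frozen slabs */
};
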
@@ -1561,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,
 		if (!object) {
 			c->page = page;
-			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 			available = page->objects - page->inuse;
@@ -2057,7 +2056,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && c->node != node)
+	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
 		return 0;
 #endif
 	return 1;
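
node_match() is what makes this a hot-path change: it is consulted on every allocation before the per-cpu freelist is used. The sketch below is a simplified rendering of that fast path, assuming the slab_alloc() structure of this era; the real code uses the transaction id and this_cpu_cmpxchg_double(), which are elided here, so this only illustrates where the page_to_nid() call now happens.

/* Simplified sketch of the allocation fast path; tid/cmpxchg handling
 * from the real slab_alloc() is intentionally left out. */
static void *slab_alloc_sketch(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
	void *object = c->freelist;

	/* node_match() now derives the node via page_to_nid(c->page)
	 * instead of loading the removed c->node field. */
	if (!object || !node_match(c, node))
		object = __slab_alloc(s, gfpflags, node, _RET_IP_, c);	/* slow-path refill */
	else
		c->freelist = get_freepointer(s, object);	/* fast-path pop, simplified */

	return object;
}
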
@@ -2152,7 +2151,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		page->freelist = NULL;
 		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
 		c->page = page;
 		*pc = c;
 	} else
@@ -2269,7 +2267,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (c->partial) {
 		c->page = c->partial;
 		c->partial = c->page->next;
-		c->node = page_to_nid(c->page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		c->freelist = NULL;
 		goto redo;
@@ -2294,7 +2291,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c->freelist = get_freepointer(s, freelist);
 	deactivate_slab(s, c);
-	c->node = NUMA_NO_NODE;
 	local_irq_restore(flags);
 	return freelist;
 }
@@ -4507,13 +4503,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-			int node = ACCESS_ONCE(c->node);
+			int node;
 			struct page *page;

-			if (node < 0)
-				continue;
 			page = ACCESS_ONCE(c->page);
-			if (page) {
+			if (!page)
+				continue;
+
+			node = page_to_nid(page);
 			if (flags & SO_TOTAL)
 				x = page->objects;
 			else if (flags & SO_OBJECTS)
@@ -4523,14 +4520,14 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
-			}
-			page = c->partial;
+			page = ACCESS_ONCE(c->partial);
 			if (page) {
 				x = page->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
 			per_cpu[node]++;
 		}
 	}