Commit e35e1a97 authored by Chen Gang, committed by Pekka Enberg

mm/slub: remove 'per_cpu' which is useless variable

Remove 'per_cpu': it has been useless since commit 205ab99d ("slub:
Update statistics handling for variable order slabs"), after which the
partial list is handled in the same way as the per-cpu slab, so the
counters in 'per_cpu' are incremented but never read.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent ad81f054
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4271,12 +4271,10 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	int node;
 	int x;
 	unsigned long *nodes;
-	unsigned long *per_cpu;
 
-	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
 	if (!nodes)
 		return -ENOMEM;
-	per_cpu = nodes + nr_node_ids;
 
 	if (flags & SO_CPU) {
 		int cpu;
@@ -4307,8 +4305,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				total += x;
 				nodes[node] += x;
 			}
-
-			per_cpu[node]++;
 		}
 	}
 
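For context, here is a minimal userspace sketch (not the kernel function itself) of the accounting pattern show_slab_objects() is left with after this change: one zeroed array of nr_node_ids counters is enough, because per-cpu slabs, per-cpu partial slabs and node partial lists all feed the same per-node totals. The macro, struct and sample data below are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <stdlib.h>

#define NR_NODE_IDS 2			/* stand-in for the kernel's nr_node_ids */

struct slab_sample {			/* hypothetical (node, object count) pair */
	int node;
	unsigned long objects;
};

int main(void)
{
	/* One allocation, one counter per node -- no second "per_cpu" half. */
	unsigned long *nodes = calloc(NR_NODE_IDS, sizeof(unsigned long));
	unsigned long total = 0;
	struct slab_sample samples[] = {
		{ 0, 32 },		/* e.g. objects on a cpu slab, node 0 */
		{ 1, 16 },		/* e.g. objects on a cpu partial slab, node 1 */
		{ 0,  8 },		/* e.g. objects on the node 0 partial list */
	};
	size_t i;

	if (!nodes)
		return 1;		/* the kernel returns -ENOMEM here */

	/* Same accumulation the kernel keeps: the total plus nodes[node]. */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		total += samples[i].objects;
		nodes[samples[i].node] += samples[i].objects;
	}

	/* Roughly the shape of the sysfs output: "<total> N0=... N1=..." */
	printf("%lu", total);
	for (i = 0; i < NR_NODE_IDS; i++)
		printf(" N%zu=%lu", i, nodes[i]);
	printf("\n");

	free(nodes);
	return 0;
}

The sketch makes the point of the patch visible: nothing ever reads a separate per-cpu counter, so allocating twice the array (and incrementing per_cpu[node]) was pure overhead.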