Commit 145949a1 authored by Raghavendra K T, committed by Linus Torvalds

mm/list_lru.c: replace nr_node_ids for loop with for_each_node()

The functions changed by this patch are on the slow path, which is
invoked whenever alloc_super() is called during a mount.

While this should make no difference on architectures with sequential
NUMA node ids, on powerpc, which can have sparse node ids (e.g., a
4-node system with node ids 0, 1, 16, 17 is common), this patch saves
some unnecessary allocations for non-existent NUMA nodes.

Even without that saving, the patch arguably makes the code more readable.
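To see the effect concretely, here is a minimal user-space sketch (the
sparse mask, node count, and the simplified for_each_node() macro below
are illustrative assumptions, not kernel code) comparing the two loop
shapes on the 0, 1, 16, 17 topology mentioned above:

    #include <stdio.h>

    /* Hypothetical sparse topology: 4 possible nodes with ids 0, 1, 16,
     * 17, so nr_node_ids (highest id + 1) is 18. */
    #define NR_NODE_IDS 18
    static const unsigned long possible_mask =
            (1UL << 0) | (1UL << 1) | (1UL << 16) | (1UL << 17);

    /* Toy stand-in for the kernel's for_each_node(): visit only the set
     * bits of the possible-node mask instead of every id below
     * NR_NODE_IDS. */
    #define for_each_node(n)                                \
            for ((n) = 0; (n) < NR_NODE_IDS; (n)++)         \
                    if (!(possible_mask & (1UL << (n)))) ; else

    int main(void)
    {
            int i, old_iters = 0, new_iters = 0;

            for (i = 0; i < NR_NODE_IDS; i++)       /* old loop */
                    old_iters++;
            for_each_node(i)                        /* new loop */
                    new_iters++;

            /* Prints "old: 18, new: 4" -- 14 per-node setups avoided. */
            printf("old: %d, new: %d\n", old_iters, new_iters);
            return 0;
    }

Since nr_node_ids is driven by the highest node id, the old loop would
have called memcg_init_list_lru_node() for the 14 ids in the holes as
well.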

[vdavydov@parallels.com: take memcg_aware check outside for_each loop]
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Anton Blanchard <anton@samba.org>
Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Cc: Greg Kurz <gkurz@linux.vnet.ibm.com>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 61f9ec1d
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -42,6 +42,10 @@ static void list_lru_unregister(struct list_lru *lru)
 #ifdef CONFIG_MEMCG_KMEM
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
+	/*
+	 * This needs node 0 to be always present, even
+	 * in the systems supporting sparse numa ids.
+	 */
 	return !!lru->node[0].memcg_lrus;
 }
 
@@ -377,16 +381,20 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
 	int i;
 
-	for (i = 0; i < nr_node_ids; i++) {
-		if (!memcg_aware)
-			lru->node[i].memcg_lrus = NULL;
-		else if (memcg_init_list_lru_node(&lru->node[i]))
+	if (!memcg_aware)
+		return 0;
+
+	for_each_node(i) {
+		if (memcg_init_list_lru_node(&lru->node[i]))
 			goto fail;
 	}
 	return 0;
 fail:
-	for (i = i - 1; i >= 0; i--)
+	for (i = i - 1; i >= 0; i--) {
+		if (!lru->node[i].memcg_lrus)
+			continue;
 		memcg_destroy_list_lru_node(&lru->node[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -397,7 +405,7 @@ static void memcg_destroy_list_lru(struct list_lru *lru)
 	if (!list_lru_memcg_aware(lru))
 		return;
 
-	for (i = 0; i < nr_node_ids; i++)
+	for_each_node(i)
 		memcg_destroy_list_lru_node(&lru->node[i]);
 }
 
@@ -409,16 +417,20 @@ static int memcg_update_list_lru(struct list_lru *lru,
 	if (!list_lru_memcg_aware(lru))
 		return 0;
 
-	for (i = 0; i < nr_node_ids; i++) {
+	for_each_node(i) {
 		if (memcg_update_list_lru_node(&lru->node[i],
 					       old_size, new_size))
 			goto fail;
 	}
 	return 0;
 fail:
-	for (i = i - 1; i >= 0; i--)
+	for (i = i - 1; i >= 0; i--) {
+		if (!lru->node[i].memcg_lrus)
+			continue;
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
+	}
 	return -ENOMEM;
 }
 
@@ -430,7 +442,7 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
 	if (!list_lru_memcg_aware(lru))
 		return;
 
-	for (i = 0; i < nr_node_ids; i++)
+	for_each_node(i)
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
 }
 
@@ -485,7 +497,7 @@ static void memcg_drain_list_lru(struct list_lru *lru,
 	if (!list_lru_memcg_aware(lru))
 		return;
 
-	for (i = 0; i < nr_node_ids; i++)
+	for_each_node(i)
 		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
 }
 
@@ -522,7 +534,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 	if (!lru->node)
 		goto out;
 
-	for (i = 0; i < nr_node_ids; i++) {
+	for_each_node(i) {
 		spin_lock_init(&lru->node[i].lock);
 		if (key)
 			lockdep_set_class(&lru->node[i].lock, key);
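For reference, for_each_node() iterates the possible-node mask; roughly
(a simplified sketch of the MAX_NUMNODES > 1 case from the kernel's
include/linux/nodemask.h, config-dependent variants omitted):

    /* Walk the set bits of the N_POSSIBLE node mask, so ids in the
     * holes of a sparse numbering are never visited. */
    #define for_each_node_mask(node, mask)              \
            for ((node) = first_node(mask);             \
                 (node) < MAX_NUMNODES;                 \
                 (node) = next_node((node), (mask)))

    #define for_each_node_state(node, state)            \
            for_each_node_mask((node), node_states[state])

    #define for_each_node(node)                         \
            for_each_node_state(node, N_POSSIBLE)

This is also why the comment added to list_lru_memcg_aware() matters:
checking node[0].memcg_lrus only works because node 0 is always present,
even on systems with sparse numa ids. And note the matching change on
the fail paths: the unwind loops still count down over every index
below the failing one, including ids in the holes whose memcg_lrus was
never allocated (the node array comes from kzalloc), so they now skip
entries with a NULL memcg_lrus.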