Commit 3eef1127 authored by Muchun Song, committed by Linus Torvalds

mm: list_lru: only add memcg-aware lrus to the global lru list

The non-memcg-aware lru is always skipped when traversing the global lru
list, which is inefficient.  Instead, add only memcg-aware lrus to the
global lru list so that traversal becomes more efficient.

Link: https://lkml.kernel.org/r/20211025124353.55781-1-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e80216d9
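In essence, the change moves the memcg-awareness filter from the list walkers to registration time. The condensed excerpt below is a sketch drawn from the diff that follows (not standalone code) and shows the guarded register path:

static LIST_HEAD(memcg_list_lrus);	/* now holds only memcg-aware lrus */
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	/* Non-memcg-aware lrus never enter the global list ... */
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

/*
 * ... so walkers such as memcg_update_all_list_lrus() and
 * memcg_drain_all_list_lrus() no longer need a per-entry
 * list_lru_memcg_aware() check.
 */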
@@ -15,18 +15,29 @@
 #include "slab.h"
 
 #ifdef CONFIG_MEMCG_KMEM
-static LIST_HEAD(list_lrus);
+static LIST_HEAD(memcg_list_lrus);
 static DEFINE_MUTEX(list_lrus_mutex);
 
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+	return lru->memcg_aware;
+}
+
 static void list_lru_register(struct list_lru *lru)
 {
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	mutex_lock(&list_lrus_mutex);
-	list_add(&lru->list, &list_lrus);
+	list_add(&lru->list, &memcg_list_lrus);
 	mutex_unlock(&list_lrus_mutex);
 }
 
 static void list_lru_unregister(struct list_lru *lru)
 {
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	mutex_lock(&list_lrus_mutex);
 	list_del(&lru->list);
 	mutex_unlock(&list_lrus_mutex);
@@ -37,11 +48,6 @@ static int lru_shrinker_id(struct list_lru *lru)
 	return lru->shrinker_id;
 }
 
-static inline bool list_lru_memcg_aware(struct list_lru *lru)
-{
-	return lru->memcg_aware;
-}
-
 static inline struct list_lru_one *
 list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 {
@@ -457,9 +463,6 @@ static int memcg_update_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return 0;
-
 	for_each_node(i) {
 		if (memcg_update_list_lru_node(&lru->node[i],
 					       old_size, new_size))
@@ -482,9 +485,6 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return;
-
 	for_each_node(i)
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
@@ -497,7 +497,7 @@ int memcg_update_all_list_lrus(int new_size)
 	int old_size = memcg_nr_cache_ids;
 
 	mutex_lock(&list_lrus_mutex);
-	list_for_each_entry(lru, &list_lrus, list) {
+	list_for_each_entry(lru, &memcg_list_lrus, list) {
 		ret = memcg_update_list_lru(lru, old_size, new_size);
 		if (ret)
 			goto fail;
@@ -506,7 +506,7 @@ int memcg_update_all_list_lrus(int new_size)
 	mutex_unlock(&list_lrus_mutex);
 	return ret;
 fail:
-	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
+	list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
 		memcg_cancel_update_list_lru(lru, old_size, new_size);
 	goto out;
 }
@@ -543,9 +543,6 @@ static void memcg_drain_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return;
-
 	for_each_node(i)
 		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
 }
@@ -555,7 +552,7 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
 	struct list_lru *lru;
 
 	mutex_lock(&list_lrus_mutex);
-	list_for_each_entry(lru, &list_lrus, list)
+	list_for_each_entry(lru, &memcg_list_lrus, list)
 		memcg_drain_list_lru(lru, src_idx, dst_memcg);
 	mutex_unlock(&list_lrus_mutex);
 }