Commit f2e4d28d authored by Muchun Song's avatar Muchun Song Committed by Linus Torvalds

mm: memcontrol: simplify lruvec_holds_page_lru_lock

We already have a helper lruvec_memcg() to get the memcg from lruvec, we
do not need to do it ourselves in the lruvec_holds_page_lru_lock().  So
use lruvec_memcg() instead.  And when mem_cgroup_disabled() returns false,
page_memcg(page) for an LRU page cannot be NULL.  So remove the odd
logic of "memcg = page_memcg(page) ? : root_mem_cgroup".  And use
lruvec_pgdat to simplify the code.  We can have a single definition for
this function that works for !CONFIG_MEMCG, CONFIG_MEMCG +
mem_cgroup_disabled() and CONFIG_MEMCG.

Link: https://lkml.kernel.org/r/20210417043538.9793-5-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a984226f
@@ -755,22 +755,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
	return mem_cgroup_lruvec(memcg, pgdat);
}
/*
 * Does @lruvec own the lru_lock that protects @page?  True only when
 * @lruvec is the per-node/per-memcg lruvec that @page belongs to.
 * (Pre-simplification CONFIG_MEMCG variant removed by this commit.)
 */
static inline bool lruvec_holds_page_lru_lock(struct page *page,
					struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);

	/* With memcg disabled there is a single lruvec per node. */
	if (mem_cgroup_disabled())
		return lruvec == &pgdat->__lruvec;

	/* Recover the owning memcg of @lruvec via its container. */
	struct mem_cgroup_per_node *mz =
		container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	const struct mem_cgroup *memcg = page_memcg(page);

	/* Pages with no memcg are accounted to the root cgroup here. */
	if (!memcg)
		memcg = root_mem_cgroup;

	return lruvec->pgdat == pgdat && mz->memcg == memcg;
}
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -1227,14 +1211,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
	return &pgdat->__lruvec;
}
static inline bool lruvec_holds_page_lru_lock(struct page *page,
struct lruvec *lruvec)
{
pg_data_t *pgdat = page_pgdat(page);
return lruvec == &pgdat->__lruvec;
}
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
@@ -1516,6 +1492,13 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
/*
 * Return true iff @lruvec's lru_lock is the one that protects @page,
 * i.e. @lruvec belongs to the same node (lruvec_pgdat) and the same
 * memcg (lruvec_memcg) as @page.  Single definition intended to work
 * for !CONFIG_MEMCG, CONFIG_MEMCG + mem_cgroup_disabled(), and full
 * CONFIG_MEMCG — NOTE(review): relies on lruvec_memcg()/page_memcg()
 * agreeing (both NULL or both root) when memcg is disabled; confirm
 * against those helpers' stubs.
 */
static inline bool lruvec_holds_page_lru_lock(struct page *page,
struct lruvec *lruvec)
{
return lruvec_pgdat(lruvec) == page_pgdat(page) &&
lruvec_memcg(lruvec) == page_memcg(page);
}
/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
					struct lruvec *locked_lruvec)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment