Commit f77bd4b1 authored by Roman Gushchin and committed by Andrew Morton

mm: memcg: don't call propagate_protected_usage() needlessly

Patch series "mm: memcg: page counters optimizations", v3.

This patchset contains 3 independent small optimizations of page counters.


This patch (of 3):

Memory protection (min/low) requires constant tracking of protected memory
usage.  propagate_protected_usage() is called on each page counter update
and performs a number of operations even in cases when the actual memory
protection functionality is not supported (e.g. hugetlb cgroups or memcg
swap counters).
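
For reference, propagate_protected_usage() in mm/page_counter.c looks roughly
like the following around this series (paraphrased for illustration, not part
of this patch).  It shows the loads and atomic updates performed on every
call, whether or not min/low are actually in use for that counter:

	static void propagate_protected_usage(struct page_counter *c,
					      unsigned long usage)
	{
		unsigned long protected, old_protected;
		long delta;

		if (!c->parent)
			return;

		/*
		 * memory.min tracking: update this counter's protected usage
		 * and propagate the delta to the parent's children_min_usage.
		 */
		protected = min(usage, READ_ONCE(c->min));
		old_protected = atomic_long_read(&c->min_usage);
		if (protected != old_protected) {
			old_protected = atomic_long_xchg(&c->min_usage, protected);
			delta = protected - old_protected;
			if (delta)
				atomic_long_add(delta, &c->parent->children_min_usage);
		}

		/* memory.low tracking: same dance for the low watermark */
		protected = min(usage, READ_ONCE(c->low));
		old_protected = atomic_long_read(&c->low_usage);
		if (protected != old_protected) {
			old_protected = atomic_long_xchg(&c->low_usage, protected);
			delta = protected - old_protected;
			if (delta)
				atomic_long_add(delta, &c->parent->children_low_usage);
		}
	}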

This is obviously inefficient and leads to a waste of CPU cycles.  It can be
addressed by calling propagate_protected_usage() only for counters that
actually support memory guarantees.  As of now that is only memcg->memory,
the unified memcg memory counter.
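
Condensed from the hunks below, the shape of the change (a sketch only, not
the literal call sites): page_counter_init() gains a protection_support flag,
only memcg->memory sets it, and the charge paths sample the flag once and
skip propagate_protected_usage() when it is false:

	/* init: only the unified memory counter opts into protection */
	page_counter_init(&memcg->memory, parent ? &parent->memory : NULL, true);
	page_counter_init(&memcg->swap, parent ? &parent->swap : NULL, false);

	/* charge path: read the flag once per operation, gate the extra work */
	bool protection = track_protection(counter);

	for (c = counter; c; c = c->parent) {
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (protection)
			propagate_protected_usage(c, new);
		...
	}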

Link: https://lkml.kernel.org/r/20240726203110.1577216-2-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6c469957
include/linux/page_counter.h

@@ -31,6 +31,7 @@ struct page_counter {
 	/* Keep all the read most fields in a separete cacheline. */
 	CACHELINE_PADDING(_pad2_);
 
+	bool protection_support;
 	unsigned long min;
 	unsigned long low;
 	unsigned long high;
@@ -44,12 +45,17 @@ struct page_counter {
 #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
 #endif
 
+/*
+ * Protection is supported only for the first counter (with id 0).
+ */
 static inline void page_counter_init(struct page_counter *counter,
-				     struct page_counter *parent)
+				     struct page_counter *parent,
+				     bool protection_support)
 {
 	atomic_long_set(&counter->usage, 0);
 	counter->max = PAGE_COUNTER_MAX;
 	counter->parent = parent;
+	counter->protection_support = protection_support;
 }
 
 static inline unsigned long page_counter_read(struct page_counter *counter)
mm/hugetlb_cgroup.c

@@ -114,10 +114,10 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
 	}
 	page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
 							     idx),
-			  fault_parent);
+			  fault_parent, false);
 	page_counter_init(
 		hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
-		rsvd_parent);
+		rsvd_parent, false);
 
 	limit = round_down(PAGE_COUNTER_MAX,
 			   pages_per_huge_page(&hstates[idx]));
mm/memcontrol.c

@@ -3596,21 +3596,21 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (parent) {
 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
 
-		page_counter_init(&memcg->memory, &parent->memory);
-		page_counter_init(&memcg->swap, &parent->swap);
+		page_counter_init(&memcg->memory, &parent->memory, true);
+		page_counter_init(&memcg->swap, &parent->swap, false);
 #ifdef CONFIG_MEMCG_V1
 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
-		page_counter_init(&memcg->kmem, &parent->kmem);
-		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
+		page_counter_init(&memcg->kmem, &parent->kmem, false);
+		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
 #endif
 	} else {
 		init_memcg_stats();
 		init_memcg_events();
-		page_counter_init(&memcg->memory, NULL);
-		page_counter_init(&memcg->swap, NULL);
+		page_counter_init(&memcg->memory, NULL, true);
+		page_counter_init(&memcg->swap, NULL, false);
 #ifdef CONFIG_MEMCG_V1
-		page_counter_init(&memcg->kmem, NULL);
-		page_counter_init(&memcg->tcpmem, NULL);
+		page_counter_init(&memcg->kmem, NULL, false);
+		page_counter_init(&memcg->tcpmem, NULL, false);
 #endif
 		root_mem_cgroup = memcg;
 		return &memcg->css;
mm/page_counter.c

@@ -13,6 +13,11 @@
 #include <linux/bug.h>
 #include <asm/page.h>
 
+static bool track_protection(struct page_counter *c)
+{
+	return c->protection_support;
+}
+
 static void propagate_protected_usage(struct page_counter *c,
 				      unsigned long usage)
 {
@@ -57,7 +62,8 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 		new = 0;
 		atomic_long_set(&counter->usage, new);
 	}
-	propagate_protected_usage(counter, new);
+	if (track_protection(counter))
+		propagate_protected_usage(counter, new);
 }
 
 /**
@@ -70,12 +76,14 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
 {
 	struct page_counter *c;
+	bool protection = track_protection(counter);
 
 	for (c = counter; c; c = c->parent) {
 		long new;
 
 		new = atomic_long_add_return(nr_pages, &c->usage);
-		propagate_protected_usage(c, new);
+		if (protection)
+			propagate_protected_usage(c, new);
 		/*
 		 * This is indeed racy, but we can live with some
 		 * inaccuracy in the watermark.
@@ -99,6 +107,7 @@ bool page_counter_try_charge(struct page_counter *counter,
 			     struct page_counter **fail)
 {
 	struct page_counter *c;
+	bool protection = track_protection(counter);
 
 	for (c = counter; c; c = c->parent) {
 		long new;
@@ -128,7 +137,9 @@ bool page_counter_try_charge(struct page_counter *counter,
 			*fail = c;
 			goto failed;
 		}
-		propagate_protected_usage(c, new);
+		if (protection)
+			propagate_protected_usage(c, new);
+
 		/*
 		 * Just like with failcnt, we can live with some
 		 * inaccuracy in the watermark.