Commit 9852ae3f authored by Chris Down, committed by Linus Torvalds

mm, memcg: consider subtrees in memory.events

memory.stat and other files already consider subtrees in their output, and
we should too in order to not present an inconsistent interface.

The current situation is fairly confusing, because people interacting with
cgroups expect hierarchical behaviour in the vein of memory.stat,
cgroup.events, and other files.  For example, this causes confusion when
debugging reclaim events under memory.low, as these currently always read
"0" at non-leaf memcg nodes, which frequently leads people to misdiagnose
breach behaviour.  The same confusion applies to the other counters in this
file when debugging issues.

Aggregation is done at write time instead of at read time, since these
counters aren't hot (unlike memory.stat, which is updated per page and
therefore aggregates at read time), and it makes sense to bundle the
propagation with the existing file notifications.
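
To illustrate the write-time approach (a minimal user-space sketch, not the
kernel implementation; the struct and function names are invented for the
example), each increment walks the parent chain and charges every ancestor,
so reads need no traversal:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-in for a memcg node; not the kernel's struct mem_cgroup. */
    struct toy_memcg {
            struct toy_memcg *parent;   /* NULL at the root */
            atomic_long events[5];      /* low, high, max, oom, oom_kill */
            int local_only;             /* models the memory_localevents flag */
    };

    /* Write-time aggregation: charge this node and every ancestor. */
    static void toy_memory_event(struct toy_memcg *memcg, int event)
    {
            do {
                    atomic_fetch_add(&memcg->events[event], 1);
                    /* the kernel also fires a memory.events notification here */
                    if (memcg->local_only)
                            break;      /* legacy: stop at the local cgroup */
            } while ((memcg = memcg->parent) != NULL);
    }

    int main(void)
    {
            struct toy_memcg parent = { .parent = NULL };
            struct toy_memcg child = { .parent = &parent };

            toy_memory_event(&child, 3 /* oom */);
            /* With the (default) hierarchical behaviour both read 1. */
            printf("child oom=%ld parent oom=%ld\n",
                   atomic_load(&child.events[3]),
                   atomic_load(&parent.events[3]));
            return 0;
    }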

After this patch, events are propagated up the hierarchy:

    [root@ktst ~]# cat /sys/fs/cgroup/system.slice/memory.events
    low 0
    high 0
    max 0
    oom 0
    oom_kill 0
    [root@ktst ~]# systemd-run -p MemoryMax=1 true
    Running as unit: run-r251162a189fb4562b9dabfdc9b0422f5.service
    [root@ktst ~]# cat /sys/fs/cgroup/system.slice/memory.events
    low 0
    high 0
    max 7
    oom 1
    oom_kill 1

As this is a change in behaviour, it can be reverted to the old semantics
by mounting with the `memory_localevents' flag set.  However, the new
behaviour is the default, as there is no evidence of any current users of
memory.events that would find this change undesirable.
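
For anyone consuming memory.events programmatically, a rough sketch of what
a monitor might do after this change (the cgroup path and helper name below
are only examples, and error handling is minimal):

    #include <stdio.h>
    #include <string.h>

    /* Read one counter (e.g. "oom_kill") from a cgroup's memory.events.
     * Returns -1 if the file or key cannot be found. */
    static long read_memory_event(const char *cgroup, const char *key)
    {
            char path[512], name[64];
            long value;
            FILE *f;

            snprintf(path, sizeof(path), "%s/memory.events", cgroup);
            f = fopen(path, "r");
            if (!f)
                    return -1;

            while (fscanf(f, "%63s %ld", name, &value) == 2) {
                    if (strcmp(name, key) == 0) {
                            fclose(f);
                            return value;
                    }
            }
            fclose(f);
            return -1;
    }

    int main(void)
    {
            /* With the new default, this count includes OOM kills in all
             * descendant cgroups; with memory_localevents it is local only. */
            long kills = read_memory_event("/sys/fs/cgroup/system.slice",
                                           "oom_kill");
            printf("oom_kill: %ld\n", kills);
            return 0;
    }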

akpm: this is a behaviour change, so Cc:stable.  This is so that
forthcoming distros which use cgroup v2 are more likely to pick up the
revised behaviour.

Link: http://lkml.kernel.org/r/20190208224419.GA24772@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bc81426f
@@ -177,6 +177,15 @@ cgroup v2 currently supports the following mount options.
 	ignored on non-init namespace mounts.  Please refer to the
 	Delegation section for details.
 
+  memory_localevents
+
+	Only populate memory.events with data for the current cgroup,
+	and not any subtrees.  This is legacy behaviour, the default
+	behaviour without this option is to include subtree counts.
+	This option is system wide and can only be set on mount or
+	modified through remount from the init namespace.  The mount
+	option is ignored on non-init namespace mounts.
+
 Organizing Processes and Threads
 --------------------------------
@@ -89,6 +89,11 @@ enum {
 	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
 	 */
 	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
+
+	/*
+	 * Enable legacy local memory.events.
+	 */
+	CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
 };
 
 /* cftype->flags */
@@ -737,8 +737,14 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
 static inline void memcg_memory_event(struct mem_cgroup *memcg,
 				      enum memcg_memory_event event)
 {
-	atomic_long_inc(&memcg->memory_events[event]);
-	cgroup_file_notify(&memcg->events_file);
+	do {
+		atomic_long_inc(&memcg->memory_events[event]);
+		cgroup_file_notify(&memcg->events_file);
+
+		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+			break;
+	} while ((memcg = parent_mem_cgroup(memcg)) &&
+		 !mem_cgroup_is_root(memcg));
 }
 
 static inline void memcg_memory_event_mm(struct mm_struct *mm,
@@ -1810,11 +1810,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
 
 enum cgroup2_param {
 	Opt_nsdelegate,
+	Opt_memory_localevents,
 	nr__cgroup2_params
 };
 
 static const struct fs_parameter_spec cgroup2_param_specs[] = {
-	fsparam_flag ("nsdelegate",		Opt_nsdelegate),
+	fsparam_flag("nsdelegate",		Opt_nsdelegate),
+	fsparam_flag("memory_localevents",	Opt_memory_localevents),
 	{}
 };
@@ -1837,6 +1839,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
 	case Opt_nsdelegate:
 		ctx->flags |= CGRP_ROOT_NS_DELEGATE;
 		return 0;
+	case Opt_memory_localevents:
+		ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+		return 0;
 	}
 	return -EINVAL;
 }
@@ -1848,6 +1853,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
 			cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
 		else
 			cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
+
+		if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+		else
+			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
 	}
 }
@@ -1855,6 +1865,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
 {
 	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
 		seq_puts(seq, ",nsdelegate");
+	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+		seq_puts(seq, ",memory_localevents");
 	return 0;
 }
@@ -6325,7 +6337,7 @@ static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
 static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
 			     char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "nsdelegate\n");
+	return snprintf(buf, PAGE_SIZE, "nsdelegate\nmemory_localevents\n");
 }
 
 static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);