Commit 772616b0 authored by Roman Gushchin's avatar Roman Gushchin Committed by Linus Torvalds

mm: memcg/percpu: per-memcg percpu memory statistics

Percpu memory can represent a noticeable chunk of the total memory
consumption, especially on big machines with many CPUs.  Let's track
percpu memory usage for each memcg and display it in memory.stat.

A percpu allocation is usually scattered over multiple pages (and nodes),
and can be significantly smaller than a page.  So let's add a byte-sized
counter on the memcg level: MEMCG_PERCPU_B.  Byte-sized vmstat infra
created for slabs can be perfectly reused for percpu case.

[guro@fb.com: v3]
  Link: http://lkml.kernel.org/r/20200623184515.4132564-4-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Reviewed-by: default avatarShakeel Butt <shakeelb@google.com>
Acked-by: default avatarDennis Zhou <dennis@kernel.org>
Acked-by: default avatarJohannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tobin C. Harding <tobin@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Waiman Long <longman@redhat.com>
Cc: Bixuan Cui <cuibixuan@huawei.com>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20200608230819.832349-4-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3c7be18a
...@@ -1274,6 +1274,10 @@ PAGE_SIZE multiple when read back. ...@@ -1274,6 +1274,10 @@ PAGE_SIZE multiple when read back.
Amount of memory used for storing in-kernel data Amount of memory used for storing in-kernel data
structures. structures.
percpu
Amount of memory used for storing per-cpu kernel
data structures.
sock sock
Amount of memory used in network transmission buffers Amount of memory used in network transmission buffers
......
...@@ -32,6 +32,7 @@ struct kmem_cache; ...@@ -32,6 +32,7 @@ struct kmem_cache;
enum memcg_stat_item { enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK, MEMCG_SOCK,
MEMCG_PERCPU_B,
MEMCG_NR_STAT, MEMCG_NR_STAT,
}; };
...@@ -339,6 +340,13 @@ struct mem_cgroup { ...@@ -339,6 +340,13 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup; extern struct mem_cgroup *root_mem_cgroup;
/*
 * memcg_stat_item_in_bytes - report whether a memcg stat index is
 * accounted in bytes rather than pages.
 *
 * MEMCG_PERCPU_B is a memcg-local byte-sized counter; every other
 * index defers to the generic vmstat_item_in_bytes() check.
 */
static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
	return idx == MEMCG_PERCPU_B || vmstat_item_in_bytes(idx);
}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{ {
return (memcg == root_mem_cgroup); return (memcg == root_mem_cgroup);
......
...@@ -781,7 +781,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) ...@@ -781,7 +781,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
if (mem_cgroup_disabled()) if (mem_cgroup_disabled())
return; return;
if (vmstat_item_in_bytes(idx)) if (memcg_stat_item_in_bytes(idx))
threshold <<= PAGE_SHIFT; threshold <<= PAGE_SHIFT;
x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
...@@ -1488,6 +1488,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg) ...@@ -1488,6 +1488,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
seq_buf_printf(&s, "slab %llu\n", seq_buf_printf(&s, "slab %llu\n",
(u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B))); memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)));
seq_buf_printf(&s, "percpu %llu\n",
(u64)memcg_page_state(memcg, MEMCG_PERCPU_B));
seq_buf_printf(&s, "sock %llu\n", seq_buf_printf(&s, "sock %llu\n",
(u64)memcg_page_state(memcg, MEMCG_SOCK) * (u64)memcg_page_state(memcg, MEMCG_SOCK) *
PAGE_SIZE); PAGE_SIZE);
......
...@@ -1610,6 +1610,11 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, ...@@ -1610,6 +1610,11 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
if (chunk) { if (chunk) {
chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
rcu_read_lock();
mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
size * num_possible_cpus());
rcu_read_unlock();
} else { } else {
obj_cgroup_uncharge(objcg, size * num_possible_cpus()); obj_cgroup_uncharge(objcg, size * num_possible_cpus());
obj_cgroup_put(objcg); obj_cgroup_put(objcg);
...@@ -1628,6 +1633,11 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) ...@@ -1628,6 +1633,11 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
obj_cgroup_uncharge(objcg, size * num_possible_cpus()); obj_cgroup_uncharge(objcg, size * num_possible_cpus());
rcu_read_lock();
mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
-(size * num_possible_cpus()));
rcu_read_unlock();
obj_cgroup_put(objcg); obj_cgroup_put(objcg);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment