Commit 12580e4b authored by Vladimir Davydov, committed by Linus Torvalds

mm: memcontrol: report kernel stack usage in cgroup2 memory.stat

Show how much memory is allocated to kernel stacks.
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 27ee57c9
@@ -843,6 +843,10 @@ PAGE_SIZE multiple when read back.
 	  Amount of memory used to cache filesystem data,
 	  including tmpfs and shared memory.
+	kernel_stack
+	  Amount of memory allocated to kernel stacks.
 	slab
 	  Amount of memory used for storing in-kernel data
......
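With the new entry, a memory.stat read on the unified (cgroup2) hierarchy reports kernel stack usage in bytes alongside the existing counters. A hypothetical excerpt (the cgroup path and values are made up for illustration; only fields touched by this patch are listed):

$ cat /sys/fs/cgroup/example/memory.stat
anon 10485760
file 2097152
kernel_stack 65536
slab 1941504
...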
@@ -52,9 +52,10 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 	/* default hierarchy stats */
-	MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+	MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
 	MEMCG_SLAB_RECLAIMABLE,
 	MEMCG_SLAB_UNRECLAIMABLE,
+	MEMCG_SOCK,
 	MEMCG_NR_STAT,
 };
......
@@ -164,12 +164,20 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
 						  THREAD_SIZE_ORDER);
 
+	if (page)
+		memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
+					    1 << THREAD_SIZE_ORDER);
+
 	return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
 {
-	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+	struct page *page = virt_to_page(ti);
+
+	memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
+				    -(1 << THREAD_SIZE_ORDER));
+	__free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_info_cache;
......
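Both call sites above account the stack pages via memcg_kmem_update_page_stat(), passing the page count as a positive or negative delta. That helper is not part of this hunk; the following is only a rough sketch of what such a per-memcg page-stat update can look like, assuming that kmem-accounted pages record their cgroup in page->mem_cgroup and that each memcg keeps per-cpu counters indexed by enum mem_cgroup_stat_index:

/*
 * Sketch only -- not the verbatim helper from this series.
 * Charges (val > 0) or uncharges (val < 0) 'val' pages worth of the
 * given statistic against the memcg that owns @page.
 */
static inline void memcg_kmem_update_page_stat(struct page *page,
					       enum mem_cgroup_stat_index idx,
					       int val)
{
	if (memcg_kmem_enabled() && page->mem_cgroup)
		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}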
@@ -5106,6 +5106,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
 		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
 		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+	seq_printf(m, "kernel_stack %llu\n",
+		   (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
 	seq_printf(m, "slab %llu\n",
 		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
 			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
......
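Note that the per-memcg counters are kept in pages; memory_stat_show() converts them to bytes by multiplying by PAGE_SIZE. Each stack therefore contributes 1 << THREAD_SIZE_ORDER pages, i.e. THREAD_SIZE bytes: assuming, for example, 4 KB pages and order-2 (16 KB) stacks, four live tasks in a cgroup would be reported as kernel_stack 65536.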