Commit efdc9490 authored by Andy Lutomirski's avatar Andy Lutomirski Committed by Linus Torvalds

mm: fix memcg stack accounting for sub-page stacks

We should account for stacks regardless of stack size, and we need to
account in sub-page units if THREAD_SIZE < PAGE_SIZE.  Change the units
to kilobytes and move it into account_kernel_stack().

Fixes: 12580e4b ("mm: memcontrol: report kernel stack usage in cgroup2 memory.stat")
Link: http://lkml.kernel.org/r/9b5314e3ee5eda61b0317ec1563768602c1ef438.1468523549.git.luto@kernel.org
Signed-off-by: default avatarAndy Lutomirski <luto@kernel.org>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: default avatarJohannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Reviewed-by: default avatarJosh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: default avatarVladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: default avatarMichal Hocko <mhocko@suse.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent d30dd8be
...@@ -52,7 +52,7 @@ enum mem_cgroup_stat_index { ...@@ -52,7 +52,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS, MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */ /* default hierarchy stats */
MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS, MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
MEMCG_SLAB_RECLAIMABLE, MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE, MEMCG_SLAB_UNRECLAIMABLE,
MEMCG_SOCK, MEMCG_SOCK,
......
...@@ -165,20 +165,12 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, ...@@ -165,20 +165,12 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
struct page *page = alloc_pages_node(node, THREADINFO_GFP, struct page *page = alloc_pages_node(node, THREADINFO_GFP,
THREAD_SIZE_ORDER); THREAD_SIZE_ORDER);
if (page)
memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
1 << THREAD_SIZE_ORDER);
return page ? page_address(page) : NULL; return page ? page_address(page) : NULL;
} }
static inline void free_thread_stack(unsigned long *stack) static inline void free_thread_stack(unsigned long *stack)
{ {
struct page *page = virt_to_page(stack); __free_pages(virt_to_page(stack), THREAD_SIZE_ORDER);
memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
-(1 << THREAD_SIZE_ORDER));
__free_pages(page, THREAD_SIZE_ORDER);
} }
# else # else
static struct kmem_cache *thread_stack_cache; static struct kmem_cache *thread_stack_cache;
...@@ -223,10 +215,15 @@ static struct kmem_cache *mm_cachep; ...@@ -223,10 +215,15 @@ static struct kmem_cache *mm_cachep;
static void account_kernel_stack(unsigned long *stack, int account) static void account_kernel_stack(unsigned long *stack, int account)
{ {
struct zone *zone = page_zone(virt_to_page(stack)); /* All stack pages are in the same zone and belong to the same memcg. */
struct page *first_page = virt_to_page(stack);
mod_zone_page_state(zone, NR_KERNEL_STACK_KB, mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
THREAD_SIZE / 1024 * account); THREAD_SIZE / 1024 * account);
memcg_kmem_update_page_stat(
first_page, MEMCG_KERNEL_STACK_KB,
account * (THREAD_SIZE / 1024));
} }
void free_task(struct task_struct *tsk) void free_task(struct task_struct *tsk)
......
...@@ -5171,7 +5171,7 @@ static int memory_stat_show(struct seq_file *m, void *v) ...@@ -5171,7 +5171,7 @@ static int memory_stat_show(struct seq_file *m, void *v)
seq_printf(m, "file %llu\n", seq_printf(m, "file %llu\n",
(u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE); (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
seq_printf(m, "kernel_stack %llu\n", seq_printf(m, "kernel_stack %llu\n",
(u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE); (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
seq_printf(m, "slab %llu\n", seq_printf(m, "slab %llu\n",
(u64)(stat[MEMCG_SLAB_RECLAIMABLE] + (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment