Commit be25d1d4 authored by Suren Baghdasaryan, committed by Andrew Morton

mm: create new codetag references during page splitting

When a high-order page is split into smaller ones, each newly split page
should get its codetag.  After the split each split page will be
referencing the original codetag.  The codetag's "bytes" counter remains
the same because the amount of allocated memory has not changed, however
the "calls" counter gets increased to keep the counter correct when these
individual pages get freed.

Link: https://lkml.kernel.org/r/20240321163705.3067592-20-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b951aaff
...@@ -117,6 +117,15 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag ...@@ -117,6 +117,15 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
this_cpu_inc(tag->counters->calls); this_cpu_inc(tag->counters->calls);
} }
/*
 * Point @ref at @tag after running the debug consistency check.
 *
 * Tolerates a NULL @ref or @tag (silently does nothing beyond the
 * check); the actual counter update happens in __alloc_tag_ref_set().
 */
static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (ref && tag)
		__alloc_tag_ref_set(ref, tag);
}
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{ {
alloc_tag_add_check(ref, tag); alloc_tag_add_check(ref, tag);
......
...@@ -67,11 +67,41 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) ...@@ -67,11 +67,41 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
} }
} }
/*
 * Propagate the head page's codetag reference to the tail pages when a
 * high-order page is split into @nr order-0 pages.
 *
 * Each tail page's page_ext reference is set to the head page's tag via
 * alloc_tag_ref_set(), which bumps the tag's "calls" counter so the
 * accounting stays balanced when the split pages are freed individually.
 * The "bytes" counter is untouched: the total allocated memory did not
 * change. No-op when memory allocation profiling is disabled or the
 * head page carries no tag.
 */
static inline void pgalloc_tag_split(struct page *page, unsigned int nr)
{
	struct page_ext *page_ext;
	union codetag_ref *first_ref;
	unsigned int i;

	if (!mem_alloc_profiling_enabled())
		return;

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	first_ref = codetag_ref_from_page_ext(page_ext);
	if (first_ref->ct) {
		struct alloc_tag *tag = ct_to_alloc_tag(first_ref->ct);

		/* Tail pages start at index 1; the head keeps its own ref. */
		page_ext = page_ext_next(page_ext);
		for (i = 1; i < nr; i++) {
			alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag);
			page_ext = page_ext_next(page_ext);
		}
	}
	page_ext_put(page_ext);
}
#else /* CONFIG_MEM_ALLOC_PROFILING */ #else /* CONFIG_MEM_ALLOC_PROFILING */
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {} unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING */ #endif /* CONFIG_MEM_ALLOC_PROFILING */
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <linux/sched/sysctl.h> #include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h> #include <linux/memory-tiers.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/pgalloc_tag.h>
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
...@@ -2946,6 +2947,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, ...@@ -2946,6 +2947,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* Caller disabled irqs, so they are still disabled here */ /* Caller disabled irqs, so they are still disabled here */
split_page_owner(head, order, new_order); split_page_owner(head, order, new_order);
pgalloc_tag_split(head, 1 << order);
/* See comment in __split_huge_page_tail() */ /* See comment in __split_huge_page_tail() */
if (folio_test_anon(folio)) { if (folio_test_anon(folio)) {
......
...@@ -2630,6 +2630,7 @@ void split_page(struct page *page, unsigned int order) ...@@ -2630,6 +2630,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++) for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i); set_page_refcounted(page + i);
split_page_owner(page, order, 0); split_page_owner(page, order, 0);
pgalloc_tag_split(page, 1 << order);
split_page_memcg(page, order, 0); split_page_memcg(page, order, 0);
} }
EXPORT_SYMBOL_GPL(split_page); EXPORT_SYMBOL_GPL(split_page);
...@@ -4827,6 +4828,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, ...@@ -4827,6 +4828,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
struct page *last = page + nr; struct page *last = page + nr;
split_page_owner(page, order, 0); split_page_owner(page, order, 0);
pgalloc_tag_split(page, 1 << order);
split_page_memcg(page, order, 0); split_page_memcg(page, order, 0);
while (page < --last) while (page < --last)
set_page_refcounted(last); set_page_refcounted(last);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment