Commit b951aaff authored by Suren Baghdasaryan, committed by Andrew Morton

mm: enable page allocation tagging

Redefine page allocators to record allocation tags upon their invocation.
Instrument post_alloc_hook and free_pages_prepare to modify the current
allocation tag.

[surenb@google.com: undo _noprof additions in the documentation]
  Link: https://lkml.kernel.org/r/20240326231453.1206227-3-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-19-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8a2f1187
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -153,4 +153,18 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
+#define alloc_hooks_tag(_tag, _do_alloc)				\
+({									\
+	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
+	typeof(_do_alloc) _res = _do_alloc;				\
+	alloc_tag_restore(_tag, _old);					\
+	_res;								\
+})
+
+#define alloc_hooks(_do_alloc)						\
+({									\
+	DEFINE_ALLOC_TAG(_alloc_tag);					\
+	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
+})
+
 #endif /* _LINUX_ALLOC_TAG_H */
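
These two macros are the core of the mechanism: alloc_hooks() defines a static
allocation tag for the enclosing call site (via DEFINE_ALLOC_TAG()), makes it
the current task's allocation tag for the duration of the wrapped _noprof call,
and restores the previous tag afterwards. As a rough sketch only (not the
literal preprocessor output; the tag definition and the save/restore helpers
are simplified), a wrapped call such as alloc_pages(GFP_KERNEL, 0) behaves
approximately like this:

    /* Illustrative sketch only: a simplified view of one expanded call site. */
    static struct page *example_call_site(void)
    {
            /* DEFINE_ALLOC_TAG() places a per-call-site tag object in a
             * dedicated section; shown here as a plain static for illustration. */
            static struct alloc_tag _alloc_tag;     /* identifies this file:line */
            struct alloc_tag *_old;
            struct page *_res;

            _old = alloc_tag_save(&_alloc_tag);     /* current->alloc_tag = &_alloc_tag */
            _res = alloc_pages_noprof(GFP_KERNEL, 0); /* post_alloc_hook() charges the tag */
            alloc_tag_restore(&_alloc_tag, _old);   /* put the previous tag back */

            return _res;
    }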
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -6,6 +6,8 @@
 
 #include <linux/mmzone.h>
 #include <linux/topology.h>
+#include <linux/alloc_tag.h>
+#include <linux/sched.h>
 
 struct vm_area_struct;
 struct mempolicy;
@@ -175,42 +177,46 @@ static inline void arch_free_page(struct page *page, int order) { }
 static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
-struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+#define __alloc_pages(...)	alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))
+
+struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
+#define __folio_alloc(...)	alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))
 
-unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
 				struct list_head *page_list,
 				struct page **page_array);
+#define __alloc_pages_bulk(...)	alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
 
-unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 				unsigned long nr_pages,
 				struct page **page_array);
+#define alloc_pages_bulk_array_mempolicy(...)				\
+	alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-static inline unsigned long
-alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
-{
-	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
-}
+#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
+	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
 
-static inline unsigned long
-alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
-{
-	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
-}
+#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
+	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
 
 static inline unsigned long
-alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
+alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+				   struct page **page_array)
 {
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
+	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
 }
+#define alloc_pages_bulk_array_node(...)				\
+	alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
 
 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 {
 	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
@@ -230,82 +236,104 @@ static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 * online. For more general interface, see alloc_pages_node().
 */
 static inline struct page *
-__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
+__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
 {
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	warn_if_node_offline(nid, gfp_mask);
 
-	return __alloc_pages(gfp_mask, order, nid, NULL);
+	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
 }
+#define __alloc_pages_node(...)	alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
 
 static inline
-struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
 {
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	warn_if_node_offline(nid, gfp);
 
-	return __folio_alloc(gfp, order, nid, NULL);
+	return __folio_alloc_noprof(gfp, order, nid, NULL);
 }
+#define __folio_alloc_node(...)	alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))
 
 /*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
-						unsigned int order)
+static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
+						   unsigned int order)
 {
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 
-	return __alloc_pages_node(nid, gfp_mask, order);
+	return __alloc_pages_node_noprof(nid, gfp_mask, order);
 }
+#define alloc_pages_node(...)	alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
 
 #ifdef CONFIG_NUMA
-struct page *alloc_pages(gfp_t gfp, unsigned int order);
-struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
+struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid);
-struct folio *folio_alloc(gfp_t gfp, unsigned int order);
-struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage);
 #else
-static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
+static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
-	return alloc_pages_node(numa_node_id(), gfp_mask, order);
+	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
 }
-static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid)
 {
-	return alloc_pages(gfp, order);
+	return alloc_pages_noprof(gfp, order);
 }
-static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 {
 	return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define vma_alloc_folio(gfp, order, vma, addr, hugepage)		\
-	folio_alloc(gfp, order)
+#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage)		\
+	folio_alloc_noprof(gfp, order)
 #endif
+
+#define alloc_pages(...)		alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
+#define alloc_pages_mpol(...)		alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
+#define folio_alloc(...)		alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
+#define vma_alloc_folio(...)		alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
+
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 
-static inline struct page *alloc_page_vma(gfp_t gfp,
+static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);
 
 	return &folio->page;
 }
+#define alloc_page_vma(...)		alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
+
+extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
+#define __get_free_pages(...)		alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
 
-extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
-extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
+#define get_zeroed_page(...)		alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
+
+void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
+#define alloc_pages_exact(...)		alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))
 
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
 void free_pages_exact(void *virt, size_t size);
-__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
 
-#define __get_free_page(gfp_mask) \
-		__get_free_pages((gfp_mask), 0)
+__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
+#define alloc_pages_exact_nid(...)					\
+	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
+
+#define __get_free_page(gfp_mask)					\
+	__get_free_pages((gfp_mask), 0)
 
 #define __get_dma_pages(gfp_mask, order) \
 		__get_free_pages((gfp_mask) | GFP_DMA, (order))
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
@@ -374,10 +402,14 @@ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
 
 #ifdef CONFIG_CONTIG_ALLOC
 /* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range(unsigned long start, unsigned long end,
+extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
 			      unsigned migratetype, gfp_t gfp_mask);
-extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
-				       int nid, nodemask_t *nodemask);
+#define alloc_contig_range(...)		alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+					      int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...)		alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
 #endif
 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
......
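
The effect of the gfp.h changes above is that existing call sites keep using
the familiar names unmodified: the macro layer tags each call site
individually, while the renamed *_noprof variants do the real work and are
used wherever an allocation must not be re-attributed. A hypothetical caller,
shown only to illustrate that no call-site changes are required:

    /* Hypothetical example: an ordinary caller is untouched by this patch.
     * alloc_pages() is now a macro that records this call site's tag before
     * invoking alloc_pages_noprof(). */
    static struct page *grab_buffer(void)
    {
            return alloc_pages(GFP_KERNEL, 2);      /* accounted to this file:line */
    }

    static void drop_buffer(struct page *page)
    {
            /* The accounting is undone on the free path; free_pages_prepare()
             * is instrumented by this patch. */
            __free_pages(page, 2);
    }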
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -542,14 +542,17 @@ static inline void *detach_page_private(struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
 #else
-static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 {
-	return folio_alloc(gfp, order);
+	return folio_alloc_noprof(gfp, order);
 }
 #endif
 
+#define filemap_alloc_folio(...)				\
+	alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))
+
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
 	return &filemap_alloc_folio(gfp, 0)->page;
......
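
Note why a helper such as filemap_alloc_folio() becomes a _noprof function plus
a macro rather than staying a plain function: if it remained an ordinary
function, every page-cache allocation would be attributed to the single line
inside the helper that reaches the page allocator. Wrapping the public name in
alloc_hooks() pushes the tag out to the real caller instead. A hypothetical
filesystem-side caller, for illustration only:

    /* Hypothetical filesystem code, unchanged by this patch: because
     * filemap_alloc_folio() is now an alloc_hooks() macro, the allocation is
     * charged to this call site rather than to the helper in mm/filemap.c. */
    static struct folio *fs_get_folio(gfp_t gfp)
    {
            return filemap_alloc_folio(gfp, 0);     /* tag records this file:line */
    }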
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1851,7 +1851,7 @@ static void isolate_freepages(struct compact_control *cc)
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
-static struct folio *compaction_alloc(struct folio *src, unsigned long data)
+static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long data)
 {
 	struct compact_control *cc = (struct compact_control *)data;
 	struct folio *dst;
@@ -1898,6 +1898,11 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
 	return page_rmappable_folio(&dst->page);
 }
 
+static struct folio *compaction_alloc(struct folio *src, unsigned long data)
+{
+	return alloc_hooks(compaction_alloc_noprof(src, data));
+}
+
 /*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
......
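
compaction_alloc() is a migrate callback, so it must keep a real function whose
address can be taken; it cannot simply become an alloc_hooks() macro like the
header wrappers. Instead the body is renamed to compaction_alloc_noprof() and a
small out-of-line wrapper applies alloc_hooks() around it, keeping the callback
symbol while still attributing its allocations to compaction. The same pattern
applies to any allocator that has to remain a function; a generic sketch with
hypothetical names:

    /* Generic sketch (hypothetical names): when an allocation helper must keep
     * a function address, split it into a _noprof body plus a tagged wrapper. */
    static struct folio *my_migrate_alloc_noprof(struct folio *src, unsigned long data)
    {
            /* ... actual allocation work using *_noprof() helpers ... */
            return NULL;    /* placeholder in this sketch */
    }

    static struct folio *my_migrate_alloc(struct folio *src, unsigned long data)
    {
            return alloc_hooks(my_migrate_alloc_noprof(src, data));
    }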
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -966,7 +966,7 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 EXPORT_SYMBOL_GPL(filemap_add_folio);
 
 #ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 {
 	int n;
 	struct folio *folio;
@@ -981,9 +981,9 @@ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
 		return folio;
 	}
-	return folio_alloc(gfp, order);
+	return folio_alloc_noprof(gfp, order);
 }
-EXPORT_SYMBOL(filemap_alloc_folio);
+EXPORT_SYMBOL(filemap_alloc_folio_noprof);
 #endif
 
 /*
......
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2201,9 +2201,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
 	 */
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-	page = __alloc_pages(preferred_gfp, order, nid, nodemask);
+	page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
 	if (!page)
-		page = __alloc_pages(gfp, order, nid, NULL);
+		page = __alloc_pages_noprof(gfp, order, nid, NULL);
 
 	return page;
 }
@@ -2218,7 +2218,7 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
 *
 * Return: The page on success or NULL if allocation fails.
 */
-struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *pol, pgoff_t ilx, int nid)
 {
 	nodemask_t *nodemask;
@@ -2249,7 +2249,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 			 * First, try to allocate THP only on local node, but
 			 * don't reclaim unnecessarily, just compact.
 			 */
-			page = __alloc_pages_node(nid,
+			page = __alloc_pages_node_noprof(nid,
 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
 			if (page || !(gfp & __GFP_DIRECT_RECLAIM))
 				return page;
@@ -2262,7 +2262,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 		}
 	}
 
-	page = __alloc_pages(gfp, order, nid, nodemask);
+	page = __alloc_pages_noprof(gfp, order, nid, nodemask);
 
 	if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
 		/* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
@@ -2293,7 +2293,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 *
 * Return: The folio on success or NULL if allocation fails.
 */
-struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr, bool hugepage)
 {
 	struct mempolicy *pol;
@@ -2301,12 +2301,12 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 	struct page *page;
 
 	pol = get_vma_policy(vma, addr, order, &ilx);
-	page = alloc_pages_mpol(gfp | __GFP_COMP, order,
+	page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
 				pol, ilx, numa_node_id());
 	mpol_cond_put(pol);
 	return page_rmappable_folio(page);
 }
-EXPORT_SYMBOL(vma_alloc_folio);
+EXPORT_SYMBOL(vma_alloc_folio_noprof);
 
 /**
 * alloc_pages - Allocate pages.
@@ -2322,7 +2322,7 @@ EXPORT_SYMBOL(vma_alloc_folio);
 * flags are used.
 * Return: The page on success or NULL if allocation fails.
 */
-struct page *alloc_pages(gfp_t gfp, unsigned int order)
+struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
 {
 	struct mempolicy *pol = &default_policy;
@@ -2333,16 +2333,16 @@ struct page *alloc_pages(gfp_t gfp, unsigned int order)
 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
 		pol = get_task_policy(current);
 
-	return alloc_pages_mpol(gfp, order,
-			pol, NO_INTERLEAVE_INDEX, numa_node_id());
+	return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX,
+				       numa_node_id());
 }
-EXPORT_SYMBOL(alloc_pages);
+EXPORT_SYMBOL(alloc_pages_noprof);
 
-struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
 {
-	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
+	return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order));
 }
-EXPORT_SYMBOL(folio_alloc);
+EXPORT_SYMBOL(folio_alloc_noprof);
 
 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 		struct mempolicy *pol, unsigned long nr_pages,
@@ -2361,13 +2361,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
 	for (i = 0; i < nodes; i++) {
 		if (delta) {
-			nr_allocated = __alloc_pages_bulk(gfp,
+			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
 					nr_pages_per_node + 1, NULL,
 					page_array);
 			delta--;
 		} else {
-			nr_allocated = __alloc_pages_bulk(gfp,
+			nr_allocated = alloc_pages_bulk_noprof(gfp,
 					interleave_nodes(pol), NULL,
 					nr_pages_per_node, NULL, page_array);
 		}
@@ -2504,11 +2504,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
 	preferred_gfp = gfp | __GFP_NOWARN;
 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-	nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
+	nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
 					   nr_pages, NULL, page_array);
 
 	if (nr_allocated < nr_pages)
-		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
+		nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
 				nr_pages - nr_allocated, NULL,
 				page_array + nr_allocated);
 	return nr_allocated;
@@ -2520,7 +2520,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
 * It can accelerate memory allocation especially interleaving
 * allocate memory.
 */
-unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
 		unsigned long nr_pages, struct page **page_array)
 {
 	struct mempolicy *pol = &default_policy;
@@ -2544,8 +2544,8 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
 		nid = numa_node_id();
 
 	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
-	return __alloc_pages_bulk(gfp, nid, nodemask,
+	return alloc_pages_bulk_noprof(gfp, nid, nodemask,
 				  nr_pages, NULL, page_array);
 }
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
......
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4391,7 +4391,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 *
 * Returns the number of pages on the list or array.
 */
-unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			nodemask_t *nodemask, int nr_pages,
 			struct list_head *page_list,
 			struct page **page_array)
@@ -4527,7 +4527,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 failed:
-	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
+	page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
 	if (page) {
 		if (page_list)
 			list_add(&page->lru, page_list);
@@ -4538,13 +4538,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	goto out;
 }
-EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
+EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
 
 /*
 * This is the 'heart' of the zoned buddy allocator.
 */
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask)
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
+				  int preferred_nid, nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4606,38 +4606,38 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages);
+EXPORT_SYMBOL(__alloc_pages_noprof);
 
-struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask)
 {
-	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
+	struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
 					preferred_nid, nodemask);
 	return page_rmappable_folio(page);
 }
-EXPORT_SYMBOL(__folio_alloc);
+EXPORT_SYMBOL(__folio_alloc_noprof);
 
 /*
 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
 * address cannot represent highmem pages. Use alloc_pages and then kmap if
 * you need to access high mem.
 */
-unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
+	page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
 	if (!page)
 		return 0;
 	return (unsigned long) page_address(page);
 }
-EXPORT_SYMBOL(__get_free_pages);
+EXPORT_SYMBOL(get_free_pages_noprof);
 
-unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
 {
-	return __get_free_page(gfp_mask | __GFP_ZERO);
+	return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
 }
-EXPORT_SYMBOL(get_zeroed_page);
+EXPORT_SYMBOL(get_zeroed_page_noprof);
 
 /**
 * __free_pages - Free pages allocated with alloc_pages().
@@ -4853,7 +4853,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
+void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
 {
 	unsigned int order = get_order(size);
 	unsigned long addr;
@@ -4861,10 +4861,10 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
 
-	addr = __get_free_pages(gfp_mask, order);
+	addr = get_free_pages_noprof(gfp_mask, order);
 	return make_alloc_exact(addr, order, size);
 }
-EXPORT_SYMBOL(alloc_pages_exact);
+EXPORT_SYMBOL(alloc_pages_exact_noprof);
 
 /**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
@@ -4878,7 +4878,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
 {
 	unsigned int order = get_order(size);
 	struct page *p;
@@ -4886,7 +4886,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 	if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
 		gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
 
-	p = alloc_pages_node(nid, gfp_mask, order);
+	p = alloc_pages_node_noprof(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
 	return make_alloc_exact((unsigned long)page_address(p), order, size);
@@ -6343,7 +6343,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
 * pages which PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
-int alloc_contig_range(unsigned long start, unsigned long end,
+int alloc_contig_range_noprof(unsigned long start, unsigned long end,
 		       unsigned migratetype, gfp_t gfp_mask)
 {
 	unsigned long outer_start, outer_end;
@@ -6467,15 +6467,15 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	undo_isolate_page_range(start, end, migratetype);
 	return ret;
 }
-EXPORT_SYMBOL(alloc_contig_range);
+EXPORT_SYMBOL(alloc_contig_range_noprof);
 
 static int __alloc_contig_pages(unsigned long start_pfn,
 				unsigned long nr_pages, gfp_t gfp_mask)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
 
-	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+	return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
 }
 
 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
@@ -6530,8 +6530,8 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
-struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
 {
 	unsigned long ret, pfn, flags;
 	struct zonelist *zonelist;
......