Commit 2c321f3f authored by Suren Baghdasaryan, committed by Andrew Morton

mm: change inlined allocation helpers to account at the call site

The main goal of the memory allocation profiling patchset is to provide
accounting that is cheap enough to run in production.  To achieve that, we
inject counters using codetags at the allocation call sites to account
every time an allocation is made.  This injection allows us to perform
accounting efficiently because the injected counters are immediately
available, as opposed to alternative methods, such as using _RET_IP_, which
would require a counter lookup and appropriate locking that make accounting
much more expensive.  This method requires all allocation functions to
inject separate counters at their call sites so that their callers can be
accounted individually.  Counter injection is implemented by allocation
hooks, which should wrap all allocation functions.
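
As a rough user-space illustration of why call-site counters are cheap
(a simplified sketch only; the kernel's real mechanism is the
codetag/alloc_hooks() machinery, and counted_alloc()/struct alloc_counter
below are made-up names for the example), every expansion of the hook
macro gets its own static counter, so charging an allocation is a direct
increment with no counter lookup or locking:

#include <stdlib.h>

/*
 * Sketch only: each expansion site gets its own static counter, so the
 * accounting data for a caller is immediately available at that site.
 */
struct alloc_counter {
	const char *file;
	int line;
	unsigned long bytes;
};

#define counted_alloc(size)						\
({									\
	static struct alloc_counter _c = { __FILE__, __LINE__, 0 };	\
	void *_p = malloc(size);					\
	if (_p)								\
		_c.bytes += (size);					\
	_p;								\
})

int main(void)
{
	void *a = counted_alloc(128);	/* charged to this call site */
	void *b = counted_alloc(256);	/* charged to this call site */

	free(a);
	free(b);
	return 0;
}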

Inlined functions that perform allocations but do not use allocation
hooks are directly charged for the allocations they perform.  In most
cases these functions are just specialized allocation wrappers used from
multiple places to allocate objects of a specific type.  It would be more
useful to do the accounting at their call sites instead.  Instrument these
helpers to do the accounting at the call site.  Simple inlined allocation
wrappers are converted directly into macros.  More complex allocators, or
allocators with documentation, are converted into _noprof versions with
allocation hooks added on top.  This allows the memory allocation
profiling mechanism to charge allocations to the callers of these
functions.
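
For example, the two conversion patterns used throughout this patch look
like this (both taken from the hunks below):

/* Simple wrapper: becomes a plain macro, so the underlying allocator's
 * own alloc_hooks() expansion lands in the caller.
 */
#define nfs_alloc_iostats()	alloc_percpu(struct nfs_iostats)

/* Non-trivial helper: the body becomes a _noprof variant and the original
 * name becomes an accounting hook at the call site.
 */
static inline void *alloc_pgtable_page_noprof(int nid, gfp_t gfp)
{
	struct page *page;

	page = alloc_pages_node_noprof(nid, gfp | __GFP_ZERO, 0);
	return page ? page_address(page) : NULL;
}
#define alloc_pgtable_page(...)	alloc_hooks(alloc_pgtable_page_noprof(__VA_ARGS__))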

Link: https://lkml.kernel.org/r/20240415020731.1152108-1-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Jan Kara <jack@suse.cz>		[jbd2]
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ebdf9ad4
@@ -134,13 +134,14 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
 	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
 }
-static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
+static inline void *alloc_pgtable_page_noprof(int nid, gfp_t gfp)
 {
 	struct page *page;
-	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
+	page = alloc_pages_node_noprof(nid, gfp | __GFP_ZERO, 0);
 	return page ? page_address(page) : NULL;
 }
+#define alloc_pgtable_page(...)	alloc_hooks(alloc_pgtable_page_noprof(__VA_ARGS__))
 /*
  * This must be called after device probe completes. During probe
...
@@ -46,10 +46,7 @@ static inline void nfs_add_stats(const struct inode *inode,
 	nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
 }
-static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
-{
-	return alloc_percpu(struct nfs_iostats);
-}
+#define nfs_alloc_iostats()	alloc_percpu(struct nfs_iostats)
 static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
 {
...
@@ -47,26 +47,19 @@ acpi_status acpi_os_terminate(void);
  * However, boot has (system_state != SYSTEM_RUNNING)
  * to quiet __might_sleep() in kmalloc() and resume does not.
  */
-static inline void *acpi_os_allocate(acpi_size size)
-{
-	return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
-}
+#define acpi_os_allocate(_size)	\
+		kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
-static inline void *acpi_os_allocate_zeroed(acpi_size size)
-{
-	return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
-}
+#define acpi_os_allocate_zeroed(_size)	\
+		kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
 static inline void acpi_os_free(void *memory)
 {
 	kfree(memory);
 }
-static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
-{
-	return kmem_cache_zalloc(cache,
-				 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
-}
+#define acpi_os_acquire_object(_cache)	\
+		kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
 static inline acpi_thread_id acpi_os_get_thread_id(void)
 {
...
@@ -16,15 +16,16 @@
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
+static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
 {
-	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL &
+	struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL &
 			~__GFP_HIGHMEM, 0);
 	if (!ptdesc)
 		return NULL;
 	return ptdesc_address(ptdesc);
 }
+#define __pte_alloc_one_kernel(...)	alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
 #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
 /**
@@ -33,10 +34,11 @@ static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
 {
-	return __pte_alloc_one_kernel(mm);
+	return __pte_alloc_one_kernel_noprof(mm);
 }
+#define pte_alloc_one_kernel(...)	alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
 #endif
 /**
@@ -61,11 +63,11 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  *
  * Return: `struct page` referencing the ptdesc or %NULL on error
  */
-static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
+static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
 {
 	struct ptdesc *ptdesc;
-	ptdesc = pagetable_alloc(gfp, 0);
+	ptdesc = pagetable_alloc_noprof(gfp, 0);
 	if (!ptdesc)
 		return NULL;
 	if (!pagetable_pte_ctor(ptdesc)) {
@@ -75,6 +77,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
 	return ptdesc_page(ptdesc);
 }
+#define __pte_alloc_one(...)	alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
 #ifndef __HAVE_ARCH_PTE_ALLOC_ONE
 /**
@@ -85,10 +88,11 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
  *
  * Return: `struct page` referencing the ptdesc or %NULL on error
  */
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
 {
-	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
+	return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
 }
+#define pte_alloc_one(...)	alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
 #endif
 /*
@@ -124,14 +128,14 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
 {
 	struct ptdesc *ptdesc;
 	gfp_t gfp = GFP_PGTABLE_USER;
 	if (mm == &init_mm)
 		gfp = GFP_PGTABLE_KERNEL;
-	ptdesc = pagetable_alloc(gfp, 0);
+	ptdesc = pagetable_alloc_noprof(gfp, 0);
 	if (!ptdesc)
 		return NULL;
 	if (!pagetable_pmd_ctor(ptdesc)) {
@@ -140,6 +144,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	}
 	return ptdesc_address(ptdesc);
 }
+#define pmd_alloc_one(...)	alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
 #endif
 #ifndef __HAVE_ARCH_PMD_FREE
@@ -157,7 +162,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 #if CONFIG_PGTABLE_LEVELS > 3
-static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
 {
 	gfp_t gfp = GFP_PGTABLE_USER;
 	struct ptdesc *ptdesc;
@@ -166,13 +171,14 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 		gfp = GFP_PGTABLE_KERNEL;
 	gfp &= ~__GFP_HIGHMEM;
-	ptdesc = pagetable_alloc(gfp, 0);
+	ptdesc = pagetable_alloc_noprof(gfp, 0);
 	if (!ptdesc)
 		return NULL;
 	pagetable_pud_ctor(ptdesc);
 	return ptdesc_address(ptdesc);
 }
+#define __pud_alloc_one(...)	alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))
 #ifndef __HAVE_ARCH_PUD_ALLOC_ONE
 /**
@@ -184,10 +190,11 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
 {
-	return __pud_alloc_one(mm, addr);
+	return __pud_alloc_one_noprof(mm, addr);
 }
+#define pud_alloc_one(...)	alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
 #endif
 static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
...
@@ -578,12 +578,12 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
  *
  * Return: allocated request handle in case of success, or NULL if out of memory
  */
-static inline struct ahash_request *ahash_request_alloc(
+static inline struct ahash_request *ahash_request_alloc_noprof(
 	struct crypto_ahash *tfm, gfp_t gfp)
 {
 	struct ahash_request *req;
-	req = kmalloc(sizeof(struct ahash_request) +
+	req = kmalloc_noprof(sizeof(struct ahash_request) +
 		      crypto_ahash_reqsize(tfm), gfp);
 	if (likely(req))
@@ -591,6 +591,7 @@ static inline struct ahash_request *ahash_request_alloc(
 	return req;
 }
+#define ahash_request_alloc(...)	alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))
 /**
  * ahash_request_free() - zeroize and free the request data structure
...
@@ -69,15 +69,16 @@ static inline void acomp_request_complete(struct acomp_req *req,
 	crypto_request_complete(&req->base, err);
 }
-static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm)
 {
 	struct acomp_req *req;
-	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+	req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
 	if (likely(req))
 		acomp_request_set_tfm(req, tfm);
 	return req;
 }
+#define __acomp_request_alloc(...)	alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__))
 static inline void __acomp_request_free(struct acomp_req *req)
 {
...
@@ -861,12 +861,12 @@ static inline struct skcipher_request *skcipher_request_cast(
  *
  * Return: allocated request handle in case of success, or NULL if out of memory
  */
-static inline struct skcipher_request *skcipher_request_alloc(
+static inline struct skcipher_request *skcipher_request_alloc_noprof(
 	struct crypto_skcipher *tfm, gfp_t gfp)
 {
 	struct skcipher_request *req;
-	req = kmalloc(sizeof(struct skcipher_request) +
+	req = kmalloc_noprof(sizeof(struct skcipher_request) +
 		      crypto_skcipher_reqsize(tfm), gfp);
 	if (likely(req))
@@ -874,6 +874,7 @@ static inline struct skcipher_request *skcipher_request_alloc(
 	return req;
 }
+#define skcipher_request_alloc(...)	alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__))
 /**
  * skcipher_request_free() - zeroize and free request data structure
...
@@ -2244,31 +2244,14 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags);
 #else
-static inline void *
-bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
-		     int node)
-{
-	return kmalloc_node(size, flags, node);
-}
-static inline void *
-bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
-{
-	return kzalloc(size, flags);
-}
-static inline void *
-bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
-{
-	return kvcalloc(n, size, flags);
-}
-static inline void __percpu *
-bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
-		     gfp_t flags)
-{
-	return __alloc_percpu_gfp(size, align, flags);
-}
+#define bpf_map_kmalloc_node(_map, _size, _flags, _node)	\
+		kmalloc_node(_size, _flags, _node)
+#define bpf_map_kzalloc(_map, _size, _flags)	\
+		kzalloc(_size, _flags)
+#define bpf_map_kvcalloc(_map, _n, _size, _flags)	\
+		kvcalloc(_n, _size, _flags)
+#define bpf_map_alloc_percpu(_map, _size, _align, _flags)	\
+		__alloc_percpu_gfp(_size, _align, _flags)
 #endif
 static inline int
...
@@ -65,9 +65,9 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
 	return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
 }
-static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
+static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
 {
-	void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
+	void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
@@ -77,6 +77,7 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
 	}
 	return p;
 }
+#define kvmemdup_bpfptr(...)	alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__))
 static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
 {
...
@@ -86,10 +86,8 @@ dma_fence_chain_contained(struct dma_fence *fence)
  *
  * Returns a new struct dma_fence_chain object or NULL on failure.
  */
-static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
-{
-	return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
-};
+#define dma_fence_chain_alloc()	\
+		((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL))
 /**
  * dma_fence_chain_free
...
@@ -149,10 +149,8 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
 static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
 static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
 static inline void hid_bpf_device_init(struct hid_device *hid) {}
-static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
-{
-	return kmemdup(rdesc, *size, GFP_KERNEL);
-}
+#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size)	\
+		((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL))
 #endif /* CONFIG_HID_BPF */
...
@@ -1586,10 +1586,8 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
  */
 extern struct kmem_cache *jbd2_handle_cache;
-static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
-{
-	return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags);
-}
+#define jbd2_alloc_handle(_gfp_flags)	\
+		((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags))
 static inline void jbd2_free_handle(handle_t *handle)
 {
@@ -1602,10 +1600,8 @@ static inline void jbd2_free_handle(handle_t *handle)
  */
 extern struct kmem_cache *jbd2_inode_cache;
-static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
-{
-	return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
-}
+#define jbd2_alloc_inode(_gfp_flags)	\
+		((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags))
 static inline void jbd2_free_inode(struct jbd2_inode *jinode)
 {
...
@@ -2860,12 +2860,13 @@ static inline bool pagetable_is_reserved(struct ptdesc *pt)
  *
  * Return: The ptdesc describing the allocated page tables.
  */
-static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order)
+static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
 {
-	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
 	return page_ptdesc(page);
 }
+#define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
 /**
  * pagetable_free - Free pagetables
...
@@ -1170,14 +1170,15 @@ static inline void mm_init_cid(struct mm_struct *mm)
 	cpumask_clear(mm_cidmask(mm));
 }
-static inline int mm_alloc_cid(struct mm_struct *mm)
+static inline int mm_alloc_cid_noprof(struct mm_struct *mm)
 {
-	mm->pcpu_cid = alloc_percpu(struct mm_cid);
+	mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
 	if (!mm->pcpu_cid)
 		return -ENOMEM;
 	mm_init_cid(mm);
 	return 0;
 }
+#define mm_alloc_cid(...)	alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
 static inline void mm_destroy_cid(struct mm_struct *mm)
 {
...
@@ -151,6 +151,9 @@ extern size_t pcpu_alloc_size(void __percpu *__pdata);
 #define alloc_percpu(type)						\
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
 						__alignof__(type))
+#define alloc_percpu_noprof(type)					\
+	((typeof(type) __percpu *)pcpu_alloc_noprof(sizeof(type),	\
+				__alignof__(type), false, GFP_KERNEL))
 extern void free_percpu(void __percpu *__pdata);
...
@@ -464,11 +464,11 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
 /* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
  * documentation for vmalloc for which of them are legal.
  */
-static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc_noprof(unsigned int size, gfp_t gfp)
 {
 	if (size > KMALLOC_MAX_SIZE / sizeof(void *))
 		return NULL;
-	return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
+	return kvmalloc_array_noprof(size, sizeof(void *), gfp | __GFP_ZERO);
 }
 static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -484,9 +484,9 @@ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
 	r->batch = 1;
 }
-static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
+static inline int ptr_ring_init_noprof(struct ptr_ring *r, int size, gfp_t gfp)
 {
-	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
+	r->queue = __ptr_ring_init_queue_alloc_noprof(size, gfp);
 	if (!r->queue)
 		return -ENOMEM;
@@ -497,6 +497,7 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
 	return 0;
 }
+#define ptr_ring_init(...)	alloc_hooks(ptr_ring_init_noprof(__VA_ARGS__))
 /*
  * Return entries into ring. Destroy entries that don't fit.
@@ -587,11 +588,11 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
  * In particular if you consume ring in interrupt or BH context, you must
  * disable interrupts/BH when doing so.
  */
-static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
+static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp,
 				  void (*destroy)(void *))
 {
 	unsigned long flags;
-	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
+	void **queue = __ptr_ring_init_queue_alloc_noprof(size, gfp);
 	void **old;
 	if (!queue)
@@ -609,6 +610,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
 	return 0;
 }
+#define ptr_ring_resize(...)	alloc_hooks(ptr_ring_resize_noprof(__VA_ARGS__))
 /*
  * Note: producer lock is nested within consumer lock, so if you
@@ -616,7 +618,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
  * In particular if you consume ring in interrupt or BH context, you must
  * disable interrupts/BH when doing so.
  */
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
 					   unsigned int nrings,
 					   int size,
 					   gfp_t gfp, void (*destroy)(void *))
@@ -625,12 +627,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
 	void ***queues;
 	int i;
-	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
+	queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp);
 	if (!queues)
 		goto noqueues;
 	for (i = 0; i < nrings; ++i) {
-		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
+		queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp);
 		if (!queues[i])
 			goto nomem;
 	}
@@ -660,6 +662,8 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
 noqueues:
 	return -ENOMEM;
 }
+#define ptr_ring_resize_multiple(...) \
+	alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__))
 static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
 {
...
@@ -177,10 +177,11 @@ static inline int skb_array_peek_len_any(struct skb_array *a)
 	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
 }
-static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
+static inline int skb_array_init_noprof(struct skb_array *a, int size, gfp_t gfp)
 {
-	return ptr_ring_init(&a->ring, size, gfp);
+	return ptr_ring_init_noprof(&a->ring, size, gfp);
 }
+#define skb_array_init(...)	alloc_hooks(skb_array_init_noprof(__VA_ARGS__))
 static void __skb_array_destroy_skb(void *ptr)
 {
@@ -198,15 +199,17 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
 	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
 }
-static inline int skb_array_resize_multiple(struct skb_array **rings,
+static inline int skb_array_resize_multiple_noprof(struct skb_array **rings,
 					    int nrings, unsigned int size,
 					    gfp_t gfp)
 {
 	BUILD_BUG_ON(offsetof(struct skb_array, ring));
-	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
+	return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings,
 					nrings, size, gfp,
 					__skb_array_destroy_skb);
 }
+#define skb_array_resize_multiple(...)	\
+	alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__))
 static inline void skb_array_cleanup(struct skb_array *a)
 {
...
@@ -3371,7 +3371,7 @@ void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
  *
  * %NULL is returned if there is no free memory.
  */
-static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
+static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask,
 					     unsigned int order)
 {
 	/* This piece of code contains several assumptions.
@@ -3384,13 +3384,11 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
 	 */
 	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
-	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+	return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order);
 }
+#define __dev_alloc_pages(...)	alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__))
-static inline struct page *dev_alloc_pages(unsigned int order)
-{
-	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
-}
+#define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order)
 /**
  * __dev_alloc_page - allocate a page for network Rx
@@ -3400,15 +3398,13 @@ static inline struct page *dev_alloc_pages(unsigned int order)
  *
  * %NULL is returned if there is no free memory.
  */
-static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
+static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
 {
-	return __dev_alloc_pages(gfp_mask, 0);
+	return __dev_alloc_pages_noprof(gfp_mask, 0);
 }
+#define __dev_alloc_page(...)	alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__))
-static inline struct page *dev_alloc_page(void)
-{
-	return dev_alloc_pages(0);
-}
+#define dev_alloc_page()	dev_alloc_pages(0)
 /**
  * dev_page_is_reusable - check whether a page can be reused for network Rx
...
@@ -410,11 +410,9 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 			 struct sk_msg *msg);
-static inline struct sk_psock_link *sk_psock_init_link(void)
-{
-	return kzalloc(sizeof(struct sk_psock_link),
-		       GFP_ATOMIC | __GFP_NOWARN);
-}
+#define sk_psock_init_link()	\
+		((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link),	\
+						 GFP_ATOMIC | __GFP_NOWARN))
 static inline void sk_psock_free_link(struct sk_psock_link *link)
 {
...
@@ -744,6 +744,9 @@ void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
  */
 #define kmalloc_track_caller(...)	kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
+#define kmalloc_track_caller_noprof(...)	\
+		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
 static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
 						  int node)
 {
@@ -781,6 +784,7 @@ extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_si
 #define kvmalloc_node(...)	alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
 #define kvmalloc(_size, _flags)	kvmalloc_node(_size, _flags, NUMA_NO_NODE)
+#define kvmalloc_noprof(_size, _flags)	kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
 #define kvzalloc(_size, _flags)	kvmalloc(_size, _flags|__GFP_ZERO)
 #define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, _flags|__GFP_ZERO, _node)
@@ -797,6 +801,7 @@ static inline __alloc_size(1, 2) void *kvmalloc_array_noprof(size_t n, size_t si
 #define kvmalloc_array(...)	alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
 #define kvcalloc(_n, _size, _flags)	kvmalloc_array(_n, _size, _flags|__GFP_ZERO)
+#define kvcalloc_noprof(_n, _size, _flags)	kvmalloc_array_noprof(_n, _size, _flags|__GFP_ZERO)
 extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 		__realloc_size(3);
...
@@ -117,9 +117,9 @@ static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
 	return copy_to_sockptr_offset(dst, 0, src, size);
 }
-static inline void *memdup_sockptr(sockptr_t src, size_t len)
+static inline void *memdup_sockptr_noprof(sockptr_t src, size_t len)
 {
-	void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+	void *p = kmalloc_track_caller_noprof(len, GFP_USER | __GFP_NOWARN);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
@@ -129,10 +129,11 @@ static inline void *memdup_sockptr(sockptr_t src, size_t len)
 	}
 	return p;
 }
+#define memdup_sockptr(...)	alloc_hooks(memdup_sockptr_noprof(__VA_ARGS__))
-static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
+static inline void *memdup_sockptr_nul_noprof(sockptr_t src, size_t len)
 {
-	char *p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+	char *p = kmalloc_track_caller_noprof(len + 1, GFP_KERNEL);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
@@ -143,6 +144,7 @@ static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
 	p[len] = '\0';
 	return p;
 }
+#define memdup_sockptr_nul(...)	alloc_hooks(memdup_sockptr_nul_noprof(__VA_ARGS__))
 static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
 {
...
@@ -274,15 +274,17 @@ struct netlbl_calipso_ops {
  * on success, NULL on failure.
  *
  */
-static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
+static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc_noprof(gfp_t flags)
 {
 	struct netlbl_lsm_cache *cache;
-	cache = kzalloc(sizeof(*cache), flags);
+	cache = kzalloc_noprof(sizeof(*cache), flags);
 	if (cache)
 		refcount_set(&cache->refcount, 1);
 	return cache;
 }
+#define netlbl_secattr_cache_alloc(...)	\
+	alloc_hooks(netlbl_secattr_cache_alloc_noprof(__VA_ARGS__))
 /**
  * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct
@@ -311,10 +313,11 @@ static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
  * on failure.
  *
  */
-static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc(gfp_t flags)
+static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc_noprof(gfp_t flags)
 {
-	return kzalloc(sizeof(struct netlbl_lsm_catmap), flags);
+	return kzalloc_noprof(sizeof(struct netlbl_lsm_catmap), flags);
 }
+#define netlbl_catmap_alloc(...)	alloc_hooks(netlbl_catmap_alloc_noprof(__VA_ARGS__))
 /**
  * netlbl_catmap_free - Free a LSM secattr catmap
@@ -376,10 +379,11 @@ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
  * pointer on success, or NULL on failure.
  *
  */
-static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags)
+static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc_noprof(gfp_t flags)
 {
-	return kzalloc(sizeof(struct netlbl_lsm_secattr), flags);
+	return kzalloc_noprof(sizeof(struct netlbl_lsm_secattr), flags);
 }
+#define netlbl_secattr_alloc(...)	alloc_hooks(netlbl_secattr_alloc_noprof(__VA_ARGS__))
 /**
  * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
...
@@ -1891,10 +1891,11 @@ static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla)
  * @src: netlink attribute to duplicate from
  * @gfp: GFP mask
  */
-static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
+static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp)
 {
-	return kmemdup(nla_data(src), nla_len(src), gfp);
+	return kmemdup_noprof(nla_data(src), nla_len(src), gfp);
 }
+#define nla_memdup(...)	alloc_hooks(nla_memdup_noprof(__VA_ARGS__))
 /**
  * nla_nest_start_noflag - Start a new level of nested attributes
...
@@ -127,12 +127,12 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb,
 }
 static inline struct request_sock *
-reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
 	    bool attach_listener)
 {
 	struct request_sock *req;
-	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
 	if (!req)
 		return NULL;
 	req->rsk_listener = NULL;
@@ -157,6 +157,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
 	return req;
 }
+#define reqsk_alloc(...)	alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
 static inline void __reqsk_free(struct request_sock *req)
 {
...
@@ -75,9 +75,9 @@ tcx_entry_fetch(struct net_device *dev, bool ingress)
 	return rcu_dereference_rtnl(dev->tcx_egress);
 }
-static inline struct bpf_mprog_entry *tcx_entry_create(void)
+static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void)
 {
-	struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL);
+	struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL);
 	if (tcx) {
 		bpf_mprog_bundle_init(&tcx->bundle);
@@ -85,6 +85,7 @@ static inline struct bpf_mprog_entry *tcx_entry_create(void)
 	}
 	return NULL;
 }
+#define tcx_entry_create(...)	alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__))
 static inline void tcx_entry_free(struct bpf_mprog_entry *entry)
 {
...
@@ -23,7 +23,7 @@ simple_get_bytes(const void *p, const void *end, void *res, size_t len)
 }
 static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+simple_get_netobj_noprof(const void *p, const void *end, struct xdr_netobj *dest)
 {
 	const void *q;
 	unsigned int len;
@@ -35,7 +35,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
 	if (unlikely(q > end || q < p))
 		return ERR_PTR(-EFAULT);
 	if (len) {
-		dest->data = kmemdup(p, len, GFP_KERNEL);
+		dest->data = kmemdup_noprof(p, len, GFP_KERNEL);
 		if (unlikely(dest->data == NULL))
 			return ERR_PTR(-ENOMEM);
 	} else
@@ -43,3 +43,5 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
 	dest->len = len;
 	return q;
 }
+#define simple_get_netobj(...)	alloc_hooks(simple_get_netobj_noprof(__VA_ARGS__))