Commit 2b905948 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Linus Torvalds

mm: remove __vmalloc_node_flags_caller

Just use __vmalloc_node instead, which gets an extra argument.  To be able
to use __vmalloc_node in all callers, make it available outside of
vmalloc.c and implement it in nommu.c.

[akpm@linux-foundation.org: fix nommu build]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20200414131348.444715-25-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4d39d728
...@@ -115,8 +115,8 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, ...@@ -115,8 +115,8 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask, unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node, pgprot_t prot, unsigned long vm_flags, int node,
const void *caller); const void *caller);
extern void *__vmalloc_node_flags_caller(unsigned long size, void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, gfp_t flags, void *caller); int node, const void *caller);
extern void vfree(const void *addr); extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr); extern void vfree_atomic(const void *addr);
......
...@@ -299,9 +299,8 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) ...@@ -299,9 +299,8 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL | return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL |
__GFP_RETRY_MAYFAIL | flags); __GFP_RETRY_MAYFAIL | flags);
} }
return __vmalloc_node_flags_caller(size, numa_node, return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_RETRY_MAYFAIL | flags,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | numa_node, __builtin_return_address(0));
flags, __builtin_return_address(0));
} }
void *bpf_map_area_alloc(u64 size, int numa_node) void *bpf_map_area_alloc(u64 size, int numa_node)
......
...@@ -150,10 +150,10 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask) ...@@ -150,10 +150,10 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask)
} }
EXPORT_SYMBOL(__vmalloc); EXPORT_SYMBOL(__vmalloc);
void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
void *caller) int node, const void *caller)
{ {
return __vmalloc(size, flags); return __vmalloc(size, gfp_mask);
} }
static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
......
...@@ -580,7 +580,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) ...@@ -580,7 +580,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if (ret || size <= PAGE_SIZE) if (ret || size <= PAGE_SIZE)
return ret; return ret;
return __vmalloc_node_flags_caller(size, node, flags, return __vmalloc_node(size, 1, flags, node,
__builtin_return_address(0)); __builtin_return_address(0));
} }
EXPORT_SYMBOL(kvmalloc_node); EXPORT_SYMBOL(kvmalloc_node);
......
...@@ -2401,8 +2401,6 @@ void *vmap(struct page **pages, unsigned int count, ...@@ -2401,8 +2401,6 @@ void *vmap(struct page **pages, unsigned int count,
} }
EXPORT_SYMBOL(vmap); EXPORT_SYMBOL(vmap);
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node) pgprot_t prot, int node)
{ {
...@@ -2553,7 +2551,7 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_range); ...@@ -2553,7 +2551,7 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_range);
* *
* Return: pointer to the allocated memory or %NULL on error * Return: pointer to the allocated memory or %NULL on error
*/ */
static void *__vmalloc_node(unsigned long size, unsigned long align, void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, int node, const void *caller) gfp_t gfp_mask, int node, const void *caller)
{ {
return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
...@@ -2567,12 +2565,6 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask) ...@@ -2567,12 +2565,6 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask)
} }
EXPORT_SYMBOL(__vmalloc); EXPORT_SYMBOL(__vmalloc);
void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
void *caller)
{
return __vmalloc_node(size, 1, flags, node, caller);
}
/** /**
* vmalloc - allocate virtually contiguous memory * vmalloc - allocate virtually contiguous memory
* @size: allocation size * @size: allocation size
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment