Commit f38fcb9c authored by Christoph Hellwig, committed by Linus Torvalds

mm: remove the prot argument to __vmalloc_node

The prot argument is always PAGE_KERNEL now, so drop it and pass PAGE_KERNEL directly in the remaining call to __vmalloc_node_range().
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-23-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 88dca4ca
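
For orientation, here is a minimal before/after sketch of the signature change. Since __vmalloc_node() is still static to mm/vmalloc.c in this patch, the caller shown is assumed to live in that file; the size, GFP flags, and variable names are hypothetical and not taken from the patch.

/* Hypothetical in-file caller before this patch: a pgprot_t had to be
 * passed explicitly, even though every caller used PAGE_KERNEL.
 */
void *buf_old = __vmalloc_node(4 * PAGE_SIZE, 1, GFP_KERNEL, PAGE_KERNEL,
			       NUMA_NO_NODE, __builtin_return_address(0));

/* The same caller after this patch: the prot argument is gone, and
 * __vmalloc_node() supplies PAGE_KERNEL to __vmalloc_node_range()
 * internally.
 */
void *buf_new = __vmalloc_node(4 * PAGE_SIZE, 1, GFP_KERNEL,
			       NUMA_NO_NODE, __builtin_return_address(0));
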
mm/vmalloc.c
@@ -2402,8 +2402,7 @@ void *vmap(struct page **pages, unsigned int count,
 EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_node(unsigned long size, unsigned long align,
-			gfp_t gfp_mask, pgprot_t prot,
-			int node, const void *caller);
+			gfp_t gfp_mask, int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node)
 {
@@ -2421,7 +2420,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
-				PAGE_KERNEL, node, area->caller);
+				node, area->caller);
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
@@ -2540,13 +2539,11 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_range);
  * @size:	allocation size
  * @align:	desired alignment
  * @gfp_mask:	flags for the page level allocator
- * @prot:	protection mask for the allocated pages
  * @node:	node to use for allocation or NUMA_NO_NODE
  * @caller:	caller's return address
  *
- * Allocate enough pages to cover @size from the page level
- * allocator with @gfp_mask flags.  Map them into contiguous
- * kernel virtual space, using a pagetable protection of @prot.
+ * Allocate enough pages to cover @size from the page level allocator with
+ * @gfp_mask flags.  Map them into contiguous kernel virtual space.
  *
  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
  * and __GFP_NOFAIL are not supported
@@ -2557,16 +2554,15 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_range);
  * Return: pointer to the allocated memory or %NULL on error
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
-			gfp_t gfp_mask, pgprot_t prot,
-			int node, const void *caller)
+			gfp_t gfp_mask, int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
-				gfp_mask, prot, 0, node, caller);
+				gfp_mask, PAGE_KERNEL, 0, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node(size, 1, gfp_mask, PAGE_KERNEL, NUMA_NO_NODE,
-				__builtin_return_address(0));
+	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -2574,15 +2570,15 @@ EXPORT_SYMBOL(__vmalloc);
 static inline void *__vmalloc_node_flags(unsigned long size,
 					int node, gfp_t flags)
 {
-	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
-					node, __builtin_return_address(0));
+	return __vmalloc_node(size, 1, flags, node,
+					__builtin_return_address(0));
 }
 
 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
 				  void *caller)
 {
-	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
+	return __vmalloc_node(size, 1, flags, node, caller);
 }
 
 /**
@@ -2657,8 +2653,8 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
-					node, __builtin_return_address(0));
+	return __vmalloc_node(size, 1, GFP_KERNEL, node,
+					__builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -2671,9 +2667,6 @@ EXPORT_SYMBOL(vmalloc_node);
  * allocator and map them into contiguous kernel virtual space.
  * The memory allocated is set to zero.
  *
- * For tight control over page level allocator and protection flags
- * use __vmalloc_node() instead.
- *
  * Return: pointer to the allocated memory or %NULL on error
  */
 void *vzalloc_node(unsigned long size, int node)
@@ -2746,8 +2739,8 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
-					NUMA_NO_NODE, __builtin_return_address(0));
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
+					__builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
...