Commit cb9e3c29 authored by Andrey Ryabinin, committed by Linus Torvalds

mm: vmalloc: pass additional vm_flags to __vmalloc_node_range()

For instrumenting global variables, KASan will need shadow memory backing the
memory used for modules.  So on module loading we will need to allocate memory
for the shadow and map it at the address in the shadow region that corresponds
to the address allocated in module_alloc().

__vmalloc_node_range() could be used for this purpose, except that it puts a
guard hole after the allocated area.  A guard hole in shadow memory would be a
problem, because at some future point we might need shadow memory at the
address occupied by that guard hole, and allocating shadow for module_alloc()
would then fail.

Now that we have the VM_NO_GUARD flag for disabling the guard page, we need a
way to pass it into __vmalloc_node_range().  Add a new 'vm_flags' parameter to
__vmalloc_node_range() for that purpose.
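As an illustration only (not part of this patch), a later KASan change could then allocate module shadow memory without a trailing guard page roughly as sketched below. The helper name kasan_alloc_module_shadow() and the exact gfp flags are hypothetical; kasan_mem_to_shadow() and KASAN_SHADOW_SCALE_SHIFT are assumed from the rest of the KASan series, and only the new vm_flags argument and VM_NO_GUARD come from this patch. Existing callers simply pass 0, as the hunks below show.

	#include <linux/kasan.h>
	#include <linux/vmalloc.h>

	/*
	 * Hypothetical sketch: back a module's address range with KASan
	 * shadow memory.  VM_NO_GUARD is passed through the new vm_flags
	 * parameter so no guard hole is placed after the area, leaving the
	 * neighbouring shadow addresses free for a later allocation.
	 */
	static void *kasan_alloc_module_shadow(const void *mod_addr, size_t size)
	{
		unsigned long shadow_start =
			(unsigned long)kasan_mem_to_shadow(mod_addr);
		unsigned long shadow_size = size >> KASAN_SHADOW_SCALE_SHIFT;

		return __vmalloc_node_range(shadow_size, 1, shadow_start,
					    shadow_start + shadow_size,
					    GFP_KERNEL | __GFP_ZERO,
					    PAGE_KERNEL, VM_NO_GUARD,
					    NUMA_NO_NODE,
					    __builtin_return_address(0));
	}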
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 71394fe5
@@ -41,7 +41,7 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #endif
...
@@ -35,8 +35,8 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
-				    __builtin_return_address(0));
+				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+				    NUMA_NO_NODE, __builtin_return_address(0));
 }

 enum aarch64_reloc_op {
...
@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
-				GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+				GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #endif
...
@@ -219,7 +219,7 @@ void *module_alloc(unsigned long size)
 	 * init_data correctly */
 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_HIGHMEM,
-				    PAGE_KERNEL_RWX, NUMA_NO_NODE,
+				    PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
...
@@ -50,7 +50,7 @@ void *module_alloc(unsigned long size)
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				    GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+				    GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
 #endif
...
@@ -29,7 +29,7 @@ static void *module_map(unsigned long size)
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+				GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #else
...
@@ -25,7 +25,7 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
...
@@ -88,7 +88,7 @@ void *module_alloc(unsigned long size)
 	return __vmalloc_node_range(size, 1,
 				    MODULES_VADDR + get_module_load_offset(),
 				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
-				    PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
...
@@ -76,7 +76,9 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
-			pgprot_t prot, int node, const void *caller);
+			pgprot_t prot, unsigned long vm_flags, int node,
+			const void *caller);
 extern void vfree(const void *addr);

 extern void *vmap(struct page **pages, unsigned int count,
...
@@ -1619,6 +1619,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  * @end: vm area range end
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
+ * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
  * @node: node to use for allocation or NUMA_NO_NODE
  * @caller: caller's return address
  *
@@ -1628,7 +1629,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
-			pgprot_t prot, int node, const void *caller)
+			pgprot_t prot, unsigned long vm_flags, int node,
+			const void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1638,8 +1640,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		goto fail;

-	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
-				start, end, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+				vm_flags, start, end, node, gfp_mask, caller);
 	if (!area)
 		goto fail;
@@ -1688,7 +1690,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 			int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
-				gfp_mask, prot, node, caller);
+				gfp_mask, prot, 0, node, caller);
 }

 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
...