Commit 25dfe478 authored by Thomas Garnier, committed by Ingo Molnar

x86/mm/64: Enable KASLR for vmemmap memory region

Add vmemmap to the list of randomized memory regions.

The vmemmap region holds a representation of physical memory (through
a struct page array). An attacker could use this region to disclose the
kernel memory layout (by walking the page linked list).
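
For background, with CONFIG_SPARSEMEM_VMEMMAP the struct page array is a
flat virtual mapping based at VMEMMAP_START, so PFN-to-page translation is
plain arithmetic. A simplified sketch of that relationship (adapted from
include/asm-generic/memory_model.h, not part of this patch):

    /* vmemmap is (struct page *)VMEMMAP_START on x86-64; every physical
     * frame's struct page sits at a fixed offset from that base. */
    #define __pfn_to_page(pfn)  (vmemmap + (pfn))
    #define __page_to_pfn(page) (unsigned long)((page) - vmemmap)

With a build-time constant base, those addresses are predictable, which is
what this patch changes.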
Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel-hardening@lists.openwall.com
Link: http://lkml.kernel.org/r/1469635196-122447-1-git-send-email-thgarnie@google.com
[ Minor edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 404f6aac
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -6,6 +6,7 @@ unsigned long kaslr_get_random_long(const char *purpose);
 #ifdef CONFIG_RANDOMIZE_MEMORY
 extern unsigned long page_offset_base;
 extern unsigned long vmalloc_base;
+extern unsigned long vmemmap_base;
 
 void kernel_randomize_memory(void);
 #else
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -57,11 +57,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define MAXMEM		_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 #define VMALLOC_SIZE_TB	_AC(32, UL)
 #define __VMALLOC_BASE	_AC(0xffffc90000000000, UL)
-#define VMEMMAP_START	_AC(0xffffea0000000000, UL)
+#define __VMEMMAP_BASE	_AC(0xffffea0000000000, UL)
 #ifdef CONFIG_RANDOMIZE_MEMORY
 #define VMALLOC_START	vmalloc_base
+#define VMEMMAP_START	vmemmap_base
 #else
 #define VMALLOC_START	__VMALLOC_BASE
+#define VMEMMAP_START	__VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
 #define VMALLOC_END	(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
 #define MODULES_VADDR	(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
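
To illustrate the exposure this closes: before this change, the struct page
for any physical frame lived at a predictable virtual address computed from
the fixed base. A hypothetical userspace-style calculation (illustrative
only; the 64-byte struct page size is an assumption for common x86-64
configs):

    #include <stdio.h>

    #define VMEMMAP_START    0xffffea0000000000UL  /* pre-patch constant */
    #define STRUCT_PAGE_SIZE 64UL                  /* assumed size */

    int main(void)
    {
        unsigned long pfn = 0x12345;  /* any physical frame number */
        unsigned long page_vaddr = VMEMMAP_START + pfn * STRUCT_PAGE_SIZE;

        /* With a read primitive, an attacker could walk these
         * predictable struct page entries to map out kernel memory. */
        printf("struct page for pfn 0x%lx at 0x%lx\n", pfn, page_vaddr);
        return 0;
    }

With CONFIG_RANDOMIZE_MEMORY, VMEMMAP_START now resolves to the runtime
variable vmemmap_base instead, so the base is no longer predictable.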
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -40,17 +40,26 @@
  * You need to add an if/def entry if you introduce a new memory region
  * compatible with KASLR. Your entry must be in logical order with memory
  * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
+ * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
  * ensure that this order is correct and won't be changed.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-static const unsigned long vaddr_end = VMEMMAP_START;
+
+#if defined(CONFIG_X86_ESPFIX64)
+static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
+#elif defined(CONFIG_EFI)
+static const unsigned long vaddr_end = EFI_VA_START;
+#else
+static const unsigned long vaddr_end = __START_KERNEL_map;
+#endif
 
 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
 EXPORT_SYMBOL(page_offset_base);
 unsigned long vmalloc_base = __VMALLOC_BASE;
 EXPORT_SYMBOL(vmalloc_base);
+unsigned long vmemmap_base = __VMEMMAP_BASE;
+EXPORT_SYMBOL(vmemmap_base);
 
 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
@@ -63,6 +72,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
 	{ &page_offset_base, 64/* Maximum */ },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
+	{ &vmemmap_base, 1 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -89,6 +99,18 @@ void __init kernel_randomize_memory(void)
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;
 
+	/*
+	 * All these BUILD_BUG_ON checks ensure the memory layout is
+	 * consistent with the vaddr_start/vaddr_end variables.
+	 */
+	BUILD_BUG_ON(vaddr_start >= vaddr_end);
+	BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+		     vaddr_end >= EFI_VA_START);
+	BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
+		      config_enabled(CONFIG_EFI)) &&
+		     vaddr_end >= __START_KERNEL_map);
+	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+
 	if (!kaslr_memory_enabled())
 		return;
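
For context on how each kaslr_regions[] entry receives its share of
randomness, here is a standalone simulation of the entropy-splitting loop
in kernel_randomize_memory() (a sketch only: srandom()/random() stand in
for the kernel's seeded rnd_state, and the vaddr_end value assumes the
ESPFIX case above):

    #include <stdio.h>
    #include <stdlib.h>

    #define PUD_SHIFT 30                            /* 1 GB alignment */
    #define PUD_MASK  (~((1UL << PUD_SHIFT) - 1))

    struct region { const char *name; unsigned long size_tb; };

    int main(void)
    {
        /* Mirrors kaslr_regions[] after this patch. */
        struct region regions[] = {
            { "page_offset_base", 64 },  /* maximum physical memory */
            { "vmalloc_base",     32 },  /* VMALLOC_SIZE_TB */
            { "vmemmap_base",      1 },
        };
        int nr = sizeof(regions) / sizeof(regions[0]);
        unsigned long vaddr = 0xffff880000000000UL;     /* __PAGE_OFFSET_BASE */
        unsigned long vaddr_end = 0xffffff0000000000UL; /* ESPFIX_BASE_ADDR */
        unsigned long remain_entropy = vaddr_end - vaddr;

        /* Reserve every region's fixed size first; the leftover virtual
         * space is the entropy shared between the regions. */
        for (int i = 0; i < nr; i++)
            remain_entropy -= regions[i].size_tb << 40;

        srandom(42);

        for (int i = 0; i < nr; i++) {
            /* Split the remaining entropy evenly among the regions still
             * to be placed, then pick a random PUD-aligned offset. */
            unsigned long entropy = remain_entropy / (nr - i);
            unsigned long rand = ((unsigned long)random() << 32) | random();

            entropy = (rand % (entropy + 1)) & PUD_MASK;
            vaddr += entropy;
            printf("%s = 0x%lx\n", regions[i].name, vaddr);

            /* Jump over the region and restore PUD alignment. */
            vaddr += regions[i].size_tb << 40;
            vaddr = (vaddr & PUD_MASK) + (1UL << PUD_SHIFT);
            remain_entropy -= entropy;
        }
        return 0;
    }

Each region's base keeps its relative order but gains up to its share of
the leftover virtual space as a random, PUD-aligned offset.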