Commit 78cac48c authored by Borislav Petkov, committed by Ingo Molnar

x86/mm/KASLR: Propagate KASLR status to kernel proper

Commit:

  e2b32e67 ("x86, kaslr: randomize module base load address")

made module base address randomization unconditional and did not account
for KASLR having been disabled by CONFIG_HIBERNATION or the "nokaslr"
command line option. For more info see (now reverted) commit:

  f47233c2 ("x86/mm/ASLR: Propagate base load address calculation")

In order to propagate KASLR status to kernel proper, we need a single bit
in boot_params.hdr.loadflags. We've chosen bit 1, leaving the top-down
allocated bits for those meant to be used by the bootloader.
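
The resulting flow, condensed from the hunks below into one illustrative
C sketch (not compilable on its own; the identifiers are the ones this
patch introduces):

  /* loadflags: bit 1 */
  #define KASLR_FLAG	(1<<1)

  /* compressed kernel, entry of decompress_kernel(): clear the bit so
   * nothing a bootloader happened to put there can leak through */
  real_mode->hdr.loadflags &= ~KASLR_FLAG;

  /* compressed kernel, choose_kernel_location(): set the bit only after
   * the hibernation/"nokaslr" disable checks have been passed */
  boot_params->hdr.loadflags |= KASLR_FLAG;

  /* kernel proper: query the propagated status */
  static inline bool kaslr_enabled(void)
  {
  	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
  }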

Originally-From: Jiri Kosina <jkosina@suse.cz>
Suggested-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 47091e3c
@@ -406,6 +406,12 @@ Protocol: 2.00+
 	- If 0, the protected-mode code is loaded at 0x10000.
 	- If 1, the protected-mode code is loaded at 0x100000.
 
+  Bit 1 (kernel internal): KASLR_FLAG
+	- Used internally by the compressed kernel to communicate
+	  KASLR status to kernel proper.
+	  If 1, KASLR enabled.
+	  If 0, KASLR disabled.
+
   Bit 5 (write): QUIET_FLAG
 	- If 0, print early messages.
 	- If 1, suppress early messages.
......
@@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum,
 	return slots_fetch_random();
 }
 
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+				      unsigned char *input,
 				      unsigned long input_size,
 				      unsigned char *output,
 				      unsigned long output_size)
@@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
 	}
 #endif
 
+	boot_params->hdr.loadflags |= KASLR_FLAG;
+
 	/* Record the various known unsafe memory ranges. */
 	mem_avoid_init((unsigned long)input, input_size,
 		       (unsigned long)output, output_size);
......
@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 
 	real_mode = rmode;
 
+	/* Clear it for solely in-kernel use */
+	real_mode->hdr.loadflags &= ~KASLR_FLAG;
+
 	sanitize_boot_params(real_mode);
 
 	if (real_mode->screen_info.orig_video_mode == 7) {
@@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 	 * the entire decompressed kernel plus relocation table, or the
 	 * entire decompressed kernel plus .bss and .brk sections.
 	 */
-	output = choose_kernel_location(input_data, input_len, output,
+	output = choose_kernel_location(real_mode, input_data, input_len, output,
 					output_len > run_size ? output_len
 							      : run_size);
......
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+				      unsigned char *input,
 				      unsigned long input_size,
 				      unsigned char *output,
 				      unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
+				      unsigned char *input,
 				      unsigned long input_size,
 				      unsigned char *output,
 				      unsigned long output_size)
......
@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { }
  */
 extern struct boot_params boot_params;
 
+static inline bool kaslr_enabled(void)
+{
+	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
+}
+
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.
......
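(Illustration only: with the helper above, any kernel-proper code can gate
randomization-dependent behaviour on the propagated flag. The caller below
is hypothetical and not part of this patch.)

	if (kaslr_enabled())
		pr_info("kernel base was randomized\n");
	else
		pr_info("KASLR disabled, kernel at default base\n");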
@@ -15,6 +15,7 @@
 
 /* loadflags */
 #define LOADED_HIGH	(1<<0)
+#define KASLR_FLAG	(1<<1)
 #define QUIET_FLAG	(1<<5)
 #define KEEP_SEGMENTS	(1<<6)
 #define CAN_USE_HEAP	(1<<7)
......
@@ -33,6 +33,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/setup.h>
 
 #if 0
 #define DEBUGP(fmt, ...)				\
@@ -47,21 +48,13 @@ do {							\
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
-static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
-static int __init parse_nokaslr(char *p)
-{
-	randomize_modules = 0;
-	return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
 static unsigned long int get_module_load_offset(void)
 {
-	if (randomize_modules) {
+	if (kaslr_enabled()) {
 		mutex_lock(&module_kaslr_mutex);
 		/*
 		 * Calculate the module_load_offset the first time this
......
@@ -832,10 +832,15 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-	pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
-		 "(relocation range: 0x%lx-0x%lx)\n",
-		 (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
-		 __START_KERNEL_map, MODULES_VADDR-1);
+	if (kaslr_enabled()) {
+		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+			 (unsigned long)&_text - __START_KERNEL,
+			 __START_KERNEL,
+			 __START_KERNEL_map,
+			 MODULES_VADDR-1);
+	} else {
+		pr_emerg("Kernel Offset: disabled\n");
+	}
 
 	return 0;
 }
......