Commit b2d24b97 authored by Gerald Schaefer, committed by Martin Schwidefsky

s390/kernel: add support for kernel address space layout randomization (KASLR)

This patch adds support for relocating the kernel to a random address.
The random kernel offset is obtained from cpacf, using either TRNG, PRNO,
or KMC_PRNG, depending on supported MSA level.

KERNELOFFSET is added to vmcoreinfo, for crash --kaslr support.
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Reviewed-by: Philipp Rudo <prudo@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent a80313ff
...@@ -637,6 +637,16 @@ config RELOCATABLE ...@@ -637,6 +637,16 @@ config RELOCATABLE
The relocations make the kernel image about 15% larger (compressed The relocations make the kernel image about 15% larger (compressed
10%), but are discarded at runtime. 10%), but are discarded at runtime.
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image (KASLR)"
depends on RELOCATABLE
default y
help
In support of Kernel Address Space Layout Randomization (KASLR),
this randomizes the address at which the kernel image is loaded,
as a security feature that deters exploit attempts relying on
knowledge of the location of kernel internals.
endmenu endmenu
menu "Memory setup" menu "Memory setup"
......
...@@ -33,6 +33,7 @@ obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ...@@ -33,6 +33,7 @@ obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += ctype.o text_dma.o obj-y += ctype.o text_dma.o
obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y) targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
subdir- := compressed subdir- := compressed
......
...@@ -9,6 +9,9 @@ void setup_boot_command_line(void); ...@@ -9,6 +9,9 @@ void setup_boot_command_line(void);
void parse_boot_command_line(void); void parse_boot_command_line(void);
void setup_memory_end(void); void setup_memory_end(void);
void print_missing_facilities(void); void print_missing_facilities(void);
unsigned long get_random_base(unsigned long safe_addr);
extern int kaslr_enabled;
unsigned long read_ipl_report(unsigned long safe_offset); unsigned long read_ipl_report(unsigned long safe_offset);
......
...@@ -18,6 +18,8 @@ unsigned long __bootdata(memory_end); ...@@ -18,6 +18,8 @@ unsigned long __bootdata(memory_end);
int __bootdata(memory_end_set); int __bootdata(memory_end_set);
int __bootdata(noexec_disabled); int __bootdata(noexec_disabled);
int kaslr_enabled __section(.data);
static inline int __diag308(unsigned long subcode, void *addr) static inline int __diag308(unsigned long subcode, void *addr)
{ {
register unsigned long _addr asm("0") = (unsigned long)addr; register unsigned long _addr asm("0") = (unsigned long)addr;
...@@ -214,6 +216,7 @@ void parse_boot_command_line(void) ...@@ -214,6 +216,7 @@ void parse_boot_command_line(void)
char *args; char *args;
int rc; int rc;
kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
args = strcpy(command_line_buf, early_command_line); args = strcpy(command_line_buf, early_command_line);
while (*args) { while (*args) {
args = next_arg(args, &param, &val); args = next_arg(args, &param, &val);
...@@ -231,15 +234,21 @@ void parse_boot_command_line(void) ...@@ -231,15 +234,21 @@ void parse_boot_command_line(void)
if (!strcmp(param, "facilities")) if (!strcmp(param, "facilities"))
modify_fac_list(val); modify_fac_list(val);
if (!strcmp(param, "nokaslr"))
kaslr_enabled = 0;
} }
} }
void setup_memory_end(void) void setup_memory_end(void)
{ {
#ifdef CONFIG_CRASH_DUMP #ifdef CONFIG_CRASH_DUMP
if (!OLDMEM_BASE && ipl_block_valid && if (OLDMEM_BASE) {
kaslr_enabled = 0;
} else if (ipl_block_valid &&
ipl_block.pb0_hdr.pbt == IPL_PBT_FCP && ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) { ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) {
kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&memory_end) && memory_end) if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
memory_end_set = 1; memory_end_set = 1;
} }
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2019
*/
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG 3
/*
 * Parameter block for the PRNO (perform random number operation)
 * SHA-512 DRNG functions — assumed to match the CPACF-defined layout
 * (reseed counter, stream byte counter, internal state V and C);
 * TODO confirm against the z/Architecture Principles of Operation.
 */
struct prno_parm {
	u32 res;		/* reserved */
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];		/* DRNG internal state */
	u8  C[112];		/* DRNG internal constant */
};
/*
 * Parameter block for the legacy KMC-PRNG (TDES) function; parm_block
 * holds the cipher state that get_random() seeds and iterates.
 */
struct prng_parm {
	u8  parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};
/*
 * Probe which CPACF random-number facility the CPU offers.
 *
 * Returns the strongest available mode: PRNG_MODE_TRNG, PRNG_MODE_SHA512
 * or PRNG_MODE_TDES. Returns 0 (and reports it via the early SCLP
 * console) when even the baseline KMC-PRNG function is absent, in which
 * case KASLR cannot be used.
 */
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	return cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN) ?
		PRNG_MODE_SHA512 : PRNG_MODE_TDES;
}
/*
 * Produce a pseudo-random value in [0, limit), using the best CPACF
 * facility reported by check_prng(): true RNG, SHA-512 DRNG, or the
 * legacy TDES KMC-PRNG as a last resort.
 *
 * Returns 0 when no facility is available (the default case leaves
 * random == 0); callers treat 0 as "no randomness, KASLR off".
 *
 * NOTE(review): "random % limit" introduces a small modulo bias when
 * limit does not divide 2^64 — acceptable here, but worth confirming
 * that callers do not need a uniform distribution.
 */
static unsigned long get_random(unsigned long limit)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	/* TOD clock as a (weak) seed source for the deterministic modes */
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		/* hardware true RNG: read sizeof(random) bytes directly */
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		/* seed the SHA-512 DRNG with the TOD value, then generate */
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		/* stir the parameter block by feeding its own output back */
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (char *) entropy, (char *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		/* encrypt the seed in place to obtain the random value */
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		random = 0;	/* no PRNG facility: caller gets 0 */
	}
	return random % limit;
}
/*
 * Pick a random load address for the kernel image.
 *
 * @safe_addr: lowest address the kernel may be placed at (raised past the
 *             initrd, if present, and aligned up to THREAD_SIZE).
 *
 * Two-pass algorithm over the detected memory blocks (respecting
 * memory_end when set):
 *   1. Sum up, across every block large enough to hold the kernel
 *      (image + bss), the number of candidate byte offsets
 *      (end - start - kernel_size) into block_sum.
 *   2. Draw a random offset into that candidate space, then walk the
 *      blocks again to translate the cumulative offset back into a
 *      physical address, aligned down to THREAD_SIZE.
 *
 * Returns 0 when KASLR is not possible (no block can hold the kernel,
 * or the PRNG yielded nothing) — caller then loads at the default
 * address.
 *
 * NOTE(review): clamping "base" up to safe_addr (instead of redrawing)
 * skews the distribution toward safe_addr, and the clamped value may no
 * longer correspond to the block chosen by the cumulative-sum walk —
 * verify the second pass still yields an address within a usable block.
 */
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long base, start, end, kernel_size;
	unsigned long block_sum, offset;
	int i;

	/* never place the kernel over the initrd */
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	/* pass 1: count candidate byte offsets across all usable blocks */
	block_sum = 0;
	for_each_mem_detect_block(i, &start, &end) {
		if (memory_end_set) {
			if (start >= memory_end)
				break;
			if (end > memory_end)
				end = memory_end;
		}
		if (end - start < kernel_size)
			continue;
		block_sum += end - start - kernel_size;
	}
	if (!block_sum) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	base = get_random(block_sum);
	if (base == 0)
		return 0;
	if (base < safe_addr)
		base = safe_addr;
	/* pass 2: map the cumulative offset back to a physical address */
	block_sum = offset = 0;
	for_each_mem_detect_block(i, &start, &end) {
		if (memory_end_set) {
			if (start >= memory_end)
				break;
			if (end > memory_end)
				end = memory_end;
		}
		if (end - start < kernel_size)
			continue;
		block_sum += end - start - kernel_size;
		if (base <= block_sum) {
			/* offset == block_sum at this block's first slot */
			base = start + base - offset;
			base = ALIGN_DOWN(base, THREAD_SIZE);
			break;
		}
		offset = block_sum;
	}
	return base;
}
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
extern char __boot_data_start[], __boot_data_end[]; extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[]; extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
/* /*
* Some code and data needs to stay below 2 GB, even when the kernel would be * Some code and data needs to stay below 2 GB, even when the kernel would be
...@@ -113,6 +114,7 @@ static void handle_relocs(unsigned long offset) ...@@ -113,6 +114,7 @@ static void handle_relocs(unsigned long offset)
void startup_kernel(void) void startup_kernel(void)
{ {
unsigned long random_lma;
unsigned long safe_addr; unsigned long safe_addr;
void *img; void *img;
...@@ -126,12 +128,37 @@ void startup_kernel(void) ...@@ -126,12 +128,37 @@ void startup_kernel(void)
parse_boot_command_line(); parse_boot_command_line();
setup_memory_end(); setup_memory_end();
detect_memory(); detect_memory();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
random_lma = get_random_base(safe_addr);
if (random_lma) {
__kaslr_offset = random_lma - vmlinux.default_lma;
img = (void *)vmlinux.default_lma;
vmlinux.default_lma += __kaslr_offset;
vmlinux.entry += __kaslr_offset;
vmlinux.bootdata_off += __kaslr_offset;
vmlinux.bootdata_preserved_off += __kaslr_offset;
vmlinux.rela_dyn_start += __kaslr_offset;
vmlinux.rela_dyn_end += __kaslr_offset;
vmlinux.dynsym_start += __kaslr_offset;
}
}
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) { if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel(); img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size); memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
} } else if (__kaslr_offset)
memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
copy_bootdata(); copy_bootdata();
if (IS_ENABLED(CONFIG_RELOCATABLE)) if (IS_ENABLED(CONFIG_RELOCATABLE))
handle_relocs(0); handle_relocs(__kaslr_offset);
if (__kaslr_offset) {
/* Clear non-relocated kernel */
if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
memset(img, 0, vmlinux.image_size);
}
vmlinux.entry(); vmlinux.entry();
} }
...@@ -147,6 +147,12 @@ extern void (*_machine_restart)(char *command); ...@@ -147,6 +147,12 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void); extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void); extern void (*_machine_power_off)(void);
extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
return __kaslr_offset;
}
#else /* __ASSEMBLY__ */ #else /* __ASSEMBLY__ */
#define IPL_DEVICE (IPL_DEVICE_OFFSET) #define IPL_DEVICE (IPL_DEVICE_OFFSET)
......
...@@ -255,6 +255,7 @@ void arch_crash_save_vmcoreinfo(void) ...@@ -255,6 +255,7 @@ void arch_crash_save_vmcoreinfo(void)
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
vmcoreinfo_append_str("SDMA=%lx\n", __sdma); vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
vmcoreinfo_append_str("EDMA=%lx\n", __edma); vmcoreinfo_append_str("EDMA=%lx\n", __edma);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
} }
void machine_shutdown(void) void machine_shutdown(void)
......
...@@ -108,6 +108,7 @@ unsigned long __bootdata_preserved(__stext_dma); ...@@ -108,6 +108,7 @@ unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma); unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma); unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma); unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long VMALLOC_START; unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START); EXPORT_SYMBOL(VMALLOC_START);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment