Commit d58106c3 authored by Vasily Gorbik, committed by Martin Schwidefsky

s390/kasan: use noexec and large pages

To lower memory footprint and speed up kasan initialisation detect
EDAT availability and use large pages if possible. As we know how
much memory is needed for initialisation, another simplistic large
page allocator is introduced to avoid memory fragmentation.

Since facilities list is retrieved anyhow, detect noexec support and
adjust pages attributes. Handle noexec kernel option to avoid inconsistent
kasan shadow memory pages flags.
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 793213a8
...@@ -13,6 +13,7 @@ int __bootdata(early_ipl_block_valid); ...@@ -13,6 +13,7 @@ int __bootdata(early_ipl_block_valid);
unsigned long __bootdata(memory_end); unsigned long __bootdata(memory_end);
int __bootdata(memory_end_set); int __bootdata(memory_end_set);
int __bootdata(noexec_disabled);
static inline int __diag308(unsigned long subcode, void *addr) static inline int __diag308(unsigned long subcode, void *addr)
{ {
...@@ -145,8 +146,10 @@ void setup_boot_command_line(void) ...@@ -145,8 +146,10 @@ void setup_boot_command_line(void)
static char command_line_buf[COMMAND_LINE_SIZE] __section(.data); static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
static void parse_mem_opt(void) static void parse_mem_opt(void)
{ {
char *args;
char *param, *val; char *param, *val;
bool enabled;
char *args;
int rc;
args = strcpy(command_line_buf, early_command_line); args = strcpy(command_line_buf, early_command_line);
while (*args) { while (*args) {
...@@ -156,6 +159,12 @@ static void parse_mem_opt(void) ...@@ -156,6 +159,12 @@ static void parse_mem_opt(void)
memory_end = memparse(val, NULL); memory_end = memparse(val, NULL);
memory_end_set = 1; memory_end_set = 1;
} }
if (!strcmp(param, "noexec")) {
rc = kstrtobool(val, &enabled);
if (!rc && !enabled)
noexec_disabled = 1;
}
} }
} }
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h>
#include "../lib/string.c" #include "../lib/string.c"
int strncmp(const char *cs, const char *ct, size_t count) int strncmp(const char *cs, const char *ct, size_t count)
...@@ -98,3 +99,40 @@ long simple_strtol(const char *cp, char **endp, unsigned int base) ...@@ -98,3 +99,40 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
return simple_strtoull(cp, endp, base); return simple_strtoull(cp, endp, base);
} }
/*
 * Minimal boot-time replacement for the kernel's kstrtobool().
 *
 * Parse a user-supplied boolean string: "y"/"Y"/"1" and "on"/"oN"/... mean
 * true; "n"/"N"/"0" and "off"/"oF"/... mean false.  Only the leading one or
 * two characters are inspected, so e.g. "yes" and "no" also match.
 *
 * Returns 0 and stores the value in *res on success, -EINVAL on a NULL or
 * unrecognised string (*res is left untouched in that case).
 */
int kstrtobool(const char *s, bool *res)
{
	char first, second;

	if (!s)
		return -EINVAL;

	first = s[0];
	if (first == 'y' || first == 'Y' || first == '1') {
		*res = true;
		return 0;
	}
	if (first == 'n' || first == 'N' || first == '0') {
		*res = false;
		return 0;
	}
	if (first == 'o' || first == 'O') {
		/* Safe: s[0] != '\0' guarantees s[1] is readable. */
		second = s[1];
		if (second == 'n' || second == 'N') {
			*res = true;
			return 0;
		}
		if (second == 'f' || second == 'F') {
			*res = false;
			return 0;
		}
	}
	return -EINVAL;
}
...@@ -468,6 +468,12 @@ static inline int is_module_addr(void *addr) ...@@ -468,6 +468,12 @@ static inline int is_module_addr(void *addr)
_SEGMENT_ENTRY_YOUNG | \ _SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_PROTECT | \ _SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_NOEXEC) _SEGMENT_ENTRY_NOEXEC)
/*
 * Large-page (segment) kernel mapping that stays executable: same as
 * SEGMENT_KERNEL above but without _SEGMENT_ENTRY_NOEXEC, for 1:1 mappings
 * of kernel text created by the early kasan code.
 */
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)
/* /*
* Region3 entry (large page) protection definitions. * Region3 entry (large page) protection definitions.
......
...@@ -65,6 +65,7 @@ ...@@ -65,6 +65,7 @@
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) #define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) #define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
extern int noexec_disabled;
extern int memory_end_set; extern int memory_end_set;
extern unsigned long memory_end; extern unsigned long memory_end;
extern unsigned long max_physmem_end; extern unsigned long max_physmem_end;
......
...@@ -90,6 +90,7 @@ char elf_platform[ELF_PLATFORM_SIZE]; ...@@ -90,6 +90,7 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0; unsigned long int_hwcap = 0;
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set); int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end); unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end); unsigned long __bootdata(max_physmem_end);
......
...@@ -7,11 +7,16 @@ ...@@ -7,11 +7,16 @@
#include <asm/kasan.h> #include <asm/kasan.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata; static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata; static unsigned long pgalloc_low __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x)) #define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
...@@ -24,6 +29,16 @@ static void __init kasan_early_panic(const char *reason) ...@@ -24,6 +29,16 @@ static void __init kasan_early_panic(const char *reason)
disabled_wait(0); disabled_wait(0);
} }
/*
 * Simplistic bump-down allocator handing out one segment-sized
 * (_SEGMENT_SIZE), segment-aligned chunk per call, carved from the
 * [segment_low, segment_pos) window set up in kasan_early_init().
 * Used to back kasan shadow memory with large pages when EDAT is
 * available, avoiding fragmentation of the 4k page allocator.
 *
 * Panics when the window is exhausted; kasan_early_panic() presumably
 * does not return (disabled wait) — NOTE(review): confirm, since
 * segment_pos has already been decremented at that point.
 */
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;
	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");
	return (void *)segment_pos;
}
static void * __init kasan_early_alloc_pages(unsigned int order) static void * __init kasan_early_alloc_pages(unsigned int order)
{ {
pgalloc_pos -= (PAGE_SIZE << order); pgalloc_pos -= (PAGE_SIZE << order);
...@@ -71,7 +86,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -71,7 +86,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
unsigned long end, unsigned long end,
enum populate_mode mode) enum populate_mode mode)
{ {
unsigned long pgt_prot_zero, pgt_prot; unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
pgd_t *pg_dir; pgd_t *pg_dir;
p4d_t *p4_dir; p4d_t *p4_dir;
pud_t *pu_dir; pud_t *pu_dir;
...@@ -79,8 +94,10 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -79,8 +94,10 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pte_t *pt_dir; pte_t *pt_dir;
pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO); pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
pgt_prot_zero &= ~_PAGE_NOEXEC; if (!has_nx)
pgt_prot_zero &= ~_PAGE_NOEXEC;
pgt_prot = pgprot_val(PAGE_KERNEL_EXEC); pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
while (address < end) { while (address < end) {
pg_dir = pgd_offset_k(address); pg_dir = pgd_offset_k(address);
...@@ -131,8 +148,27 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -131,8 +148,27 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
address = (address + PMD_SIZE) & PMD_MASK; address = (address + PMD_SIZE) & PMD_MASK;
continue; continue;
} }
/* the first megabyte of 1:1 is mapped with 4k pages */
if (has_edat && address && end - address >= PMD_SIZE &&
mode != POPULATE_ZERO_SHADOW) {
void *page;
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = kasan_early_pte_alloc(); pt_dir = kasan_early_pte_alloc();
pmd_populate(&init_mm, pm_dir, pt_dir); pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
} }
pt_dir = pte_offset_kernel(pm_dir, address); pt_dir = pte_offset_kernel(pm_dir, address);
...@@ -182,6 +218,20 @@ static void __init kasan_enable_dat(void) ...@@ -182,6 +218,20 @@ static void __init kasan_enable_dat(void)
__load_psw_mask(psw.mask); __load_psw_mask(psw.mask);
} }
/*
 * Populate the lowcore facility list and latch the two capabilities the
 * early kasan setup cares about:
 *  - facility 8 (enhanced DAT / EDAT-1): enables large (1 MB segment)
 *    pages for shadow mappings; the CR0 bit switches EDAT on.
 *  - facility 130 (instruction-execution protection): enables NOEXEC
 *    page/segment attributes, unless the user disabled it via the
 *    "noexec" kernel parameter (noexec_disabled).
 * Must run before any shadow page tables are built, since has_edat /
 * has_nx steer the protection flags chosen there.
 */
static void __init kasan_early_detect_facilities(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);	/* CR0.23: enhanced-DAT enablement */
		}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);	/* CR0.20: instruction-exec protection */
	}
}
void __init kasan_early_init(void) void __init kasan_early_init(void)
{ {
unsigned long untracked_mem_end; unsigned long untracked_mem_end;
...@@ -196,7 +246,9 @@ void __init kasan_early_init(void) ...@@ -196,7 +246,9 @@ void __init kasan_early_init(void)
pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY); pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY); p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
pgt_prot &= ~_PAGE_NOEXEC; kasan_early_detect_facilities();
if (!has_nx)
pgt_prot &= ~_PAGE_NOEXEC;
pte_z = __pte(__pa(kasan_zero_page) | pgt_prot); pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
/* 3 level paging */ /* 3 level paging */
...@@ -224,7 +276,13 @@ void __init kasan_early_init(void) ...@@ -224,7 +276,13 @@ void __init kasan_early_init(void)
if (pgalloc_low + shadow_alloc_size > memsize) if (pgalloc_low + shadow_alloc_size > memsize)
kasan_early_panic("out of memory during initialisation\n"); kasan_early_panic("out of memory during initialisation\n");
pgalloc_pos = memsize; if (has_edat) {
segment_pos = round_down(memsize, _SEGMENT_SIZE);
segment_low = segment_pos - shadow_alloc_size;
pgalloc_pos = segment_low;
} else {
pgalloc_pos = memsize;
}
init_mm.pgd = early_pg_dir; init_mm.pgd = early_pg_dir;
/* /*
* Current memory layout: * Current memory layout:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment