Commit 3e39ce26 authored by Vasily Gorbik

s390/kasan: add KASAN_VMALLOC support

Add KASAN_VMALLOC support, which enables vmalloc memory area access
checks and allows the use of VMAP_STACK under KASAN.

KASAN_VMALLOC changes the way vmalloc and modules areas shadow memory
is handled. With this new approach only top level page tables are
pre-populated and lower levels are filled dynamically upon memory
allocation.
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 1b68ac86
...@@ -124,6 +124,7 @@ config S390 ...@@ -124,6 +124,7 @@ config S390
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY select HAVE_ARCH_SOFT_DIRTY
......
...@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void) ...@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void)
enum populate_mode { enum populate_mode {
POPULATE_ONE2ONE, POPULATE_ONE2ONE,
POPULATE_MAP, POPULATE_MAP,
POPULATE_ZERO_SHADOW POPULATE_ZERO_SHADOW,
POPULATE_SHALLOW
}; };
static void __init kasan_early_vmemmap_populate(unsigned long address, static void __init kasan_early_vmemmap_populate(unsigned long address,
unsigned long end, unsigned long end,
...@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pgd_populate(&init_mm, pg_dir, p4_dir); pgd_populate(&init_mm, pg_dir, p4_dir);
} }
if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
mode == POPULATE_SHALLOW) {
address = (address + P4D_SIZE) & P4D_MASK;
continue;
}
p4_dir = p4d_offset(pg_dir, address); p4_dir = p4d_offset(pg_dir, address);
if (p4d_none(*p4_dir)) { if (p4d_none(*p4_dir)) {
if (mode == POPULATE_ZERO_SHADOW && if (mode == POPULATE_ZERO_SHADOW &&
...@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
p4d_populate(&init_mm, p4_dir, pu_dir); p4d_populate(&init_mm, p4_dir, pu_dir);
} }
if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
mode == POPULATE_SHALLOW) {
address = (address + PUD_SIZE) & PUD_MASK;
continue;
}
pu_dir = pud_offset(p4_dir, address); pu_dir = pud_offset(p4_dir, address);
if (pud_none(*pu_dir)) { if (pud_none(*pu_dir)) {
if (mode == POPULATE_ZERO_SHADOW && if (mode == POPULATE_ZERO_SHADOW &&
...@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, ...@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
page = kasan_early_shadow_page; page = kasan_early_shadow_page;
pte_val(*pt_dir) = __pa(page) | pgt_prot_zero; pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
break; break;
case POPULATE_SHALLOW:
/* should never happen */
break;
} }
} }
address += PAGE_SIZE; address += PAGE_SIZE;
...@@ -313,22 +329,50 @@ void __init kasan_early_init(void) ...@@ -313,22 +329,50 @@ void __init kasan_early_init(void)
init_mm.pgd = early_pg_dir; init_mm.pgd = early_pg_dir;
/* /*
* Current memory layout: * Current memory layout:
* +- 0 -------------+ +- shadow start -+ * +- 0 -------------+ +- shadow start -+
* | 1:1 ram mapping | /| 1/8 ram | * | 1:1 ram mapping | /| 1/8 ram |
* +- end of ram ----+ / +----------------+ * | | / | |
* | ... gap ... |/ | kasan | * +- end of ram ----+ / +----------------+
* +- shadow start --+ | zero | * | ... gap ... | / | |
* | 1/8 addr space | | page | * | |/ | kasan |
* +- shadow end -+ | mapping | * +- shadow start --+ | zero |
* | ... gap ... |\ | (untracked) | * | 1/8 addr space | | page |
* +- modules vaddr -+ \ +----------------+ * +- shadow end -+ | mapping |
* | 2Gb | \| unmapped | allocated per module * | ... gap ... |\ | (untracked) |
* +-----------------+ +- shadow end ---+ * +- vmalloc area -+ \ | |
* | vmalloc_size | \ | |
* +- modules vaddr -+ \ +----------------+
* | 2Gb | \| unmapped | allocated per module
* +-----------------+ +- shadow end ---+
*
* Current memory layout (KASAN_VMALLOC):
* +- 0 -------------+ +- shadow start -+
* | 1:1 ram mapping | /| 1/8 ram |
* | | / | |
* +- end of ram ----+ / +----------------+
* | ... gap ... | / | kasan |
* | |/ | zero |
* +- shadow start --+ | page |
* | 1/8 addr space | | mapping |
* +- shadow end -+ | (untracked) |
* | ... gap ... |\ | |
* +- vmalloc area -+ \ +- vmalloc area -+
* | vmalloc_size | \ |shallow populate|
* +- modules vaddr -+ \ +- modules area -+
* | 2Gb | \|shallow populate|
* +-----------------+ +- shadow end ---+
*/ */
/* populate kasan shadow (for identity mapping and zero page mapping) */ /* populate kasan shadow (for identity mapping and zero page mapping) */
kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP); kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES)) if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = vmax - MODULES_LEN; untracked_mem_end = vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
kasan_early_vmemmap_populate(__sha(untracked_mem_end),
__sha(vmax), POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
kasan_early_vmemmap_populate(__sha(max_physmem_end), kasan_early_vmemmap_populate(__sha(max_physmem_end),
__sha(untracked_mem_end), __sha(untracked_mem_end),
POPULATE_ZERO_SHADOW); POPULATE_ZERO_SHADOW);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment