Commit 90cbac0e authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Enable KFENCE for PPC32

Add architecture specific implementation details for KFENCE and enable
KFENCE for the ppc32 architecture. In particular, this implements the
required interface in <asm/kfence.h>.

KFENCE requires that attributes for pages from its memory pool can
individually be set. Therefore, force the Read/Write linear map to be
mapped at page granularity.
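
As a hypothetical illustration (not part of this commit), this is the class of
bug KFENCE can now catch on ppc32: an out-of-bounds access on a sampled slab
allocation. The module and all names in it are made up:

	/* kfence_demo.c: hypothetical test module, not from this series. */
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	static int __init kfence_demo_init(void)
	{
		volatile char *buf = kmalloc(32, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* If this allocation was sampled into the KFENCE pool, the
		 * write below lands on a non-present guard page and the
		 * resulting page fault is turned into a KFENCE report. */
		buf[32] = 0xff;		/* out-of-bounds write */
		kfree((void *)buf);
		return 0;
	}
	module_init(kfence_demo_init);
	MODULE_LICENSE("GPL");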
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Marco Elver <elver@google.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8dfe1bd2abde26337c1d8c1ad0acfcc82185e0d5.1614868445.git.christophe.leroy@csgroup.eu
parent 0b71b372
arch/powerpc/Kconfig
@@ -185,6 +185,7 @@ config PPC
 	select HAVE_ARCH_KASAN			if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KASAN_VMALLOC		if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_KFENCE			if PPC32
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
 	select HAVE_ARCH_NVRAM_OPS
@@ -786,7 +787,7 @@ config THREAD_SHIFT
 config DATA_SHIFT_BOOL
 	bool "Set custom data alignment"
 	depends on ADVANCED_OPTIONS
-	depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
+	depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
 	depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
 	help
 	  This option allows you to set the kernel data alignment. When
@@ -798,13 +799,13 @@ config DATA_SHIFT_BOOL
 config DATA_SHIFT
 	int "Data shift" if DATA_SHIFT_BOOL
 	default 24 if STRICT_KERNEL_RWX && PPC64
-	range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32
-	range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx
+	range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
+	range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
 	default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
-	default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32
+	default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
 	default 23 if STRICT_KERNEL_RWX && PPC_8xx
-	default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA
-	default 19 if DEBUG_PAGEALLOC && PPC_8xx
+	default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
+	default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
 	default PPC_PAGE_SHIFT
 	help
 	  On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
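A sketch of a .config fragment exercising the new option (values are
assumptions; 100 is the upstream default sample interval, and
HAVE_ARCH_KFENCE is selected automatically rather than set by hand):

	CONFIG_PPC32=y
	CONFIG_HAVE_ARCH_KFENCE=y
	CONFIG_KFENCE=y
	CONFIG_KFENCE_SAMPLE_INTERVAL=100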
arch/powerpc/include/asm/kfence.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * powerpc KFENCE support.
 *
 * Copyright (C) 2020 CS GROUP France
 */

#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H

#include <linux/mm.h>
#include <asm/pgtable.h>

static inline bool arch_kfence_init_pool(void)
{
	return true;
}

static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *kpte = virt_to_kpte(addr);

	if (protect) {
		pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
	}

	return true;
}

#endif /* __ASM_POWERPC_KFENCE_H */
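On ppc32, pte_update() takes (mm, addr, ptep, clr, set, huge), so protecting a
page clears _PAGE_PRESENT and unprotecting sets it back. Only the protect path
needs a TLB flush: a PTE that was non-present cannot have been cached in the
TLB. A minimal sketch of how the generic KFENCE core drives this hook
(__kfence_pool is the pool symbol from mm/kfence; the rest is illustrative):

	/* Sketch only: guard one page of the pool, then re-arm it. */
	unsigned long page = (unsigned long)__kfence_pool;

	kfence_protect_page(page, true);   /* clear _PAGE_PRESENT + TLB flush */
	/* ...any access to this page now faults into the fault handler... */
	kfence_protect_page(page, false);  /* restore _PAGE_PRESENT, no flush */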
arch/powerpc/mm/book3s32/mmu.c
@@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
 
-	if (debug_pagealloc_enabled() || __map_without_bats) {
+	if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
 		pr_debug_once("Read-Write memory mapped without BATs\n");
 
 		if (base >= border)
 			return base;
arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
 #include <linux/context_tracking.h>
 #include <linux/hugetlb.h>
 #include <linux/uaccess.h>
+#include <linux/kfence.h>
 
 #include <asm/firmware.h>
 #include <asm/interrupt.h>
@@ -418,8 +419,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * take a page fault to a kernel address or a page fault to a user
 	 * address outside of dedicated places
 	 */
-	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
+	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
+		if (kfence_handle_page_fault(address, is_write, regs))
+			return 0;
+
 		return SIGSEGV;
+	}
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
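For reference, the hook used above is declared in <linux/kfence.h> and
compiles to a stub returning false when CONFIG_KFENCE=n, so the bad kernel
fault path is unchanged on non-KFENCE builds:

	bool kfence_handle_page_fault(unsigned long addr, bool is_write,
				      struct pt_regs *regs);

It returns true when the faulting address lies in the KFENCE pool and a
report was produced, in which case the fault is treated as handled and the
handler returns 0 instead of SIGSEGV.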
arch/powerpc/mm/init_32.c
@@ -97,6 +97,9 @@ static void __init MMU_setup(void)
 	if (IS_ENABLED(CONFIG_PPC_8xx))
 		return;
 
+	if (IS_ENABLED(CONFIG_KFENCE))
+		__map_without_ltlbs = 1;
+
 	if (debug_pagealloc_enabled())
 		__map_without_ltlbs = 1;
arch/powerpc/mm/mmu_decl.h
@@ -185,3 +185,8 @@ void ptdump_check_wx(void);
 #else
 static inline void ptdump_check_wx(void) { }
 #endif
+
+static inline bool debug_pagealloc_enabled_or_kfence(void)
+{
+	return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
+}
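Because IS_ENABLED() folds to a compile-time constant, this predicate is
unconditionally true on KFENCE builds, so the compiler can drop the BAT and
large-TLB mapping branches entirely. Illustrative caller shape (the real
callers are the hunks above and below):

	/* Sketch: pick page-granular mapping whenever the predicate holds. */
	if (debug_pagealloc_enabled_or_kfence())
		top = boundary;	/* map the rest with 4K pages later */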
arch/powerpc/mm/nohash/8xx.c
@@ -149,7 +149,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
 	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
 	unsigned long sinittext = __pa(_sinittext);
-	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
+	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
 	unsigned long boundary = strict_boundary ? sinittext : etext8;
 	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
@@ -161,7 +161,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		return 0;
 
 	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
-	if (debug_pagealloc_enabled()) {
+	if (debug_pagealloc_enabled_or_kfence()) {
 		top = boundary;
 	} else {
 		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);